author     The Android Open Source Project <initial-contribution@android.com>  2009-02-10 15:43:59 -0800
committer  The Android Open Source Project <initial-contribution@android.com>  2009-02-10 15:43:59 -0800
commit     c27f813900a3c114562efbb8df1065e94766fc48 (patch)
tree       d95919283707dcab61009e27007374a745c9541e /target-arm
parent     0852ad57fa372f9b2854e4df685eaba8d8ef6790 (diff)
auto import from //branches/cupcake/...@130745
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/cpu.h                    284
-rw-r--r--  target-arm/exec.h                    48
-rw-r--r--  target-arm/helper.c                2262
-rw-r--r--  target-arm/helpers.h                548
-rw-r--r--  target-arm/iwmmxt_helper.c          682
-rw-r--r--  target-arm/machine.c                218
-rw-r--r--  target-arm/neon_helper.c           1457
-rw-r--r--  target-arm/nwfpe/double_cpdo.c      296
-rw-r--r--  target-arm/nwfpe/extended_cpdo.c    273
-rw-r--r--  target-arm/nwfpe/fpa11.c            237
-rw-r--r--  target-arm/nwfpe/fpa11.h            122
-rw-r--r--  target-arm/nwfpe/fpa11.inl           51
-rw-r--r--  target-arm/nwfpe/fpa11_cpdo.c       117
-rw-r--r--  target-arm/nwfpe/fpa11_cpdt.c       376
-rw-r--r--  target-arm/nwfpe/fpa11_cprt.c       290
-rw-r--r--  target-arm/nwfpe/fpopcode.c         148
-rw-r--r--  target-arm/nwfpe/fpopcode.h         390
-rw-r--r--  target-arm/nwfpe/fpsr.h             108
-rw-r--r--  target-arm/nwfpe/single_cpdo.c      255
-rw-r--r--  target-arm/op.c                    1230
-rw-r--r--  target-arm/op_addsub.h              103
-rw-r--r--  target-arm/op_helper.c              641
-rw-r--r--  target-arm/op_mem.h                 118
-rw-r--r--  target-arm/op_template.h             53
-rw-r--r--  target-arm/translate.c             8164
25 files changed, 13061 insertions, 5410 deletions
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index a613b77..ff765f7 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -22,12 +22,12 @@
#define TARGET_LONG_BITS 32
+#define ELF_MACHINE EM_ARM
+
#include "cpu-defs.h"
#include "softfloat.h"
-#define ARM_CPU_SAVE_VERSION 1
-
#define TARGET_HAS_ICE 1
#define EXCP_UDEF 1 /* undefined instruction */
@@ -37,6 +37,28 @@
#define EXCP_IRQ 5
#define EXCP_FIQ 6
#define EXCP_BKPT 7
+#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
+#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
+
+#define ARMV7M_EXCP_RESET 1
+#define ARMV7M_EXCP_NMI 2
+#define ARMV7M_EXCP_HARD 3
+#define ARMV7M_EXCP_MEM 4
+#define ARMV7M_EXCP_BUS 5
+#define ARMV7M_EXCP_USAGE 6
+#define ARMV7M_EXCP_SVC 11
+#define ARMV7M_EXCP_DEBUG 12
+#define ARMV7M_EXCP_PENDSV 14
+#define ARMV7M_EXCP_SYSTICK 15
+
+typedef void ARMWriteCPFunc(void *opaque, int cp_info,
+ int srcreg, int operand, uint32_t value);
+typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info,
+ int dstreg, int operand);
+
+struct arm_boot_info;
+
+#define NB_MMU_MODES 2
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -50,7 +72,7 @@ typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
/* Frequently accessed CPSR bits are stored separately for efficiency.
- This contains all the other bits. Use cpsr_{read,write} to accless
+ This contains all the other bits. Use cpsr_{read,write} to access
the whole CPSR. */
uint32_t uncached_cpsr;
uint32_t spsr;
@@ -67,53 +89,100 @@ typedef struct CPUARMState {
/* cpsr flag cache for faster execution */
uint32_t CF; /* 0 or 1 */
uint32_t VF; /* V is the bit 31. All other bits are undefined */
- uint32_t NZF; /* N is bit 31. Z is computed from NZF */
+ uint32_t NF; /* N is bit 31. All other bits are undefined. */
+ uint32_t ZF; /* Z set if zero. */
uint32_t QF; /* 0 or 1 */
-
- int thumb; /* 0 = arm mode, 1 = thumb mode */
+ uint32_t GE; /* cpsr[19:16] */
+ uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
+ uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
/* System control coprocessor (cp15) */
struct {
uint32_t c0_cpuid;
+ uint32_t c0_cachetype;
+ uint32_t c0_c1[8]; /* Feature registers. */
+ uint32_t c0_c2[8]; /* Instruction set registers. */
uint32_t c1_sys; /* System control register. */
uint32_t c1_coproc; /* Coprocessor access register. */
- uint32_t c2; /* MMU translation table base. */
- uint32_t c3; /* MMU domain access control register. */
+ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
+ uint32_t c2_base0; /* MMU translation table base 0. */
+ uint32_t c2_base1; /* MMU translation table base 1. */
+ uint32_t c2_mask; /* MMU translation table base mask. */
+ uint32_t c2_data; /* MPU data cachable bits. */
+ uint32_t c2_insn; /* MPU instruction cachable bits. */
+ uint32_t c3; /* MMU domain access control register
+ MPU write buffer control. */
uint32_t c5_insn; /* Fault status registers. */
uint32_t c5_data;
+ uint32_t c6_region[8]; /* MPU base/size registers. */
uint32_t c6_insn; /* Fault address registers. */
uint32_t c6_data;
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
uint32_t c13_fcse; /* FCSE PID. */
uint32_t c13_context; /* Context ID. */
+ uint32_t c13_tls1; /* User RW Thread register. */
+ uint32_t c13_tls2; /* User RO Thread register. */
+ uint32_t c13_tls3; /* Privileged Thread register. */
+ uint32_t c15_cpar; /* XScale Coprocessor Access Register */
+ uint32_t c15_ticonfig; /* TI925T configuration byte. */
+ uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
+ uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
+ uint32_t c15_threadid; /* TI debugger thread-ID. */
} cp15;
+ struct {
+ uint32_t other_sp;
+ uint32_t vecbase;
+ uint32_t basepri;
+ uint32_t control;
+ int current_sp;
+ int exception;
+ int pending_exception;
+ void *nvic;
+ } v7m;
+
+ /* Coprocessor IO used by peripherals */
+ struct {
+ ARMReadCPFunc *cp_read;
+ ARMWriteCPFunc *cp_write;
+ void *opaque;
+ } cp[15];
+
/* Internal CPU feature flags. */
uint32_t features;
- /* exception/interrupt handling */
- jmp_buf jmp_env;
- int exception_index;
- int interrupt_request;
- int user_mode_only;
- int halted;
+ /* Callback for vectored interrupt controller. */
+ int (*get_irq_vector)(struct CPUARMState *);
+ void *irq_opaque;
/* VFP coprocessor state. */
struct {
- float64 regs[16];
+ float64 regs[32];
uint32_t xregs[16];
/* We store these fpcsr fields separately for convenience. */
int vec_len;
int vec_stride;
- /* Temporary variables if we don't have spare fp regs. */
- float32 tmp0s, tmp1s;
- float64 tmp0d, tmp1d;
+ /* scratch space when Tn are not sufficient. */
+ uint32_t scratch[8];
float_status fp_status;
} vfp;
+#if defined(CONFIG_USER_ONLY)
+ struct mmon_state *mmon_entry;
+#else
+ uint32_t mmon_addr;
+#endif
+
+ /* iwMMXt coprocessor state. */
+ struct {
+ uint64_t regs[16];
+ uint64_t val;
+
+ uint32_t cregs[16];
+ } iwmmxt;
#if defined(CONFIG_USER_ONLY)
/* For usermode syscall translation. */
@@ -122,21 +191,31 @@ typedef struct CPUARMState {
CPU_COMMON
+ /* These fields come after the common ones so they are preserved on reset. */
+ struct arm_boot_info *boot_info;
} CPUARMState;
-CPUARMState *cpu_arm_init(void);
+CPUARMState *cpu_arm_init(const char *cpu_model);
+void arm_translate_init(void);
int cpu_arm_exec(CPUARMState *s);
void cpu_arm_close(CPUARMState *s);
void do_interrupt(CPUARMState *);
void switch_mode(CPUARMState *, int);
+uint32_t do_arm_semihosting(CPUARMState *env);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
-struct siginfo;
-int cpu_arm_signal_handler(int host_signum, struct siginfo *info,
+int cpu_arm_signal_handler(int host_signum, void *pinfo,
void *puc);
+void cpu_lock(void);
+void cpu_unlock(void);
+static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
+{
+ env->cp15.c13_tls2 = newtls;
+}
+
#define CPSR_M (0x1f)
#define CPSR_T (1 << 5)
#define CPSR_F (1 << 6)
@@ -144,42 +223,65 @@ int cpu_arm_signal_handler(int host_signum, struct siginfo *info,
#define CPSR_A (1 << 8)
#define CPSR_E (1 << 9)
#define CPSR_IT_2_7 (0xfc00)
-/* Bits 20-23 reserved. */
+#define CPSR_GE (0xf << 16)
+#define CPSR_RESERVED (0xf << 20)
#define CPSR_J (1 << 24)
#define CPSR_IT_0_1 (3 << 25)
#define CPSR_Q (1 << 27)
-#define CPSR_NZCV (0xf << 28)
+#define CPSR_V (1 << 28)
+#define CPSR_C (1 << 29)
+#define CPSR_Z (1 << 30)
+#define CPSR_N (1 << 31)
+#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
+
+#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
+#define CACHED_CPSR_BITS (CPSR_T | CPSR_GE | CPSR_IT | CPSR_Q | CPSR_NZCV)
+/* Bits writable in user mode. */
+#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
+/* Execution state bits. MRS read as zero, MSR writes ignored. */
+#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J)
-#define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV)
/* Return the current CPSR value. */
-static inline uint32_t cpsr_read(CPUARMState *env)
+uint32_t cpsr_read(CPUARMState *env);
+/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask);
+
+/* Return the current xPSR value. */
+static inline uint32_t xpsr_read(CPUARMState *env)
{
int ZF;
- ZF = (env->NZF == 0);
- return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
- (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
- | (env->thumb << 5);
+ ZF = (env->ZF == 0);
+ return (env->NF & 0x80000000) | (ZF << 30)
+ | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+ | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
+ | ((env->condexec_bits & 0xfc) << 8)
+ | env->v7m.exception;
}
-/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
-static inline void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
+/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
+static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
- /* NOTE: N = 1 and Z = 1 cannot be stored currently */
if (mask & CPSR_NZCV) {
- env->NZF = (val & 0xc0000000) ^ 0x40000000;
+ env->ZF = (~val) & CPSR_Z;
+ env->NF = val;
env->CF = (val >> 29) & 1;
env->VF = (val << 3) & 0x80000000;
}
if (mask & CPSR_Q)
env->QF = ((val & CPSR_Q) != 0);
- if (mask & CPSR_T)
- env->thumb = ((val & CPSR_T) != 0);
-
- if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
- switch_mode(env, val & CPSR_M);
+ if (mask & (1 << 24))
+ env->thumb = ((val & (1 << 24)) != 0);
+ if (mask & CPSR_IT_0_1) {
+ env->condexec_bits &= ~3;
+ env->condexec_bits |= (val >> 25) & 3;
+ }
+ if (mask & CPSR_IT_2_7) {
+ env->condexec_bits &= 3;
+ env->condexec_bits |= (val >> 8) & 0xfc;
+ }
+ if (mask & 0x1ff) {
+ env->v7m.exception = val & 0x1ff;
}
- mask &= ~CACHED_CPSR_BITS;
- env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
enum arm_cpu_mode {
@@ -195,14 +297,37 @@ enum arm_cpu_mode {
/* VFP system registers. */
#define ARM_VFP_FPSID 0
#define ARM_VFP_FPSCR 1
+#define ARM_VFP_MVFR1 6
+#define ARM_VFP_MVFR0 7
#define ARM_VFP_FPEXC 8
#define ARM_VFP_FPINST 9
#define ARM_VFP_FPINST2 10
+/* iwMMXt coprocessor control registers. */
+#define ARM_IWMMXT_wCID 0
+#define ARM_IWMMXT_wCon 1
+#define ARM_IWMMXT_wCSSF 2
+#define ARM_IWMMXT_wCASF 3
+#define ARM_IWMMXT_wCGR0 8
+#define ARM_IWMMXT_wCGR1 9
+#define ARM_IWMMXT_wCGR2 10
+#define ARM_IWMMXT_wCGR3 11
enum arm_features {
ARM_FEATURE_VFP,
- ARM_FEATURE_AUXCR /* ARM1026 Auxiliary control register. */
+ ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
+ ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
+ ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
+ ARM_FEATURE_V6,
+ ARM_FEATURE_V6K,
+ ARM_FEATURE_V7,
+ ARM_FEATURE_THUMB2,
+ ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
+ ARM_FEATURE_VFP3,
+ ARM_FEATURE_NEON,
+ ARM_FEATURE_DIV,
+ ARM_FEATURE_M, /* Microcontroller profile. */
+ ARM_FEATURE_OMAPCP /* OMAP specific CP15 ops handling. */
};
static inline int arm_feature(CPUARMState *env, int feature)
@@ -210,19 +335,86 @@ static inline int arm_feature(CPUARMState *env, int feature)
return (env->features & (1u << feature)) != 0;
}
-void cpu_arm_set_model(CPUARMState *env, uint32_t id);
-
-#define ARM_CPUID_ARM1026 0x4106a262
-#define ARM_CPUID_ARM926 0x41069265
+void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
+
+/* Interface between CPU and Interrupt controller. */
+void armv7m_nvic_set_pending(void *opaque, int irq);
+int armv7m_nvic_acknowledge_irq(void *opaque);
+void armv7m_nvic_complete_irq(void *opaque, int irq);
+
+void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
+ ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
+ void *opaque);
+
+/* Does the core conform to the "MicroController" profile? e.g. Cortex-M3.
+ Note the M in older cores (e.g. ARM7TDMI) stands for Multiply. These are
+ conventional cores (i.e. Application or Realtime profile). */
+
+#define IS_M(env) arm_feature(env, ARM_FEATURE_M)
+#define ARM_CPUID(env) (env->cp15.c0_cpuid)
+
+#define ARM_CPUID_ARM1026 0x4106a262
+#define ARM_CPUID_ARM926 0x41069265
+#define ARM_CPUID_ARM946 0x41059461
+#define ARM_CPUID_TI915T 0x54029152
+#define ARM_CPUID_TI925T 0x54029252
+#define ARM_CPUID_PXA250 0x69052100
+#define ARM_CPUID_PXA255 0x69052d00
+#define ARM_CPUID_PXA260 0x69052903
+#define ARM_CPUID_PXA261 0x69052d05
+#define ARM_CPUID_PXA262 0x69052d06
+#define ARM_CPUID_PXA270 0x69054110
+#define ARM_CPUID_PXA270_A0 0x69054110
+#define ARM_CPUID_PXA270_A1 0x69054111
+#define ARM_CPUID_PXA270_B0 0x69054112
+#define ARM_CPUID_PXA270_B1 0x69054113
+#define ARM_CPUID_PXA270_C0 0x69054114
+#define ARM_CPUID_PXA270_C5 0x69054117
+#define ARM_CPUID_ARM1136 0x4117b363
+#define ARM_CPUID_ARM1136_R2 0x4107b362
+#define ARM_CPUID_ARM11MPCORE 0x410fb022
+#define ARM_CPUID_CORTEXA8 0x410fc080
+#define ARM_CPUID_CORTEXM3 0x410fc231
+#define ARM_CPUID_ANY 0xffffffff
#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* The ARM MMU allows 1k pages. */
/* ??? Linux doesn't actually use these, and they're deprecated in recent
- architecture revisions. Maybe an a configure option to disable them. */
+ architecture revisions. Maybe a configure option to disable them. */
#define TARGET_PAGE_BITS 10
#endif
+
+#define CPUState CPUARMState
+#define cpu_init cpu_arm_init
+#define cpu_exec cpu_arm_exec
+#define cpu_gen_code cpu_arm_gen_code
+#define cpu_signal_handler cpu_arm_signal_handler
+#define cpu_list arm_cpu_list
+
+#define CPU_SAVE_VERSION 1
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index (CPUState *env)
+{
+ return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0;
+}
+
+#if defined(CONFIG_USER_ONLY)
+static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+{
+ if (newsp)
+ env->regs[13] = newsp;
+ env->regs[0] = 0;
+}
+#endif
+
+#define CPU_PC_FROM_TB(env, tb) env->regs[15] = tb->pc
+
#include "cpu-all.h"
#endif
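
The flag-cache split is the central cpu.h change above: the old NZF field could not represent N and Z set at the same time (see the removed "NOTE" in cpsr_write), so N now lives in bit 31 of NF and Z is encoded as "ZF == 0". A minimal standalone sketch of the recombination, mirroring the cpsr_read/xpsr_read logic above:

    #include <stdint.h>

    /* Rebuild CPSR[31:28] from the cached fields, as cpsr_read does. */
    static uint32_t flags_to_nzcv(uint32_t NF, uint32_t ZF,
                                  uint32_t CF, uint32_t VF)
    {
        return (NF & 0x80000000)                  /* N: bit 31 of NF */
             | ((uint32_t)(ZF == 0) << 30)        /* Z: set when ZF is zero */
             | (CF << 29)                         /* C: stored as 0 or 1 */
             | ((VF & 0x80000000) >> 3);          /* V: bit 31, moved to 28 */
    }

    /* NF = 0x80000000 with ZF = 0 reads back with both N and Z set,
       exactly the state the old single NZF encoding could not express. */
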
diff --git a/target-arm/exec.h b/target-arm/exec.h
index 2d2b99a..c543cf4 100644
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -1,6 +1,6 @@
/*
* ARM execution defines
- *
+ *
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
@@ -17,19 +17,14 @@
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "config.h"
#include "dyngen-exec.h"
register struct CPUARMState *env asm(AREG0);
register uint32_t T0 asm(AREG1);
register uint32_t T1 asm(AREG2);
-register uint32_t T2 asm(AREG3);
-/* TODO: Put these in FP regs on targets that have such things. */
-/* It is ok for FT0s and FT0d to overlap. Likewise FT1s and FT1d. */
-#define FT0s env->vfp.tmp0s
-#define FT1s env->vfp.tmp1s
-#define FT0d env->vfp.tmp0d
-#define FT1d env->vfp.tmp1d
+#define M0 env->iwmmxt.val
#include "cpu.h"
#include "exec-all.h"
@@ -43,33 +38,26 @@ static inline void regs_to_env(void)
}
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
- int is_user, int is_softmmu);
+ int mmu_idx, int is_softmmu);
+
+static inline int cpu_halted(CPUState *env) {
+ if (!env->halted)
+ return 0;
+ /* An interrupt wakes the CPU even if the I and F CPSR bits are
+ set. We use EXITTB to silently wake CPU without causing an
+ actual interrupt. */
+ if (env->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB)) {
+ env->halted = 0;
+ return 0;
+ }
+ return EXCP_HALTED;
+}
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif
-/* In op_helper.c */
-
-void cpu_lock(void);
-void cpu_unlock(void);
-void helper_set_cp15(CPUState *, uint32_t, uint32_t);
-uint32_t helper_get_cp15(CPUState *, uint32_t);
-
void cpu_loop_exit(void);
void raise_exception(int);
-
-void do_vfp_abss(void);
-void do_vfp_absd(void);
-void do_vfp_negs(void);
-void do_vfp_negd(void);
-void do_vfp_sqrts(void);
-void do_vfp_sqrtd(void);
-void do_vfp_cmps(void);
-void do_vfp_cmpd(void);
-void do_vfp_cmpes(void);
-void do_vfp_cmped(void);
-void do_vfp_set_fpscr(void);
-void do_vfp_get_fpscr(void);
-
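
The new cpu_halted above encodes the WFI wake-up rule: a pending FIQ, IRQ or EXITTB request clears env->halted even while the CPSR I/F bits would mask the interrupt. A hypothetical caller, purely to illustrate the contract (step and run_one_tb are made-up names, not QEMU APIs):

    /* Illustrative only: how an execution loop might consult cpu_halted(). */
    static void run_one_tb(CPUState *env);   /* placeholder: translate + run */

    static void step(CPUState *env)
    {
        if (cpu_halted(env) == EXCP_HALTED)
            return;               /* still asleep: nothing to execute */
        run_one_tb(env);          /* env->halted was cleared if a wake-up
                                     condition was pending */
    }
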
diff --git a/target-arm/helper.c b/target-arm/helper.c
index b78f7d1..7cc8b0f 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -4,58 +4,254 @@
#include "cpu.h"
#include "exec-all.h"
-#ifdef CONFIG_TRACE
-#include "trace.h"
-#endif
+#include "gdbstub.h"
+#include "helpers.h"
+#include "qemu-common.h"
+
+static uint32_t cortexa8_cp15_c0_c1[8] =
+{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
+
+static uint32_t cortexa8_cp15_c0_c2[8] =
+{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
+
+static uint32_t mpcore_cp15_c0_c1[8] =
+{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
+
+static uint32_t mpcore_cp15_c0_c2[8] =
+{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
+
+static uint32_t arm1136_cp15_c0_c1[8] =
+{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
+
+static uint32_t arm1136_cp15_c0_c2[8] =
+{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
+
+static uint32_t cpu_arm_find_by_name(const char *name);
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+ env->features |= 1u << feature;
+}
+
+static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
+{
+ env->cp15.c0_cpuid = id;
+ switch (id) {
+ case ARM_CPUID_ARM926:
+ set_feature(env, ARM_FEATURE_VFP);
+ env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
+ env->cp15.c0_cachetype = 0x1dd20d2;
+ env->cp15.c1_sys = 0x00090078;
+ break;
+ case ARM_CPUID_ARM946:
+ set_feature(env, ARM_FEATURE_MPU);
+ env->cp15.c0_cachetype = 0x0f004006;
+ env->cp15.c1_sys = 0x00000078;
+ break;
+ case ARM_CPUID_ARM1026:
+ set_feature(env, ARM_FEATURE_VFP);
+ set_feature(env, ARM_FEATURE_AUXCR);
+ env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
+ env->cp15.c0_cachetype = 0x1dd20d2;
+ env->cp15.c1_sys = 0x00090078;
+ break;
+ case ARM_CPUID_ARM1136_R2:
+ case ARM_CPUID_ARM1136:
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_VFP);
+ set_feature(env, ARM_FEATURE_AUXCR);
+ env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
+ env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
+ env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
+ memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
+ memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
+ env->cp15.c0_cachetype = 0x1dd20d2;
+ break;
+ case ARM_CPUID_ARM11MPCORE:
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_V6K);
+ set_feature(env, ARM_FEATURE_VFP);
+ set_feature(env, ARM_FEATURE_AUXCR);
+ env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
+ env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
+ env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
+ memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
+ memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
+ env->cp15.c0_cachetype = 0x1dd20d2;
+ break;
+ case ARM_CPUID_CORTEXA8:
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_V6K);
+ set_feature(env, ARM_FEATURE_V7);
+ set_feature(env, ARM_FEATURE_AUXCR);
+ set_feature(env, ARM_FEATURE_THUMB2);
+ set_feature(env, ARM_FEATURE_VFP);
+ set_feature(env, ARM_FEATURE_VFP3);
+ set_feature(env, ARM_FEATURE_NEON);
+ env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
+ env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
+ env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
+ memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
+ memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
+ env->cp15.c0_cachetype = 0x1dd20d2;
+ break;
+ case ARM_CPUID_CORTEXM3:
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_THUMB2);
+ set_feature(env, ARM_FEATURE_V7);
+ set_feature(env, ARM_FEATURE_M);
+ set_feature(env, ARM_FEATURE_DIV);
+ break;
+ case ARM_CPUID_ANY: /* For userspace emulation. */
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_V6K);
+ set_feature(env, ARM_FEATURE_V7);
+ set_feature(env, ARM_FEATURE_THUMB2);
+ set_feature(env, ARM_FEATURE_VFP);
+ set_feature(env, ARM_FEATURE_VFP3);
+ set_feature(env, ARM_FEATURE_NEON);
+ set_feature(env, ARM_FEATURE_DIV);
+ break;
+ case ARM_CPUID_TI915T:
+ case ARM_CPUID_TI925T:
+ set_feature(env, ARM_FEATURE_OMAPCP);
+ env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring. */
+ env->cp15.c0_cachetype = 0x5109149;
+ env->cp15.c1_sys = 0x00000070;
+ env->cp15.c15_i_max = 0x000;
+ env->cp15.c15_i_min = 0xff0;
+ break;
+ case ARM_CPUID_PXA250:
+ case ARM_CPUID_PXA255:
+ case ARM_CPUID_PXA260:
+ case ARM_CPUID_PXA261:
+ case ARM_CPUID_PXA262:
+ set_feature(env, ARM_FEATURE_XSCALE);
+ /* JTAG_ID is ((id << 28) | 0x09265013) */
+ env->cp15.c0_cachetype = 0xd172172;
+ env->cp15.c1_sys = 0x00000078;
+ break;
+ case ARM_CPUID_PXA270_A0:
+ case ARM_CPUID_PXA270_A1:
+ case ARM_CPUID_PXA270_B0:
+ case ARM_CPUID_PXA270_B1:
+ case ARM_CPUID_PXA270_C0:
+ case ARM_CPUID_PXA270_C5:
+ set_feature(env, ARM_FEATURE_XSCALE);
+ /* JTAG_ID is ((id << 28) | 0x09265013) */
+ set_feature(env, ARM_FEATURE_IWMMXT);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
+ env->cp15.c0_cachetype = 0xd172172;
+ env->cp15.c1_sys = 0x00000078;
+ break;
+ default:
+ cpu_abort(env, "Bad CPU ID: %x\n", id);
+ break;
+ }
+}
void cpu_reset(CPUARMState *env)
{
+ uint32_t id;
+ id = env->cp15.c0_cpuid;
+ memset(env, 0, offsetof(CPUARMState, breakpoints));
+ if (id)
+ cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
env->uncached_cpsr = ARM_CPU_MODE_USR;
env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
/* SVC mode with interrupts disabled. */
env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
+ /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
+ clear at reset. */
+ if (IS_M(env))
+ env->uncached_cpsr &= ~CPSR_I;
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
env->regs[15] = 0;
+ tlb_flush(env, 1);
}
-CPUARMState *cpu_arm_init(void)
+CPUARMState *cpu_arm_init(const char *cpu_model)
{
CPUARMState *env;
+ uint32_t id;
+ static int inited = 0;
+ id = cpu_arm_find_by_name(cpu_model);
+ if (id == 0)
+ return NULL;
env = qemu_mallocz(sizeof(CPUARMState));
if (!env)
return NULL;
cpu_exec_init(env);
+ if (!inited) {
+ inited = 1;
+ arm_translate_init();
+ }
+
+ env->cpu_model_str = cpu_model;
+ env->cp15.c0_cpuid = id;
cpu_reset(env);
- tlb_flush(env, 1);
return env;
}
-static inline void set_feature(CPUARMState *env, int feature)
+struct arm_cpu_t {
+ uint32_t id;
+ const char *name;
+};
+
+static const struct arm_cpu_t arm_cpu_names[] = {
+ { ARM_CPUID_ARM926, "arm926"},
+ { ARM_CPUID_ARM946, "arm946"},
+ { ARM_CPUID_ARM1026, "arm1026"},
+ { ARM_CPUID_ARM1136, "arm1136"},
+ { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
+ { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
+ { ARM_CPUID_CORTEXM3, "cortex-m3"},
+ { ARM_CPUID_CORTEXA8, "cortex-a8"},
+ { ARM_CPUID_TI925T, "ti925t" },
+ { ARM_CPUID_PXA250, "pxa250" },
+ { ARM_CPUID_PXA255, "pxa255" },
+ { ARM_CPUID_PXA260, "pxa260" },
+ { ARM_CPUID_PXA261, "pxa261" },
+ { ARM_CPUID_PXA262, "pxa262" },
+ { ARM_CPUID_PXA270, "pxa270" },
+ { ARM_CPUID_PXA270_A0, "pxa270-a0" },
+ { ARM_CPUID_PXA270_A1, "pxa270-a1" },
+ { ARM_CPUID_PXA270_B0, "pxa270-b0" },
+ { ARM_CPUID_PXA270_B1, "pxa270-b1" },
+ { ARM_CPUID_PXA270_C0, "pxa270-c0" },
+ { ARM_CPUID_PXA270_C5, "pxa270-c5" },
+ { ARM_CPUID_ANY, "any"},
+ { 0, NULL}
+};
+
+void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
- env->features |= 1u << feature;
+ int i;
+
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ for (i = 0; arm_cpu_names[i].name; i++) {
+ (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name);
+ }
}
-void cpu_arm_set_model(CPUARMState *env, uint32_t id)
+/* Return 0 if not found. */
+static uint32_t cpu_arm_find_by_name(const char *name)
{
- env->cp15.c0_cpuid = id;
- switch (id) {
- case ARM_CPUID_ARM926:
- set_feature(env, ARM_FEATURE_VFP);
- env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
- break;
- case ARM_CPUID_ARM1026:
- set_feature(env, ARM_FEATURE_VFP);
- set_feature(env, ARM_FEATURE_AUXCR);
- env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
- break;
- default:
- cpu_abort(env, "Bad CPU ID: %x\n", id);
- break;
+ int i;
+ uint32_t id;
+
+ id = 0;
+ for (i = 0; arm_cpu_names[i].name; i++) {
+ if (strcmp(name, arm_cpu_names[i].name) == 0) {
+ id = arm_cpu_names[i].id;
+ break;
+ }
}
+ return id;
}
void cpu_arm_close(CPUARMState *env)
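
With this change the CPU model is chosen by name at creation time instead of via the removed cpu_arm_set_model; a short caller sketch against the new interface (error handling elided):

    /* Sketch: instantiate a named core through the lookup path above. */
    CPUARMState *env = cpu_arm_init("cortex-a8");
    if (!env) {
        /* cpu_arm_find_by_name() returned 0: unknown model name. */
    }
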
@@ -63,6 +259,107 @@ void cpu_arm_close(CPUARMState *env)
free(env);
}
+uint32_t cpsr_read(CPUARMState *env)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
+ (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+ | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
+ | ((env->condexec_bits & 0xfc) << 8)
+ | (env->GE << 16);
+}
+
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ if (mask & CPSR_NZCV) {
+ env->ZF = (~val) & CPSR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+ if (mask & CPSR_Q)
+ env->QF = ((val & CPSR_Q) != 0);
+ if (mask & CPSR_T)
+ env->thumb = ((val & CPSR_T) != 0);
+ if (mask & CPSR_IT_0_1) {
+ env->condexec_bits &= ~3;
+ env->condexec_bits |= (val >> 25) & 3;
+ }
+ if (mask & CPSR_IT_2_7) {
+ env->condexec_bits &= 3;
+ env->condexec_bits |= (val >> 8) & 0xfc;
+ }
+ if (mask & CPSR_GE) {
+ env->GE = (val >> 16) & 0xf;
+ }
+
+ if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
+ switch_mode(env, val & CPSR_M);
+ }
+ mask &= ~CACHED_CPSR_BITS;
+ env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
+}
+
+/* Sign/zero extend */
+uint32_t HELPER(sxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(int8_t)x;
+ res |= (uint32_t)(int8_t)(x >> 16) << 16;
+ return res;
+}
+
+uint32_t HELPER(uxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(uint8_t)x;
+ res |= (uint32_t)(uint8_t)(x >> 16) << 16;
+ return res;
+}
+
+uint32_t HELPER(clz)(uint32_t x)
+{
+ int count;
+ for (count = 32; x; count--)
+ x >>= 1;
+ return count;
+}
+
+int32_t HELPER(sdiv)(int32_t num, int32_t den)
+{
+ if (den == 0)
+ return 0;
+ return num / den;
+}
+
+uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
+{
+ if (den == 0)
+ return 0;
+ return num / den;
+}
+
+uint32_t HELPER(rbit)(uint32_t x)
+{
+ x = ((x & 0xff000000) >> 24)
+ | ((x & 0x00ff0000) >> 8)
+ | ((x & 0x0000ff00) << 8)
+ | ((x & 0x000000ff) << 24);
+ x = ((x & 0xf0f0f0f0) >> 4)
+ | ((x & 0x0f0f0f0f) << 4);
+ x = ((x & 0x88888888) >> 3)
+ | ((x & 0x44444444) >> 1)
+ | ((x & 0x22222222) << 1)
+ | ((x & 0x11111111) << 3);
+ return x;
+}
+
+uint32_t HELPER(abs)(uint32_t x)
+{
+ return ((int32_t)x < 0) ? -x : x;
+}
+
#if defined(CONFIG_USER_ONLY)
void do_interrupt (CPUState *env)
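
HELPER(rbit) above reverses all 32 bits without a loop: swap the bytes, then the nibbles within each byte, then the bits within each nibble. A standalone check of the same technique:

    #include <assert.h>
    #include <stdint.h>

    /* Same three-stage bit reversal as HELPER(rbit) above. */
    static uint32_t rbit32(uint32_t x)
    {
        x = ((x & 0xff000000) >> 24) | ((x & 0x00ff0000) >> 8)
          | ((x & 0x0000ff00) << 8)  | ((x & 0x000000ff) << 24);
        x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4);
        x = ((x & 0x88888888) >> 3) | ((x & 0x44444444) >> 1)
          | ((x & 0x22222222) << 1) | ((x & 0x11111111) << 3);
        return x;
    }

    int main(void)
    {
        assert(rbit32(0x00000001) == 0x80000000);
        assert(rbit32(0x12345678) == 0x1e6a2c48);
        return 0;
    }
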
@@ -70,8 +367,18 @@ void do_interrupt (CPUState *env)
env->exception_index = -1;
}
+/* Structure used to record exclusive memory locations. */
+typedef struct mmon_state {
+ struct mmon_state *next;
+ CPUARMState *cpu_env;
+ uint32_t addr;
+} mmon_state;
+
+/* Chain of current locks. */
+static mmon_state* mmon_head = NULL;
+
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
- int is_user, int is_softmmu)
+ int mmu_idx, int is_softmmu)
{
if (rw == 2) {
env->exception_index = EXCP_PREFETCH_ABORT;
@@ -83,31 +390,128 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
return 1;
}
-target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+static void allocate_mmon_state(CPUState *env)
+{
+ env->mmon_entry = malloc(sizeof (mmon_state));
+ if (!env->mmon_entry)
+ abort();
+ memset (env->mmon_entry, 0, sizeof (mmon_state));
+ env->mmon_entry->cpu_env = env;
+ mmon_head = env->mmon_entry;
+}
+
+/* Flush any monitor locks for the specified address. */
+static void flush_mmon(uint32_t addr)
+{
+ mmon_state *mon;
+
+ for (mon = mmon_head; mon; mon = mon->next)
+ {
+ if (mon->addr != addr)
+ continue;
+
+ mon->addr = 0;
+ break;
+ }
+}
+
+/* Mark an address for exclusive access. */
+void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
+{
+ if (!env->mmon_entry)
+ allocate_mmon_state(env);
+ /* Clear any previous locks. */
+ flush_mmon(addr);
+ env->mmon_entry->addr = addr;
+}
+
+/* Test if an exclusive address is still exclusive. Returns zero
+ if the address is still exclusive. */
+uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
+{
+ int res;
+
+ if (!env->mmon_entry)
+ return 1;
+ if (env->mmon_entry->addr == addr)
+ res = 0;
+ else
+ res = 1;
+ flush_mmon(addr);
+ return res;
+}
+
+void HELPER(clrex)(CPUState *env)
+{
+ if (!(env->mmon_entry && env->mmon_entry->addr))
+ return;
+ flush_mmon(env->mmon_entry->addr);
+}
+
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
return addr;
}
/* These should probably raise undefined insn exceptions. */
-void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
+{
+ int op1 = (insn >> 8) & 0xf;
+ cpu_abort(env, "cp%i insn %08x\n", op1, insn);
+ return;
+}
+
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
+{
+ int op1 = (insn >> 8) & 0xf;
+ cpu_abort(env, "cp%i insn %08x\n", op1, insn);
+ return 0;
+}
+
+void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
cpu_abort(env, "cp15 insn %08x\n", insn);
}
-uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
cpu_abort(env, "cp15 insn %08x\n", insn);
return 0;
}
+/* These should probably raise undefined insn exceptions. */
+void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
+{
+ cpu_abort(env, "v7m_mrs %d\n", reg);
+}
+
+uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
+{
+ cpu_abort(env, "v7m_mrs %d\n", reg);
+ return 0;
+}
+
void switch_mode(CPUState *env, int mode)
{
if (mode != ARM_CPU_MODE_USR)
cpu_abort(env, "Tried to switch out of user mode\n");
}
+void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
+{
+ cpu_abort(env, "banked r13 write\n");
+}
+
+uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
+{
+ cpu_abort(env, "banked r13 read\n");
+ return 0;
+}
+
#else
+extern int semihosting_enabled;
+
/* Map CPU modes onto saved register banks. */
static inline int bank_number (int mode)
{
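
mark_exclusive/test_exclusive above model the ldrex/strex monitor for user mode: ldrex opens the monitor on an address, and strex succeeds only while it is still open (test_exclusive returns zero), closing it either way. A rough sketch of a translated pair driving them, assuming the usual helper_ name expansion; load32/store32 are placeholder guest-memory accessors, not real helpers:

    /* Sketch only; load32()/store32() stand in for guest memory access. */
    static uint32_t load32(uint32_t addr);
    static void store32(uint32_t addr, uint32_t val);

    static uint32_t emu_ldrex(CPUState *env, uint32_t addr)
    {
        helper_mark_exclusive(env, addr);     /* open the monitor on addr */
        return load32(addr);
    }

    static uint32_t emu_strex(CPUState *env, uint32_t addr, uint32_t val)
    {
        if (helper_test_exclusive(env, addr)) /* nonzero: lost exclusivity */
            return 1;                         /* STREX fails, Rd = 1 */
        store32(addr, val);
        return 0;                             /* STREX succeeds, Rd = 0 */
    }
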
@@ -158,6 +562,138 @@ void switch_mode(CPUState *env, int mode)
env->spsr = env->banked_spsr[i];
}
+static void v7m_push(CPUARMState *env, uint32_t val)
+{
+ env->regs[13] -= 4;
+ stl_phys(env->regs[13], val);
+}
+
+static uint32_t v7m_pop(CPUARMState *env)
+{
+ uint32_t val;
+ val = ldl_phys(env->regs[13]);
+ env->regs[13] += 4;
+ return val;
+}
+
+/* Switch to V7M main or process stack pointer. */
+static void switch_v7m_sp(CPUARMState *env, int process)
+{
+ uint32_t tmp;
+ if (env->v7m.current_sp != process) {
+ tmp = env->v7m.other_sp;
+ env->v7m.other_sp = env->regs[13];
+ env->regs[13] = tmp;
+ env->v7m.current_sp = process;
+ }
+}
+
+static void do_v7m_exception_exit(CPUARMState *env)
+{
+ uint32_t type;
+ uint32_t xpsr;
+
+ type = env->regs[15];
+ if (env->v7m.exception != 0)
+ armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);
+
+ /* Switch to the target stack. */
+ switch_v7m_sp(env, (type & 4) != 0);
+ /* Pop registers. */
+ env->regs[0] = v7m_pop(env);
+ env->regs[1] = v7m_pop(env);
+ env->regs[2] = v7m_pop(env);
+ env->regs[3] = v7m_pop(env);
+ env->regs[12] = v7m_pop(env);
+ env->regs[14] = v7m_pop(env);
+ env->regs[15] = v7m_pop(env);
+ xpsr = v7m_pop(env);
+ xpsr_write(env, xpsr, 0xfffffdff);
+ /* Undo stack alignment. */
+ if (xpsr & 0x200)
+ env->regs[13] |= 4;
+ /* ??? The exception return type specifies Thread/Handler mode. However
+ this is also implied by the xPSR value. Not sure what to do
+ if there is a mismatch. */
+ /* ??? Likewise for mismatches between the CONTROL register and the stack
+ pointer. */
+}
+
+void do_interrupt_v7m(CPUARMState *env)
+{
+ uint32_t xpsr = xpsr_read(env);
+ uint32_t lr;
+ uint32_t addr;
+
+ lr = 0xfffffff1;
+ if (env->v7m.current_sp)
+ lr |= 4;
+ if (env->v7m.exception == 0)
+ lr |= 8;
+
+ /* For exceptions we just mark as pending on the NVIC, and let that
+ handle it. */
+ /* TODO: Need to escalate if the current priority is higher than the
+ one we're raising. */
+ switch (env->exception_index) {
+ case EXCP_UDEF:
+ armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
+ return;
+ case EXCP_SWI:
+ env->regs[15] += 2;
+ armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
+ return;
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
+ return;
+ case EXCP_BKPT:
+ if (semihosting_enabled) {
+ int nr;
+ nr = lduw_code(env->regs[15]) & 0xff;
+ if (nr == 0xab) {
+ env->regs[15] += 2;
+ env->regs[0] = do_arm_semihosting(env);
+ return;
+ }
+ }
+ armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
+ return;
+ case EXCP_IRQ:
+ env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
+ break;
+ case EXCP_EXCEPTION_EXIT:
+ do_v7m_exception_exit(env);
+ return;
+ default:
+ cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ return; /* Never happens. Keep compiler happy. */
+ }
+
+ /* Align stack pointer. */
+ /* ??? Should only do this if Configuration Control Register
+ STACKALIGN bit is set. */
+ if (env->regs[13] & 4) {
+ env->regs[13] -= 4;
+ xpsr |= 0x200;
+ }
+ /* Switch to the handler mode. */
+ v7m_push(env, xpsr);
+ v7m_push(env, env->regs[15]);
+ v7m_push(env, env->regs[14]);
+ v7m_push(env, env->regs[12]);
+ v7m_push(env, env->regs[3]);
+ v7m_push(env, env->regs[2]);
+ v7m_push(env, env->regs[1]);
+ v7m_push(env, env->regs[0]);
+ switch_v7m_sp(env, 0);
+ env->uncached_cpsr &= ~CPSR_IT;
+ env->regs[14] = lr;
+ addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
+ env->regs[15] = addr & 0xfffffffe;
+ env->thumb = addr & 1;
+}
+
/* Handle a CPU exception. */
void do_interrupt(CPUARMState *env)
{
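
do_interrupt_v7m above is the v7-M entry sequence: push xPSR, pc, lr, r12 and r3-r0 (so xPSR lands at the highest address), switch to the main stack, load lr with an EXC_RETURN magic value, then vector through vecbase + 4 * exception, with bit 0 of the fetched address selecting Thumb state. The resulting frame for a hypothetical entry SP of 0x20001000, with no STACKALIGN adjustment needed:

    /* Frame built by the v7m_push calls above (hypothetical SP 0x20001000):
         0x20000ffc   xPSR     <- pushed first
         0x20000ff8   pc (return address)
         0x20000ff4   lr
         0x20000ff0   r12
         0x20000fec   r3
         0x20000fe8   r2
         0x20000fe4   r1
         0x20000fe0   r0       <- SP seen by the handler
       lr on entry: 0xfffffff1, OR 4 if the process stack was active,
       OR 8 if the exception was taken from thread mode (exception == 0). */
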
@@ -166,11 +702,10 @@ void do_interrupt(CPUARMState *env)
int new_mode;
uint32_t offset;
-#ifdef CONFIG_TRACE
- if (tracing)
- trace_exception(env->regs[15]);
-#endif
-
+ if (IS_M(env)) {
+ do_interrupt_v7m(env);
+ return;
+ }
/* TODO: Vectored interrupt controller. */
switch (env->exception_index) {
case EXCP_UDEF:
@@ -183,14 +718,41 @@ void do_interrupt(CPUARMState *env)
offset = 4;
break;
case EXCP_SWI:
+ if (semihosting_enabled) {
+ /* Check for semihosting interrupt. */
+ if (env->thumb) {
+ mask = lduw_code(env->regs[15] - 2) & 0xff;
+ } else {
+ mask = ldl_code(env->regs[15] - 4) & 0xffffff;
+ }
+ /* Only intercept calls from privileged modes, to provide some
+ semblance of security. */
+ if (((mask == 0x123456 && !env->thumb)
+ || (mask == 0xab && env->thumb))
+ && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
+ env->regs[0] = do_arm_semihosting(env);
+ return;
+ }
+ }
new_mode = ARM_CPU_MODE_SVC;
addr = 0x08;
mask = CPSR_I;
- /* The PC already points to the next instructon. */
+ /* The PC already points to the next instruction. */
offset = 0;
break;
- case EXCP_PREFETCH_ABORT:
case EXCP_BKPT:
+ /* See if this is a semihosting syscall. */
+ if (env->thumb && semihosting_enabled) {
+ mask = lduw_code(env->regs[15]) & 0xff;
+ if (mask == 0xab
+ && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
+ env->regs[15] += 2;
+ env->regs[0] = do_arm_semihosting(env);
+ return;
+ }
+ }
+ /* Fall through to prefetch abort. */
+ case EXCP_PREFETCH_ABORT:
new_mode = ARM_CPU_MODE_ABT;
addr = 0x0c;
mask = CPSR_A | CPSR_I;
@@ -226,6 +788,8 @@ void do_interrupt(CPUARMState *env)
}
switch_mode (env, new_mode);
env->spsr = cpsr_read(env);
+ /* Clear IT bits. */
+ env->condexec_bits = 0;
/* Switch to the new mode, and switch to Arm mode. */
/* ??? Thumb interrupt handlers not implemented. */
env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
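
The semihosting paths above key off the trap immediate (0x123456 for an ARM-state SWI, 0xab for Thumb) and, for SWI, a privileged mode. Recovering the immediate is a masked read of the instruction behind the already-advanced PC; a sketch mirroring the two branches above:

    /* Sketch: the SVC immediate of the instruction that just trapped.
       regs[15] already points past the SWI, as in the handler above. */
    static uint32_t swi_immediate(CPUARMState *env)
    {
        if (env->thumb)
            return lduw_code(env->regs[15] - 2) & 0xff;   /* 8-bit imm */
        return ldl_code(env->regs[15] - 4) & 0xffffff;    /* 24-bit imm */
    }
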
@@ -242,9 +806,16 @@ void do_interrupt(CPUARMState *env)
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
int is_user)
{
+ int prot_ro;
+
if (domain == 3)
return PAGE_READ | PAGE_WRITE;
+ if (access_type == 1)
+ prot_ro = 0;
+ else
+ prot_ro = PAGE_READ;
+
switch (ap) {
case 0:
if (access_type == 1)
@@ -261,18 +832,24 @@ static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
return is_user ? 0 : PAGE_READ | PAGE_WRITE;
case 2:
if (is_user)
- return (access_type == 1) ? 0 : PAGE_READ;
+ return prot_ro;
else
return PAGE_READ | PAGE_WRITE;
case 3:
return PAGE_READ | PAGE_WRITE;
+ case 4: case 7: /* Reserved. */
+ return 0;
+ case 5:
+ return is_user ? 0 : prot_ro;
+ case 6:
+ return prot_ro;
default:
abort();
}
}
-static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
+ int is_user, uint32_t *phys_ptr, int *prot)
{
int code;
uint32_t table;
@@ -282,94 +859,273 @@ static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
int domain;
uint32_t phys_addr;
- /* Fast Context Switch Extension. */
- if (address < 0x02000000)
- address += env->cp15.c13_fcse;
-
- if ((env->cp15.c1_sys & 1) == 0) {
- /* MMU diusabled. */
- *phys_ptr = address;
- *prot = PAGE_READ | PAGE_WRITE;
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (address & env->cp15.c2_mask)
+ table = env->cp15.c2_base1;
+ else
+ table = env->cp15.c2_base0;
+ table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
+ desc = ldl_phys(table);
+ type = (desc & 3);
+ domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
+ if (type == 0) {
+ /* Section translation fault. */
+ code = 5;
+ goto do_fault;
+ }
+ if (domain == 0 || domain == 2) {
+ if (type == 2)
+ code = 9; /* Section domain fault. */
+ else
+ code = 11; /* Page domain fault. */
+ goto do_fault;
+ }
+ if (type == 2) {
+ /* 1Mb section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ ap = (desc >> 10) & 3;
+ code = 13;
} else {
- /* Pagetable walk. */
- /* Lookup l1 descriptor. */
- table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc);
+ /* Lookup l2 entry. */
+ if (type == 1) {
+ /* Coarse pagetable. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ } else {
+ /* Fine pagetable. */
+ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
+ }
desc = ldl_phys(table);
- type = (desc & 3);
- domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
- if (type == 0) {
- /* Secton translation fault. */
- code = 5;
- goto do_fault;
- }
- if (domain == 0 || domain == 2) {
- if (type == 2)
- code = 9; /* Section domain fault. */
- else
- code = 11; /* Page domain fault. */
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ code = 7;
goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ break;
+ case 2: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ break;
+ case 3: /* 1k page. */
+ if (type == 1) {
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ } else {
+ /* Page translation fault. */
+ code = 7;
+ goto do_fault;
+ }
+ } else {
+ phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+ }
+ ap = (desc >> 4) & 3;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
}
- if (type == 2) {
- /* 1Mb section. */
- phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
- ap = (desc >> 10) & 3;
- code = 13;
+ code = 15;
+ }
+ *prot = check_ap(env, ap, domain, access_type, is_user);
+ if (!*prot) {
+ /* Access permission fault. */
+ goto do_fault;
+ }
+ *phys_ptr = phys_addr;
+ return 0;
+do_fault:
+ return code | (domain << 4);
+}
+
+static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
+ int is_user, uint32_t *phys_ptr, int *prot)
+{
+ int code;
+ uint32_t table;
+ uint32_t desc;
+ uint32_t xn;
+ int type;
+ int ap;
+ int domain;
+ uint32_t phys_addr;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (address & env->cp15.c2_mask)
+ table = env->cp15.c2_base1;
+ else
+ table = env->cp15.c2_base0;
+ table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
+ desc = ldl_phys(table);
+ type = (desc & 3);
+ if (type == 0) {
+ /* Section translation fault. */
+ code = 5;
+ domain = 0;
+ goto do_fault;
+ } else if (type == 2 && (desc & (1 << 18))) {
+ /* Supersection. */
+ domain = 0;
+ } else {
+ /* Section or page. */
+ domain = (desc >> 4) & 0x1e;
+ }
+ domain = (env->cp15.c3 >> domain) & 3;
+ if (domain == 0 || domain == 2) {
+ if (type == 2)
+ code = 9; /* Section domain fault. */
+ else
+ code = 11; /* Page domain fault. */
+ goto do_fault;
+ }
+ if (type == 2) {
+ if (desc & (1 << 18)) {
+ /* Supersection. */
+ phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
} else {
- /* Lookup l2 entry. */
- table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- desc = ldl_phys(table);
- switch (desc & 3) {
- case 0: /* Page translation fault. */
- code = 7;
- goto do_fault;
- case 1: /* 64k page. */
- phys_addr = (desc & 0xffff0000) | (address & 0xffff);
- ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
- break;
- case 2: /* 4k page. */
- phys_addr = (desc & 0xfffff000) | (address & 0xfff);
- ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
- break;
- case 3: /* 1k page. */
- if (type == 1) {
- /* Page translation fault. */
- code = 7;
- goto do_fault;
- }
- phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
- ap = (desc >> 4) & 3;
- break;
- default:
- /* Never happens, but compiler isn't smart enough to tell. */
- abort();
- }
- code = 15;
+ /* Section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
}
- *prot = check_ap(env, ap, domain, access_type, is_user);
- if (!*prot) {
- /* Access permission fault. */
+ ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
+ xn = desc & (1 << 4);
+ code = 13;
+ } else {
+ /* Lookup l2 entry. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ desc = ldl_phys(table);
+ ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ code = 7;
goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ xn = desc & (1 << 15);
+ break;
+ case 2: case 3: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ xn = desc & 1;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
}
- *phys_ptr = phys_addr;
+ code = 15;
+ }
+ if (xn && access_type == 2)
+ goto do_fault;
+
+ *prot = check_ap(env, ap, domain, access_type, is_user);
+ if (!*prot) {
+ /* Access permission fault. */
+ goto do_fault;
}
+ *phys_ptr = phys_addr;
return 0;
do_fault:
return code | (domain << 4);
}
+static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
+ int is_user, uint32_t *phys_ptr, int *prot)
+{
+ int n;
+ uint32_t mask;
+ uint32_t base;
+
+ *phys_ptr = address;
+ for (n = 7; n >= 0; n--) {
+ base = env->cp15.c6_region[n];
+ if ((base & 1) == 0)
+ continue;
+ mask = 1 << ((base >> 1) & 0x1f);
+ /* Keep this shift separate from the above to avoid an
+ (undefined) << 32. */
+ mask = (mask << 1) - 1;
+ if (((base ^ address) & ~mask) == 0)
+ break;
+ }
+ if (n < 0)
+ return 2;
+
+ if (access_type == 2) {
+ mask = env->cp15.c5_insn;
+ } else {
+ mask = env->cp15.c5_data;
+ }
+ mask = (mask >> (n * 4)) & 0xf;
+ switch (mask) {
+ case 0:
+ return 1;
+ case 1:
+ if (is_user)
+ return 1;
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 2:
+ *prot = PAGE_READ;
+ if (!is_user)
+ *prot |= PAGE_WRITE;
+ break;
+ case 3:
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 5:
+ if (is_user)
+ return 1;
+ *prot = PAGE_READ;
+ break;
+ case 6:
+ *prot = PAGE_READ;
+ break;
+ default:
+ /* Bad permission. */
+ return 1;
+ }
+ return 0;
+}
+
+static inline int get_phys_addr(CPUState *env, uint32_t address,
+ int access_type, int is_user,
+ uint32_t *phys_ptr, int *prot)
+{
+ /* Fast Context Switch Extension. */
+ if (address < 0x02000000)
+ address += env->cp15.c13_fcse;
+
+ if ((env->cp15.c1_sys & 1) == 0) {
+ /* MMU/MPU disabled. */
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE;
+ return 0;
+ } else if (arm_feature(env, ARM_FEATURE_MPU)) {
+ return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
+ prot);
+ } else if (env->cp15.c1_sys & (1 << 23)) {
+ return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
+ prot);
+ } else {
+ return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
+ prot);
+ }
+}
+
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
- int access_type, int is_user, int is_softmmu)
+ int access_type, int mmu_idx, int is_softmmu)
{
uint32_t phys_addr;
int prot;
- int ret;
+ int ret, is_user;
+ is_user = mmu_idx == MMU_USER_IDX;
ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
if (ret == 0) {
/* Map a single [sub]page. */
phys_addr &= ~(uint32_t)0x3ff;
address &= ~(uint32_t)0x3ff;
- return tlb_set_page (env, address, phys_addr, prot, is_user,
+ return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
is_softmmu);
}
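
The walks above pack each level's indexing into mask arithmetic: the level-1 descriptor sits at (ttbr & 0xffffc000) | ((va >> 18) & 0x3ffc), i.e. base + 4 * va[31:20], and a coarse level-2 entry at (desc & 0xfffffc00) | ((va >> 10) & 0x3fc), i.e. table + 4 * va[19:12]. A worked example with hypothetical register and descriptor values:

    #include <stdint.h>

    /* Worked v5 table-walk arithmetic from get_phys_addr_v5 above.
       The TTBR and descriptor values are hypothetical. */
    int main(void)
    {
        uint32_t ttbr = 0x40000000;   /* 16KB-aligned translation base */
        uint32_t va   = 0x12345678;

        /* Level 1: base + 4 * va[31:20]. */
        uint32_t l1 = (ttbr & 0xffffc000) | ((va >> 18) & 0x3ffc);
        /* l1 == 0x4000048c (index 0x123, times 4) */

        /* Assume the descriptor is a coarse page table: type bits == 1. */
        uint32_t desc = 0x50000001;
        uint32_t l2 = (desc & 0xfffffc00) | ((va >> 10) & 0x3fc);
        /* l2 == 0x50000114 (index va[19:12] == 0x45, times 4) */

        /* Assume a 4k small-page descriptor there: */
        uint32_t page = 0x60000002;
        uint32_t phys = (page & 0xfffff000) | (va & 0xfff);
        /* phys == 0x60000678 */
        (void)l1; (void)l2; (void)phys;
        return 0;
    }
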
@@ -379,13 +1135,15 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
env->exception_index = EXCP_PREFETCH_ABORT;
} else {
env->cp15.c5_data = ret;
+ if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
+ env->cp15.c5_data |= (1 << 11);
env->cp15.c6_data = address;
env->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
-target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
uint32_t phys_addr;
int prot;
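
get_phys_addr_mpu above matches regions from 7 down to 0; each c6_region entry packs an enable bit (bit 0), a size exponent in bits 5:1 (the region spans 2^(size+1) bytes) and the base address in the upper bits. A worked hit test with hypothetical values:

    #include <stdint.h>

    /* MPU region check as in get_phys_addr_mpu above. Hypothetical region:
       base 0x10000000, size field 0x13 -> 2^(0x13 + 1) = 1MB, enabled. */
    int main(void)
    {
        uint32_t region = 0x10000000 | (0x13 << 1) | 1;  /* c6_region format */
        uint32_t addr   = 0x100abcde;

        if ((region & 1) == 0)
            return 1;                       /* region disabled */
        uint32_t mask = 1u << ((region >> 1) & 0x1f);
        mask = (mask << 1) - 1;             /* 0x000fffff: 1MB span */
        /* Hit if base and address agree outside the size mask. */
        return ((region ^ addr) & ~mask) == 0 ? 0 : 1;   /* 0: hit */
    }
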
@@ -399,65 +1157,217 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
return phys_addr;
}
-void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
+/* Not really implemented. Need to figure out a sane way of doing this.
+ Maybe add generic watchpoint support and use that. */
+
+void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
+{
+ env->mmon_addr = addr;
+}
+
+uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
+{
+ return (env->mmon_addr != addr);
+}
+
+void HELPER(clrex)(CPUState *env)
{
- uint32_t op2;
+ env->mmon_addr = -1;
+}
+
+void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
+{
+ int cp_num = (insn >> 8) & 0xf;
+ int cp_info = (insn >> 5) & 7;
+ int src = (insn >> 16) & 0xf;
+ int operand = insn & 0xf;
+
+ if (env->cp[cp_num].cp_write)
+ env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
+ cp_info, src, operand, val);
+}
+
+uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
+{
+ int cp_num = (insn >> 8) & 0xf;
+ int cp_info = (insn >> 5) & 7;
+ int dest = (insn >> 16) & 0xf;
+ int operand = insn & 0xf;
+
+ if (env->cp[cp_num].cp_read)
+ return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
+ cp_info, dest, operand);
+ return 0;
+}
+
+/* Return basic MPU access permission bits. */
+static uint32_t simple_mpu_ap_bits(uint32_t val)
+{
+ uint32_t ret;
+ uint32_t mask;
+ int i;
+ ret = 0;
+ mask = 3;
+ for (i = 0; i < 16; i += 2) {
+ ret |= (val >> i) & mask;
+ mask <<= 2;
+ }
+ return ret;
+}
+
+/* Pad basic MPU access permission bits to extended format. */
+static uint32_t extended_mpu_ap_bits(uint32_t val)
+{
+ uint32_t ret;
+ uint32_t mask;
+ int i;
+ ret = 0;
+ mask = 3;
+ for (i = 0; i < 16; i += 2) {
+ ret |= (val & mask) << i;
+ mask <<= 2;
+ }
+ return ret;
+}
+void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
+{
+ int op1;
+ int op2;
+ int crm;
+
+ op1 = (insn >> 21) & 7;
op2 = (insn >> 5) & 7;
+ crm = insn & 0xf;
switch ((insn >> 16) & 0xf) {
- case 0: /* ID codes. */
+ case 0:
+ if (((insn >> 21) & 7) == 2) {
+ /* ??? Select cache level. Ignore. */
+ return;
+ }
+ /* ID codes. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ break;
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ break;
goto bad_reg;
case 1: /* System configuration. */
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ op2 = 0;
switch (op2) {
case 0:
- env->cp15.c1_sys = val;
+ if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
+ env->cp15.c1_sys = val;
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(env, 1);
break;
+ case 1: /* Auxiliary control register. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ env->cp15.c1_xscaleauxcr = val;
+ break;
+ }
+ /* Not implemented. */
+ break;
case 2:
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ goto bad_reg;
env->cp15.c1_coproc = val;
/* ??? Is this safe when called from within a TB? */
tb_flush(env);
+ break;
default:
goto bad_reg;
}
break;
- case 2: /* MMU Page table control. */
- env->cp15.c2 = val;
+ case 2: /* MMU Page table control / MPU cache control. */
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
+ switch (op2) {
+ case 0:
+ env->cp15.c2_data = val;
+ break;
+ case 1:
+ env->cp15.c2_insn = val;
+ break;
+ default:
+ goto bad_reg;
+ }
+ } else {
+ switch (op2) {
+ case 0:
+ env->cp15.c2_base0 = val;
+ break;
+ case 1:
+ env->cp15.c2_base1 = val;
+ break;
+ case 2:
+ env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
+ break;
+ default:
+ goto bad_reg;
+ }
+ }
break;
- case 3: /* MMU Domain access control. */
+ case 3: /* MMU Domain access control / MPU write buffer control. */
env->cp15.c3 = val;
- tlb_flush(env, 1);
+ tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
break;
case 4: /* Reserved. */
goto bad_reg;
- case 5: /* MMU Fault status. */
+ case 5: /* MMU Fault status / MPU access permission. */
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ op2 = 0;
switch (op2) {
case 0:
+ if (arm_feature(env, ARM_FEATURE_MPU))
+ val = extended_mpu_ap_bits(val);
env->cp15.c5_data = val;
break;
case 1:
+ if (arm_feature(env, ARM_FEATURE_MPU))
+ val = extended_mpu_ap_bits(val);
env->cp15.c5_insn = val;
break;
- default:
- goto bad_reg;
- }
- break;
- case 6: /* MMU Fault address. */
- switch (op2) {
- case 0:
- env->cp15.c6_data = val;
+ case 2:
+ if (!arm_feature(env, ARM_FEATURE_MPU))
+ goto bad_reg;
+ env->cp15.c5_data = val;
break;
- case 1:
- env->cp15.c6_insn = val;
+ case 3:
+ if (!arm_feature(env, ARM_FEATURE_MPU))
+ goto bad_reg;
+ env->cp15.c5_insn = val;
break;
default:
goto bad_reg;
}
break;
+ case 6: /* MMU Fault address / MPU base/size. */
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (crm >= 8)
+ goto bad_reg;
+ env->cp15.c6_region[crm] = val;
+ } else {
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ op2 = 0;
+ switch (op2) {
+ case 0:
+ env->cp15.c6_data = val;
+ break;
+ case 1: /* ??? This is WFAR on armv6 */
+ case 2:
+ env->cp15.c6_insn = val;
+ break;
+ default:
+ goto bad_reg;
+ }
+ }
+ break;
case 7: /* Cache control. */
+ env->cp15.c15_i_max = 0x000;
+ env->cp15.c15_i_min = 0xff0;
/* No cache, so nothing to do. */
+ /* ??? MPCore has VA to PA translation functions. */
break;
case 8: /* MMU TLB control. */
switch (op2) {
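
The c2_mask computed above turns the translation-table split (TTBCR.N) into a prefix test: mask = ~(0xffffffff >> N) keeps the top N address bits, so any VA with one of them set selects c2_base1 (TTBR1) in get_phys_addr_v5/v6, and the read path later in this patch recovers N by counting left shifts until the mask empties. For a hypothetical N of 2:

    #include <stdint.h>

    /* TTBCR.N round trip matching the c2 handling above (N is hypothetical). */
    int main(void)
    {
        uint32_t val  = 2;                      /* TTBCR.N as written */
        uint32_t mask = ~(0xffffffffu >> val);  /* 0xc0000000 */
        /* VAs at or above 0x40000000 (a top bit set) use TTBR1. */
        int n = 0;
        while (mask) { n++; mask <<= 1; }       /* read-back: n == 2 */
        return n == 2 ? 0 : 1;
    }
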
@@ -475,21 +1385,48 @@ void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
tlb_flush_page(env, val + 0x800);
tlb_flush_page(env, val + 0xc00);
#else
- //tlb_flush(env, 1);
+ tlb_flush(env, 1);
#endif
break;
+ case 2: /* Invalidate on ASID. */
+ tlb_flush(env, val == 0);
+ break;
+ case 3: /* Invalidate single entry on MVA. */
+ /* ??? This is like case 1, but ignores ASID. */
+ tlb_flush(env, 1);
+ break;
default:
goto bad_reg;
}
break;
- case 9: /* Cache lockdown. */
- switch (op2) {
- case 0:
- env->cp15.c9_data = val;
- break;
- case 1:
- env->cp15.c9_insn = val;
+ case 9:
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
break;
+ switch (crm) {
+ case 0: /* Cache lockdown. */
+ switch (op1) {
+ case 0: /* L1 cache. */
+ switch (op2) {
+ case 0:
+ env->cp15.c9_data = val;
+ break;
+ case 1:
+ env->cp15.c9_insn = val;
+ break;
+ default:
+ goto bad_reg;
+ }
+ break;
+ case 1: /* L2 cache. */
+ /* Ignore writes to L2 lockdown/auxiliary registers. */
+ break;
+ default:
+ goto bad_reg;
+ }
+ break;
+ case 1: /* TCM memory region registers. */
+ /* Not implemented. */
+ goto bad_reg;
default:
goto bad_reg;
}
@@ -497,16 +1434,33 @@ void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
case 10: /* MMU TLB lockdown. */
/* ??? TLB lockdown not implemented. */
break;
- case 11: /* TCM DMA control. */
case 12: /* Reserved. */
goto bad_reg;
case 13: /* Process ID. */
switch (op2) {
case 0:
- env->cp15.c9_data = val;
+ /* Unlike real hardware the qemu TLB uses virtual addresses,
+ not modified virtual addresses, so this causes a TLB flush.
+ */
+ if (env->cp15.c13_fcse != val)
+ tlb_flush(env, 1);
+ env->cp15.c13_fcse = val;
break;
case 1:
- env->cp15.c9_insn = val;
+ /* This changes the ASID, so do a TLB flush. */
+ if (env->cp15.c13_context != val
+ && !arm_feature(env, ARM_FEATURE_MPU))
+ tlb_flush(env, 0);
+ env->cp15.c13_context = val;
+ break;
+ case 2:
+ env->cp15.c13_tls1 = val;
+ break;
+ case 3:
+ env->cp15.c13_tls2 = val;
+ break;
+ case 4:
+ env->cp15.c13_tls3 = val;
break;
default:
goto bad_reg;
@@ -515,83 +1469,254 @@ void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
case 14: /* Reserved. */
goto bad_reg;
case 15: /* Implementation specific. */
- /* ??? Internal registers not implemented. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ if (op2 == 0 && crm == 1) {
+ if (env->cp15.c15_cpar != (val & 0x3fff)) {
+ /* Changes cp0 to cp13 behavior, so needs a TB flush. */
+ tb_flush(env);
+ env->cp15.c15_cpar = val & 0x3fff;
+ }
+ break;
+ }
+ goto bad_reg;
+ }
+ if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
+ switch (crm) {
+ case 0:
+ break;
+ case 1: /* Set TI925T configuration. */
+ env->cp15.c15_ticonfig = val & 0xe7;
+ env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
+ ARM_CPUID_TI915T : ARM_CPUID_TI925T;
+ break;
+ case 2: /* Set I_max. */
+ env->cp15.c15_i_max = val;
+ break;
+ case 3: /* Set I_min. */
+ env->cp15.c15_i_min = val;
+ break;
+ case 4: /* Set thread-ID. */
+ env->cp15.c15_threadid = val & 0xffff;
+ break;
+ case 8: /* Wait-for-interrupt (deprecated). */
+ cpu_interrupt(env, CPU_INTERRUPT_HALT);
+ break;
+ default:
+ goto bad_reg;
+ }
+ }
break;
}
return;
bad_reg:
/* ??? For debugging only. Should raise illegal instruction exception. */
- cpu_abort(env, "Unimplemented cp15 register read\n");
+ cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
+ (insn >> 16) & 0xf, crm, op1, op2);
}
-uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
- uint32_t op2;
+ int op1;
+ int op2;
+ int crm;
+ op1 = (insn >> 21) & 7;
op2 = (insn >> 5) & 7;
+ crm = insn & 0xf;
switch ((insn >> 16) & 0xf) {
case 0: /* ID codes. */
- switch (op2) {
- default: /* Device ID. */
- return env->cp15.c0_cpuid;
- case 1: /* Cache Type. */
- return 0x1dd20d2;
- case 2: /* TCM status. */
+ switch (op1) {
+ case 0:
+ switch (crm) {
+ case 0:
+ switch (op2) {
+ case 0: /* Device ID. */
+ return env->cp15.c0_cpuid;
+ case 1: /* Cache Type. */
+ return env->cp15.c0_cachetype;
+ case 2: /* TCM status. */
+ return 0;
+ case 3: /* TLB type register. */
+ return 0; /* No lockable TLB entries. */
+ case 5: /* CPU ID */
+ return env->cpu_index;
+ default:
+ goto bad_reg;
+ }
+ case 1:
+ if (!arm_feature(env, ARM_FEATURE_V6))
+ goto bad_reg;
+ return env->cp15.c0_c1[op2];
+ case 2:
+ if (!arm_feature(env, ARM_FEATURE_V6))
+ goto bad_reg;
+ return env->cp15.c0_c2[op2];
+ case 3: case 4: case 5: case 6: case 7:
+ return 0;
+ default:
+ goto bad_reg;
+ }
+ case 1:
+ /* These registers aren't documented on arm11 cores. However
+ Linux looks at them anyway. */
+ if (!arm_feature(env, ARM_FEATURE_V6))
+ goto bad_reg;
+ if (crm != 0)
+ goto bad_reg;
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ goto bad_reg;
return 0;
+ default:
+ goto bad_reg;
}
case 1: /* System configuration. */
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ op2 = 0;
switch (op2) {
case 0: /* Control register. */
return env->cp15.c1_sys;
case 1: /* Auxiliary control register. */
- if (arm_feature(env, ARM_FEATURE_AUXCR))
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ return env->cp15.c1_xscaleauxcr;
+ if (!arm_feature(env, ARM_FEATURE_AUXCR))
+ goto bad_reg;
+ switch (ARM_CPUID(env)) {
+ case ARM_CPUID_ARM1026:
return 1;
- goto bad_reg;
+ case ARM_CPUID_ARM1136:
+ case ARM_CPUID_ARM1136_R2:
+ return 7;
+ case ARM_CPUID_ARM11MPCORE:
+ return 1;
+ case ARM_CPUID_CORTEXA8:
+ return 0;
+ default:
+ goto bad_reg;
+ }
case 2: /* Coprocessor access register. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE))
+ goto bad_reg;
return env->cp15.c1_coproc;
default:
goto bad_reg;
}
- case 2: /* MMU Page table control. */
- return env->cp15.c2;
- case 3: /* MMU Domain access control. */
+ case 2: /* MMU Page table control / MPU cache control. */
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
+ switch (op2) {
+            case 0:
+                return env->cp15.c2_data;
+            case 1:
+                return env->cp15.c2_insn;
+ default:
+ goto bad_reg;
+ }
+ } else {
+ switch (op2) {
+ case 0:
+ return env->cp15.c2_base0;
+ case 1:
+ return env->cp15.c2_base1;
+ case 2:
+ {
+ int n;
+ uint32_t mask;
+ n = 0;
+ mask = env->cp15.c2_mask;
+ while (mask) {
+ n++;
+ mask <<= 1;
+ }
+ return n;
+ }
+ default:
+ goto bad_reg;
+ }
+ }
+ case 3: /* MMU Domain access control / MPU write buffer control. */
return env->cp15.c3;
case 4: /* Reserved. */
goto bad_reg;
- case 5: /* MMU Fault status. */
+ case 5: /* MMU Fault status / MPU access permission. */
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ op2 = 0;
switch (op2) {
case 0:
+ if (arm_feature(env, ARM_FEATURE_MPU))
+ return simple_mpu_ap_bits(env->cp15.c5_data);
return env->cp15.c5_data;
case 1:
+ if (arm_feature(env, ARM_FEATURE_MPU))
+                return simple_mpu_ap_bits(env->cp15.c5_insn);
+ return env->cp15.c5_insn;
+ case 2:
+ if (!arm_feature(env, ARM_FEATURE_MPU))
+ goto bad_reg;
+ return env->cp15.c5_data;
+ case 3:
+ if (!arm_feature(env, ARM_FEATURE_MPU))
+ goto bad_reg;
return env->cp15.c5_insn;
default:
goto bad_reg;
}
case 6: /* MMU Fault address. */
- switch (op2) {
- case 0:
- return env->cp15.c6_data;
- case 1:
- /* Arm9 doesn't have an IFAR, but implementing it anyway shouldn't
- do any harm. */
- return env->cp15.c6_insn;
- default:
- goto bad_reg;
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (crm >= 8)
+ goto bad_reg;
+ return env->cp15.c6_region[crm];
+ } else {
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ op2 = 0;
+ switch (op2) {
+ case 0:
+ return env->cp15.c6_data;
+ case 1:
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+                    /* Watchpoint Fault Address. */
+ return 0; /* Not implemented. */
+ } else {
+                    /* Instruction Fault Address. */
+                    /* ARM9 doesn't have an IFAR, but implementing it anyway
+ shouldn't do any harm. */
+ return env->cp15.c6_insn;
+ }
+ case 2:
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+                /* Instruction Fault Address. */
+ return env->cp15.c6_insn;
+ } else {
+ goto bad_reg;
+ }
+ default:
+ goto bad_reg;
+ }
}
case 7: /* Cache control. */
- /* ??? This is for test, clean and invaidate operations that set the
- Z flag. We can't represent N = Z = 1, so it also clears clears
- the N flag. Oh well. */
- env->NZF = 0;
+ /* FIXME: Should only clear Z flag if destination is r15. */
+ env->ZF = 0;
return 0;
case 8: /* MMU TLB control. */
goto bad_reg;
case 9: /* Cache lockdown. */
- switch (op2) {
- case 0:
- return env->cp15.c9_data;
- case 1:
- return env->cp15.c9_insn;
+ switch (op1) {
+ case 0: /* L1 cache. */
+ if (arm_feature(env, ARM_FEATURE_OMAPCP))
+ return 0;
+ switch (op2) {
+ case 0:
+ return env->cp15.c9_data;
+ case 1:
+ return env->cp15.c9_insn;
+ default:
+ goto bad_reg;
+ }
+ case 1: /* L2 cache */
+ if (crm != 0)
+ goto bad_reg;
+ /* L2 Lockdown and Auxiliary control. */
+ return 0;
default:
goto bad_reg;
}
@@ -607,19 +1732,822 @@ uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
return env->cp15.c13_fcse;
case 1:
return env->cp15.c13_context;
+ case 2:
+ return env->cp15.c13_tls1;
+ case 3:
+ return env->cp15.c13_tls2;
+ case 4:
+ return env->cp15.c13_tls3;
default:
goto bad_reg;
}
case 14: /* Reserved. */
goto bad_reg;
case 15: /* Implementation specific. */
- /* ??? Internal registers not implemented. */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ if (op2 == 0 && crm == 1)
+ return env->cp15.c15_cpar;
+
+ goto bad_reg;
+ }
+ if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
+ switch (crm) {
+ case 0:
+ return 0;
+ case 1: /* Read TI925T configuration. */
+ return env->cp15.c15_ticonfig;
+ case 2: /* Read I_max. */
+ return env->cp15.c15_i_max;
+ case 3: /* Read I_min. */
+ return env->cp15.c15_i_min;
+ case 4: /* Read thread-ID. */
+ return env->cp15.c15_threadid;
+ case 8: /* TI925T_status */
+ return 0;
+ }
+ /* TODO: Peripheral port remap register:
+ * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
+ * controller base address at $rn & ~0xfff and map size of
+ * 0x200 << ($rn & 0xfff), when MMU is off. */
+ goto bad_reg;
+ }
return 0;
}
bad_reg:
/* ??? For debugging only. Should raise illegal instruction exception. */
- cpu_abort(env, "Unimplemented cp15 register read\n");
+ cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
+ (insn >> 16) & 0xf, crm, op1, op2);
return 0;
}
+void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
+{
+ env->banked_r13[bank_number(mode)] = val;
+}
+
+uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
+{
+ return env->banked_r13[bank_number(mode)];
+}
+
+uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
+{
+ switch (reg) {
+ case 0: /* APSR */
+ return xpsr_read(env) & 0xf8000000;
+ case 1: /* IAPSR */
+ return xpsr_read(env) & 0xf80001ff;
+ case 2: /* EAPSR */
+ return xpsr_read(env) & 0xff00fc00;
+ case 3: /* xPSR */
+ return xpsr_read(env) & 0xff00fdff;
+ case 5: /* IPSR */
+ return xpsr_read(env) & 0x000001ff;
+ case 6: /* EPSR */
+ return xpsr_read(env) & 0x0700fc00;
+ case 7: /* IEPSR */
+ return xpsr_read(env) & 0x0700edff;
+ case 8: /* MSP */
+ return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
+ case 9: /* PSP */
+ return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
+ case 16: /* PRIMASK */
+ return (env->uncached_cpsr & CPSR_I) != 0;
+ case 17: /* FAULTMASK */
+ return (env->uncached_cpsr & CPSR_F) != 0;
+ case 18: /* BASEPRI */
+ case 19: /* BASEPRI_MAX */
+ return env->v7m.basepri;
+ case 20: /* CONTROL */
+ return env->v7m.control;
+ default:
+ /* ??? For debugging only. */
+ cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
+ return 0;
+ }
+}
+
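The MSP/PSP cases above both key off env->v7m.current_sp: whichever stack
is active lives in regs[13], and the other one is parked in other_sp. A
minimal sketch of that selection, assuming a hypothetical v7m_read_sp()
helper that is not part of this patch:

static uint32_t v7m_read_sp(CPUState *env, int want_psp)
{
    /* current_sp != 0 means the process stack (PSP) is the live one.  */
    if (want_psp == (env->v7m.current_sp != 0))
        return env->regs[13];      /* requested SP is currently active */
    return env->v7m.other_sp;      /* requested SP is parked */
}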
+void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
+{
+ switch (reg) {
+ case 0: /* APSR */
+ xpsr_write(env, val, 0xf8000000);
+ break;
+ case 1: /* IAPSR */
+ xpsr_write(env, val, 0xf8000000);
+ break;
+ case 2: /* EAPSR */
+ xpsr_write(env, val, 0xfe00fc00);
+ break;
+ case 3: /* xPSR */
+ xpsr_write(env, val, 0xfe00fc00);
+ break;
+ case 5: /* IPSR */
+ /* IPSR bits are readonly. */
+ break;
+ case 6: /* EPSR */
+ xpsr_write(env, val, 0x0600fc00);
+ break;
+ case 7: /* IEPSR */
+ xpsr_write(env, val, 0x0600fc00);
+ break;
+ case 8: /* MSP */
+ if (env->v7m.current_sp)
+ env->v7m.other_sp = val;
+ else
+ env->regs[13] = val;
+ break;
+ case 9: /* PSP */
+ if (env->v7m.current_sp)
+ env->regs[13] = val;
+ else
+ env->v7m.other_sp = val;
+ break;
+ case 16: /* PRIMASK */
+ if (val & 1)
+ env->uncached_cpsr |= CPSR_I;
+ else
+ env->uncached_cpsr &= ~CPSR_I;
+ break;
+ case 17: /* FAULTMASK */
+ if (val & 1)
+ env->uncached_cpsr |= CPSR_F;
+ else
+ env->uncached_cpsr &= ~CPSR_F;
+ break;
+ case 18: /* BASEPRI */
+ env->v7m.basepri = val & 0xff;
+ break;
+ case 19: /* BASEPRI_MAX */
+ val &= 0xff;
+ if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
+ env->v7m.basepri = val;
+ break;
+ case 20: /* CONTROL */
+ env->v7m.control = val & 3;
+ switch_v7m_sp(env, (val & 2) != 0);
+ break;
+ default:
+ /* ??? For debugging only. */
+ cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
+ return;
+ }
+}
+
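BASEPRI_MAX (case 19) is a conditional write: it only moves BASEPRI to a
more urgent (numerically lower, nonzero) level. For example, with
basepri == 0x40 a BASEPRI_MAX write of 0x20 takes effect, a write of 0x60
is ignored, and from basepri == 0 any nonzero value is accepted; writes
of zero are always ignored.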
+void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
+ ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
+ void *opaque)
+{
+ if (cpnum < 0 || cpnum > 14) {
+ cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
+ return;
+ }
+
+ env->cp[cpnum].cp_read = cp_read;
+ env->cp[cpnum].cp_write = cp_write;
+ env->cp[cpnum].opaque = opaque;
+}
+
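For illustration, this is how a machine model might claim a coprocessor
with the hooks above; the my_cp_* names and the my_state type are
hypothetical, not part of this patch:

struct my_state {
    uint32_t reg[8];
};

static uint32_t my_cp_read(void *opaque, int cp_info, int dstreg, int operand)
{
    struct my_state *s = opaque;
    return s->reg[operand & 7];
}

static void my_cp_write(void *opaque, int cp_info,
                        int srcreg, int operand, uint32_t value)
{
    struct my_state *s = opaque;
    s->reg[operand & 7] = value;
}

/* At board init: cpu_arm_set_cp_io(env, 6, my_cp_read, my_cp_write, s); */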
+#endif
+
+/* Note that signed overflow is undefined in C. The following routines are
+ careful to use unsigned types where modulo arithmetic is required.
+ Failure to do so _will_ break on newer gcc. */
+
+/* Signed saturating arithmetic. */
+
+/* Perform 16-bit signed saturating addition. */
+static inline uint16_t add16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
+ if (a & 0x8000)
+ res = 0x8000;
+ else
+ res = 0x7fff;
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating addition. */
+static inline uint8_t add8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
+ if (a & 0x80)
+ res = 0x80;
+ else
+ res = 0x7f;
+ }
+ return res;
+}
+
+/* Perform 16-bit signed saturating subtraction. */
+static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
+ if (a & 0x8000)
+ res = 0x8000;
+ else
+ res = 0x7fff;
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating subtraction. */
+static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
+ if (a & 0x80)
+ res = 0x80;
+ else
+ res = 0x7f;
+ }
+ return res;
+}
+
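The sign test above relies on two's-complement identities: a signed add
overflows only when both operands have the same sign and the result's
sign differs, and a subtract only when the operands' signs differ. Two
worked cases for add16_sat():

  add16_sat(0x7fff, 0x0001): res = 0x8000; (res ^ a) has the sign bit
  set while (a ^ b) does not, so the result is clamped to 0x7fff.
  add16_sat(0x8000, 0xffff): res = 0x7fff; both operands are negative
  and the raw result is positive, so it is clamped to 0x8000 (-32768).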
+#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
+#define PFX q
+
+#include "op_addsub.h"
+
+/* Unsigned saturating arithmetic. */
+static inline uint16_t add16_usat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+ res = a + b;
+ if (res < a)
+ res = 0xffff;
+ return res;
+}
+
+static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
+{
+    if (a > b)
+        return a - b;
+    else
+        return 0;
+}
+
+static inline uint8_t add8_usat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+ res = a + b;
+ if (res < a)
+ res = 0xff;
+ return res;
+}
+
+static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
+{
+    if (a > b)
+        return a - b;
+    else
+        return 0;
+}
+
+#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
+#define PFX uq
+
+#include "op_addsub.h"
+
+/* Signed modulo arithmetic. */
+#define SARITH16(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
+ RESULT(sum, n, 16); \
+ if (sum >= 0) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define SARITH8(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
+ RESULT(sum, n, 8); \
+ if (sum >= 0) \
+ ge |= 1 << n; \
+ } while(0)
+
+
+#define ADD16(a, b, n) SARITH16(a, b, n, +)
+#define SUB16(a, b, n) SARITH16(a, b, n, -)
+#define ADD8(a, b, n) SARITH8(a, b, n, +)
+#define SUB8(a, b, n) SARITH8(a, b, n, -)
+#define PFX s
+#define ARITH_GE
+
+#include "op_addsub.h"
+
+/* Unsigned modulo arithmetic. */
+#define ADD16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 1) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define ADD8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 1) \
+ ge |= 1 << n; \
+ } while(0)
+
+#define SUB16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 0) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define SUB8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 0) \
+ ge |= 1 << n; \
+ } while(0)
+
+#define PFX u
+#define ARITH_GE
+
+#include "op_addsub.h"
+
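The ARITH_GE variants additionally latch per-lane carry/borrow into the
CPSR GE bits: for an unsigned 16-bit add the lane's GE pair is set when
the 17-bit sum carries out (e.g. 0x8000 + 0x8000 = 0x10000, so
(sum >> 16) == 1), and for a subtract when no borrow occurs. The SEL
helper further down consumes exactly these bits.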
+/* Halved signed arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define PFX sh
+
+#include "op_addsub.h"
+
+/* Halved unsigned arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define PFX uh
+
+#include "op_addsub.h"
+
+static inline uint8_t do_usad(uint8_t a, uint8_t b)
+{
+ if (a > b)
+ return a - b;
+ else
+ return b - a;
+}
+
+/* Unsigned sum of absolute byte differences. */
+uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
+{
+ uint32_t sum;
+ sum = do_usad(a, b);
+ sum += do_usad(a >> 8, b >> 8);
+    sum += do_usad(a >> 16, b >> 16);
+ sum += do_usad(a >> 24, b >> 24);
+ return sum;
+}
+
+/* For ARMv6 SEL instruction. */
+uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (flags & 1)
+ mask |= 0xff;
+ if (flags & 2)
+ mask |= 0xff00;
+ if (flags & 4)
+ mask |= 0xff0000;
+ if (flags & 8)
+ mask |= 0xff000000;
+ return (a & mask) | (b & ~mask);
+}
+
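SEL picks each result byte from a when the matching GE bit is set and
from b otherwise. For example, flags = 0x5 (GE0 and GE2 set) yields
mask = 0x00ff00ff, so bytes 0 and 2 come from a and bytes 1 and 3 from
b; paired with the GE-setting uadd8/usub8 family above this gives
branchless per-byte selection.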
+uint32_t HELPER(logicq_cc)(uint64_t val)
+{
+ return (val >> 32) | (val != 0);
+}
+
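logicq_cc folds a 64-bit result into one 32-bit value from which the
translator can derive both flags: bit 31 of the return is the N flag
(the sign of the 64-bit value), and the return is zero exactly when the
input is, giving Z. For example, val = 0x8000000000000000 returns
0x80000001 (N set, Z clear) and val = 0 returns 0 (Z set).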
+/* VFP support. We follow the convention used for VFP instructions:
+   single precision routines have an "s" suffix, double precision a
+   "d" suffix. */
+
+/* Convert host exception flags to vfp form. */
+static inline int vfp_exceptbits_from_host(int host_bits)
+{
+ int target_bits = 0;
+
+ if (host_bits & float_flag_invalid)
+ target_bits |= 1;
+ if (host_bits & float_flag_divbyzero)
+ target_bits |= 2;
+ if (host_bits & float_flag_overflow)
+ target_bits |= 4;
+ if (host_bits & float_flag_underflow)
+ target_bits |= 8;
+ if (host_bits & float_flag_inexact)
+ target_bits |= 0x10;
+ return target_bits;
+}
+
+uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
+{
+ int i;
+ uint32_t fpscr;
+
+ fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
+ | (env->vfp.vec_len << 16)
+ | (env->vfp.vec_stride << 20);
+ i = get_float_exception_flags(&env->vfp.fp_status);
+ fpscr |= vfp_exceptbits_from_host(i);
+ return fpscr;
+}
+
+/* Convert vfp exception flags to host form. */
+static inline int vfp_exceptbits_to_host(int target_bits)
+{
+ int host_bits = 0;
+
+ if (target_bits & 1)
+ host_bits |= float_flag_invalid;
+ if (target_bits & 2)
+ host_bits |= float_flag_divbyzero;
+ if (target_bits & 4)
+ host_bits |= float_flag_overflow;
+ if (target_bits & 8)
+ host_bits |= float_flag_underflow;
+ if (target_bits & 0x10)
+ host_bits |= float_flag_inexact;
+ return host_bits;
+}
+
+void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
+{
+ int i;
+ uint32_t changed;
+
+ changed = env->vfp.xregs[ARM_VFP_FPSCR];
+ env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
+ env->vfp.vec_len = (val >> 16) & 7;
+ env->vfp.vec_stride = (val >> 20) & 3;
+
+ changed ^= val;
+ if (changed & (3 << 22)) {
+ i = (val >> 22) & 3;
+ switch (i) {
+ case 0:
+ i = float_round_nearest_even;
+ break;
+ case 1:
+ i = float_round_up;
+ break;
+ case 2:
+ i = float_round_down;
+ break;
+ case 3:
+ i = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(i, &env->vfp.fp_status);
+ }
+
+ i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
+ set_float_exception_flags(i, &env->vfp.fp_status);
+ /* XXX: FZ and DN are not implemented. */
+}
+
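The RMode field in FPSCR bits [23:22] maps 0/1/2/3 to round-to-nearest-
even, toward +infinity, toward -infinity and toward zero, as the switch
above shows. A minimal usage sketch (note it also clears the vector
length/stride fields and the sticky exception bits as a side effect):

    helper_vfp_set_fpscr(env, 3 << 22);   /* round toward zero */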
+#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
+
+#define VFP_BINOP(name) \
+float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
+{ \
+ return float32_ ## name (a, b, &env->vfp.fp_status); \
+} \
+float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
+{ \
+ return float64_ ## name (a, b, &env->vfp.fp_status); \
+}
+VFP_BINOP(add)
+VFP_BINOP(sub)
+VFP_BINOP(mul)
+VFP_BINOP(div)
+#undef VFP_BINOP
+
+float32 VFP_HELPER(neg, s)(float32 a)
+{
+ return float32_chs(a);
+}
+
+float64 VFP_HELPER(neg, d)(float64 a)
+{
+ return float64_chs(a);
+}
+
+float32 VFP_HELPER(abs, s)(float32 a)
+{
+ return float32_abs(a);
+}
+
+float64 VFP_HELPER(abs, d)(float64 a)
+{
+ return float64_abs(a);
+}
+
+float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
+{
+ return float32_sqrt(a, &env->vfp.fp_status);
+}
+
+float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
+{
+ return float64_sqrt(a, &env->vfp.fp_status);
+}
+
+/* XXX: check quiet/signaling case */
+#define DO_VFP_cmp(p, type) \
+void VFP_HELPER(cmp, p)(type a, type b, CPUState *env) \
+{ \
+ uint32_t flags; \
+ switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
+ case 0: flags = 0x6; break; \
+ case -1: flags = 0x8; break; \
+ case 1: flags = 0x2; break; \
+ default: case 2: flags = 0x3; break; \
+ } \
+ env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
+ | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
+} \
+void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
+{ \
+ uint32_t flags; \
+ switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
+ case 0: flags = 0x6; break; \
+ case -1: flags = 0x8; break; \
+ case 1: flags = 0x2; break; \
+ default: case 2: flags = 0x3; break; \
+ } \
+ env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
+ | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
+}
+DO_VFP_cmp(s, float32)
+DO_VFP_cmp(d, float64)
+#undef DO_VFP_cmp
+
+/* Helper routines to perform bitwise copies between float and int. */
+static inline float32 vfp_itos(uint32_t i)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.i = i;
+ return v.s;
+}
+
+static inline uint32_t vfp_stoi(float32 s)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.s = s;
+ return v.i;
+}
+
+static inline float64 vfp_itod(uint64_t i)
+{
+ union {
+ uint64_t i;
+ float64 d;
+ } v;
+
+ v.i = i;
+ return v.d;
+}
+
+static inline uint64_t vfp_dtoi(float64 d)
+{
+ union {
+ uint64_t i;
+ float64 d;
+ } v;
+
+ v.d = d;
+ return v.i;
+}
+
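The unions give bit-exact copies between the softfloat types and plain
integers without the aliasing pitfalls of pointer casts. They are pure
round trips: vfp_stoi(vfp_itos(0x3f800000)) yields 0x3f800000, the bit
pattern of 1.0f.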
+/* Integer to float conversion. */
+float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
+{
+ return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
+}
+
+float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
+{
+ return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
+}
+
+float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
+{
+ return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
+}
+
+float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
+{
+ return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
+}
+
+/* Float to integer conversion. */
+float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
+{
+ return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
+{
+ return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
+{
+ return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
+{
+ return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
+{
+ return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
+{
+ return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
+{
+ return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
+}
+
+float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
+{
+ return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
+}
+
+/* floating point conversion */
+float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
+{
+ return float32_to_float64(x, &env->vfp.fp_status);
+}
+
+float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
+{
+ return float64_to_float32(x, &env->vfp.fp_status);
+}
+
+/* VFP3 fixed point conversion. */
+#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
+ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
+{ \
+ ftype tmp; \
+ tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
+ &env->vfp.fp_status); \
+    return ftype##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \
+} \
+ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
+{ \
+ ftype tmp; \
+ tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
+ return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
+ &env->vfp.fp_status)); \
+}
+
+VFP_CONV_FIX(sh, d, float64, int16, )
+VFP_CONV_FIX(sl, d, float64, int32, )
+VFP_CONV_FIX(uh, d, float64, uint16, u)
+VFP_CONV_FIX(ul, d, float64, uint32, u)
+VFP_CONV_FIX(sh, s, float32, int16, )
+VFP_CONV_FIX(sl, s, float32, int32, )
+VFP_CONV_FIX(uh, s, float32, uint16, u)
+VFP_CONV_FIX(ul, s, float32, uint32, u)
+#undef VFP_CONV_FIX
+
+float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
+{
+ float_status *s = &env->vfp.fp_status;
+ float32 two = int32_to_float32(2, s);
+ return float32_sub(two, float32_mul(a, b, s), s);
+}
+
+float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
+{
+    float_status *s = &env->vfp.fp_status;
+    float32 three = int32_to_float32(3, s);
+    float32 two = int32_to_float32(2, s);
+    /* The architecture defines VRSQRTS as (3 - a * b) / 2.  */
+    return float32_div(float32_sub(three, float32_mul(a, b, s), s), two, s);
+}
+
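These are the correction factors for Newton-Raphson refinement: for
r ~ 1/a the update is r' = r * (2 - a*r), and for r ~ 1/sqrt(a) it is
r' = r * (3 - a*r*r) / 2, so each VRECPS/VRSQRTS result is multiplied
back onto the current estimate. An illustrative refinement loop in
plain C (the helpers themselves must go through softfloat for correct
flag and rounding behaviour):

static float refine_recip(float a, float r)
{
    int i;
    for (i = 0; i < 2; i++)
        r = r * (2.0f - a * r);    /* one VRECPS-style step */
    return r;
}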
+/* NEON helpers. */
+
+/* TODO: The architecture specifies the value that the estimate functions
+ should return. We return the exact reciprocal/root instead. */
+float32 HELPER(recpe_f32)(float32 a, CPUState *env)
+{
+ float_status *s = &env->vfp.fp_status;
+ float32 one = int32_to_float32(1, s);
+ return float32_div(one, a, s);
+}
+
+float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
+{
+ float_status *s = &env->vfp.fp_status;
+ float32 one = int32_to_float32(1, s);
+ return float32_div(one, float32_sqrt(a, s), s);
+}
+
+uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
+{
+ float_status *s = &env->vfp.fp_status;
+ float32 tmp;
+ tmp = int32_to_float32(a, s);
+ tmp = float32_scalbn(tmp, -32, s);
+ tmp = helper_recpe_f32(tmp, env);
+ tmp = float32_scalbn(tmp, 31, s);
+ return float32_to_int32(tmp, s);
+}
+
+uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
+{
+ float_status *s = &env->vfp.fp_status;
+ float32 tmp;
+ tmp = int32_to_float32(a, s);
+ tmp = float32_scalbn(tmp, -32, s);
+ tmp = helper_rsqrte_f32(tmp, env);
+ tmp = float32_scalbn(tmp, 31, s);
+ return float32_to_int32(tmp, s);
+}
+
+#ifdef CONFIG_TRACE
+#include "trace.h"
+void HELPER(traceTicks)(uint32_t ticks)
+{
+ sim_time += ticks;
+}
+
+void HELPER(traceInsn)(void)
+{
+ trace_insn_helper();
+}
+
+#if HOST_LONG_BITS == 32
+void HELPER(traceBB32)(uint32_t hi, uint32_t lo, uint32_t tb)
+{
+ uint64_t bb_num = ((uint64_t)hi << 32) | lo;
+ trace_bb_helper(bb_num, (void*)tb);
+}
#endif
+
+#if HOST_LONG_BITS == 64
+void HELPER(traceBB64)(uint64_t bb_num, uint64_t tb)
+{
+ trace_bb_helper(bb_num, (void*)tb);
+}
+#endif
+
+#endif /* CONFIG_TRACE */
diff --git a/target-arm/helpers.h b/target-arm/helpers.h
new file mode 100644
index 0000000..cef53be
--- /dev/null
+++ b/target-arm/helpers.h
@@ -0,0 +1,548 @@
+#define DEF_HELPER(name, ret, args) ret glue(helper_,name) args;
+
+#ifdef GEN_HELPER
+#define DEF_HELPER_0_0(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(void) \
+{ \
+ tcg_gen_helper_0_0(helper_##name); \
+}
+#define DEF_HELPER_0_1(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv arg1) \
+{ \
+ tcg_gen_helper_0_1(helper_##name, arg1); \
+}
+#define DEF_HELPER_0_2(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv arg1, TCGv arg2) \
+{ \
+ tcg_gen_helper_0_2(helper_##name, arg1, arg2); \
+}
+#define DEF_HELPER_0_3(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name( \
+ TCGv arg1, TCGv arg2, TCGv arg3) \
+{ \
+ tcg_gen_helper_0_3(helper_##name, arg1, arg2, arg3); \
+}
+#define DEF_HELPER_1_0(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret) \
+{ \
+ tcg_gen_helper_1_0(helper_##name, ret); \
+}
+#define DEF_HELPER_1_1(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret, TCGv arg1) \
+{ \
+ tcg_gen_helper_1_1(helper_##name, ret, arg1); \
+}
+#define DEF_HELPER_1_2(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret, TCGv arg1, TCGv arg2) \
+{ \
+ tcg_gen_helper_1_2(helper_##name, ret, arg1, arg2); \
+}
+#define DEF_HELPER_1_3(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret, \
+ TCGv arg1, TCGv arg2, TCGv arg3) \
+{ \
+ tcg_gen_helper_1_3(helper_##name, ret, arg1, arg2, arg3); \
+}
+#define DEF_HELPER_1_4(name, ret, args) \
+DEF_HELPER(name, ret, args) \
+static inline void gen_helper_##name(TCGv ret, \
+ TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4) \
+{ \
+ tcg_gen_helper_1_4(helper_##name, ret, arg1, arg2, arg3, arg4); \
+}
+#else /* !GEN_HELPER */
+#define DEF_HELPER_0_0 DEF_HELPER
+#define DEF_HELPER_0_1 DEF_HELPER
+#define DEF_HELPER_0_2 DEF_HELPER
+#define DEF_HELPER_0_3 DEF_HELPER
+#define DEF_HELPER_1_0 DEF_HELPER
+#define DEF_HELPER_1_1 DEF_HELPER
+#define DEF_HELPER_1_2 DEF_HELPER
+#define DEF_HELPER_1_3 DEF_HELPER
+#define DEF_HELPER_1_4 DEF_HELPER
+#define HELPER(x) glue(helper_,x)
+#endif
+
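helpers.h is meant to be included twice: compiled plain it only declares
the helper_* prototypes, while with GEN_HELPER defined each
DEF_HELPER_m_n additionally emits an inline gen_helper_* wrapper that
generates the TCG call. In this tree the translator consumes it along
these lines:

#define GEN_HELPER 1
#include "helpers.h"
    /* ... later, in a decode function: */
gen_helper_clz(dest, src);    /* emits a TCG call to helper_clz */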
+DEF_HELPER_1_1(clz, uint32_t, (uint32_t))
+DEF_HELPER_1_1(sxtb16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(uxtb16, uint32_t, (uint32_t))
+
+DEF_HELPER_1_2(add_setq, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(add_saturate, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sub_saturate, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(add_usaturate, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sub_usaturate, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_1(double_saturate, uint32_t, (int32_t))
+DEF_HELPER_1_2(sdiv, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(udiv, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_1(rbit, uint32_t, (uint32_t))
+DEF_HELPER_1_1(abs, uint32_t, (uint32_t))
+
+#ifdef CONFIG_TRACE
+DEF_HELPER_0_1(traceTicks, void, (uint32_t))
+DEF_HELPER_0_0(traceInsn, void, (void))
+DEF_HELPER_0_3(traceBB32, void, (uint32_t, uint32_t, uint32_t))
+DEF_HELPER_0_2(traceBB64, void, (uint64_t, uint64_t))
+#endif
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_1_3(pfx ## add8, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
+ DEF_HELPER_1_3(pfx ## sub8, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
+ DEF_HELPER_1_3(pfx ## sub16, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
+ DEF_HELPER_1_3(pfx ## add16, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
+ DEF_HELPER_1_3(pfx ## addsubx, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
+ DEF_HELPER_1_3(pfx ## subaddx, uint32_t, (uint32_t, uint32_t, uint32_t *))
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_1_2(pfx ## add8, uint32_t, (uint32_t, uint32_t)) \
+ DEF_HELPER_1_2(pfx ## sub8, uint32_t, (uint32_t, uint32_t)) \
+ DEF_HELPER_1_2(pfx ## sub16, uint32_t, (uint32_t, uint32_t)) \
+ DEF_HELPER_1_2(pfx ## add16, uint32_t, (uint32_t, uint32_t)) \
+ DEF_HELPER_1_2(pfx ## addsubx, uint32_t, (uint32_t, uint32_t)) \
+ DEF_HELPER_1_2(pfx ## subaddx, uint32_t, (uint32_t, uint32_t))
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
+
+DEF_HELPER_1_2(ssat, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(usat, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ssat16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(usat16, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(usad8, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_1(logicq_cc, uint32_t, (uint64_t))
+
+DEF_HELPER_1_3(sel_flags, uint32_t, (uint32_t, uint32_t, uint32_t))
+DEF_HELPER_0_1(exception, void, (uint32_t))
+DEF_HELPER_0_0(wfi, void, (void))
+
+DEF_HELPER_0_2(cpsr_write, void, (uint32_t, uint32_t))
+DEF_HELPER_1_0(cpsr_read, uint32_t, (void))
+
+DEF_HELPER_0_3(v7m_msr, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(v7m_mrs, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_3(set_cp15, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(get_cp15, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_3(set_cp, void, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_2(get_cp, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_1_2(get_r13_banked, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
+
+DEF_HELPER_0_2(mark_exclusive, void, (CPUState *, uint32_t))
+DEF_HELPER_1_2(test_exclusive, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_0_1(clrex, void, (CPUState *))
+
+DEF_HELPER_1_1(get_user_reg, uint32_t, (uint32_t))
+DEF_HELPER_0_2(set_user_reg, void, (uint32_t, uint32_t))
+
+DEF_HELPER_1_1(vfp_get_fpscr, uint32_t, (CPUState *))
+DEF_HELPER_0_2(vfp_set_fpscr, void, (CPUState *, uint32_t))
+
+DEF_HELPER_1_3(vfp_adds, float32, (float32, float32, CPUState *))
+DEF_HELPER_1_3(vfp_addd, float64, (float64, float64, CPUState *))
+DEF_HELPER_1_3(vfp_subs, float32, (float32, float32, CPUState *))
+DEF_HELPER_1_3(vfp_subd, float64, (float64, float64, CPUState *))
+DEF_HELPER_1_3(vfp_muls, float32, (float32, float32, CPUState *))
+DEF_HELPER_1_3(vfp_muld, float64, (float64, float64, CPUState *))
+DEF_HELPER_1_3(vfp_divs, float32, (float32, float32, CPUState *))
+DEF_HELPER_1_3(vfp_divd, float64, (float64, float64, CPUState *))
+DEF_HELPER_1_1(vfp_negs, float32, (float32))
+DEF_HELPER_1_1(vfp_negd, float64, (float64))
+DEF_HELPER_1_1(vfp_abss, float32, (float32))
+DEF_HELPER_1_1(vfp_absd, float64, (float64))
+DEF_HELPER_1_2(vfp_sqrts, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_sqrtd, float64, (float64, CPUState *))
+DEF_HELPER_0_3(vfp_cmps, void, (float32, float32, CPUState *))
+DEF_HELPER_0_3(vfp_cmpd, void, (float64, float64, CPUState *))
+DEF_HELPER_0_3(vfp_cmpes, void, (float32, float32, CPUState *))
+DEF_HELPER_0_3(vfp_cmped, void, (float64, float64, CPUState *))
+
+DEF_HELPER_1_2(vfp_fcvtds, float64, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_fcvtsd, float32, (float64, CPUState *))
+
+DEF_HELPER_1_2(vfp_uitos, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_uitod, float64, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_sitos, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_sitod, float64, (float32, CPUState *))
+
+DEF_HELPER_1_2(vfp_touis, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_touid, float32, (float64, CPUState *))
+DEF_HELPER_1_2(vfp_touizs, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_touizd, float32, (float64, CPUState *))
+DEF_HELPER_1_2(vfp_tosis, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_tosid, float32, (float64, CPUState *))
+DEF_HELPER_1_2(vfp_tosizs, float32, (float32, CPUState *))
+DEF_HELPER_1_2(vfp_tosizd, float32, (float64, CPUState *))
+
+DEF_HELPER_1_3(vfp_toshs, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_tosls, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_touhs, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_touls, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_toshd, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_tosld, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_touhd, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_tould, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_shtos, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_sltos, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_uhtos, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_ultos, float32, (float32, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_shtod, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_sltod, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_uhtod, float64, (float64, uint32_t, CPUState *))
+DEF_HELPER_1_3(vfp_ultod, float64, (float64, uint32_t, CPUState *))
+
+DEF_HELPER_1_3(recps_f32, float32, (float32, float32, CPUState *))
+DEF_HELPER_1_3(rsqrts_f32, float32, (float32, float32, CPUState *))
+DEF_HELPER_1_2(recpe_f32, float32, (float32, CPUState *))
+DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
+DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
+DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
+DEF_HELPER_1_4(neon_tbl, uint32_t, (uint32_t, uint32_t, uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_add_saturate_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_add_saturate_s64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_sub_saturate_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_sub_saturate_s64, uint64_t, (uint64_t, uint64_t))
+
+DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sub_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sbc_cc, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(shl, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shr, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sar, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ror, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shl_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(shr_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(sar_cc, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(ror_cc, uint32_t, (uint32_t, uint32_t))
+
+/* neon_helper.c */
+DEF_HELPER_1_3(neon_qadd_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qadd_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qadd_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qadd_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_hadd_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_s32, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(neon_hadd_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_s32, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(neon_rhadd_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_s32, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(neon_hsub_u32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_cgt_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_s32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_min_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_s32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_abd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_s32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_shl_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_shl_s64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_rshl_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_rshl_s64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qshl_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_u32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_u64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qshl_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qrshl_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_u32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_u64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qrshl_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
+
+DEF_HELPER_1_2(neon_add_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_add_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_padd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_padd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_sub_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_sub_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_p8, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_tst_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_tst_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_tst_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_u32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_1(neon_abs_s8, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_abs_s16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_clz_u8, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_clz_u16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cls_s8, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cls_s16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cls_s32, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cnt_u8, uint32_t, (uint32_t))
+
+DEF_HELPER_1_3(neon_qdmulh_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrdmulh_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qdmulh_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrdmulh_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+
+DEF_HELPER_1_1(neon_narrow_u8, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_u16, uint32_t, (uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_u8, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_s8, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_u16, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_s16, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_u32, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_s32, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_1(neon_narrow_high_u8, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_high_u16, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_round_high_u8, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_round_high_u16, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_widen_u8, uint64_t, (uint32_t))
+DEF_HELPER_1_1(neon_widen_s8, uint64_t, (uint32_t))
+DEF_HELPER_1_1(neon_widen_u16, uint64_t, (uint32_t))
+DEF_HELPER_1_1(neon_widen_s16, uint64_t, (uint32_t))
+
+DEF_HELPER_1_2(neon_addl_u16, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_addl_u32, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_paddl_u16, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_paddl_u32, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_subl_u16, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_subl_u32, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_addl_saturate_s32, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_addl_saturate_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_abdl_u16, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_s16, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_u32, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_s32, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_u64, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_s64, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_u8, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_s8, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_u16, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_s16, uint64_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_1(neon_negl_u16, uint64_t, (uint64_t))
+DEF_HELPER_1_1(neon_negl_u32, uint64_t, (uint64_t))
+DEF_HELPER_1_1(neon_negl_u64, uint64_t, (uint64_t))
+
+DEF_HELPER_1_2(neon_qabs_s8, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qabs_s16, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qabs_s32, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qneg_s8, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qneg_s16, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qneg_s32, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_0(neon_trn_u8, void, (void))
+DEF_HELPER_0_0(neon_trn_u16, void, (void))
+DEF_HELPER_0_0(neon_unzip_u8, void, (void))
+DEF_HELPER_0_0(neon_zip_u8, void, (void))
+DEF_HELPER_0_0(neon_zip_u16, void, (void))
+
+DEF_HELPER_1_2(neon_min_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_add_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_sub_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_acge_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_acgt_f32, uint32_t, (uint32_t, uint32_t))
+
+/* iwmmxt_helper.c */
+DEF_HELPER_1_2(iwmmxt_maddsq, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_madduq, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_sadb, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_sadw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_mulslw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_mulshw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_mululw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_muluhw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_macsw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(iwmmxt_macuw, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_1(iwmmxt_setpsr_nz, uint32_t, (uint64_t))
+
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_1_3(iwmmxt_##name##b, uint64_t, (CPUState *, uint64_t, uint64_t)) \
+DEF_HELPER_1_3(iwmmxt_##name##w, uint64_t, (CPUState *, uint64_t, uint64_t)) \
+DEF_HELPER_1_3(iwmmxt_##name##l, uint64_t, (CPUState *, uint64_t, uint64_t)) \
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_1_2(iwmmxt_unpacklub, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackluw, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpacklul, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackhub, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackhuw, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackhul, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpacklsb, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpacklsw, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpacklsl, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackhsb, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackhsw, uint64_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(iwmmxt_unpackhsl, uint64_t, (CPUState *, uint64_t))
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_1_3(iwmmxt_avgb0, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_avgb1, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_avgw0, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_avgw1, uint64_t, (CPUState *, uint64_t, uint64_t))
+
+DEF_HELPER_1_2(iwmmxt_msadb, uint64_t, (uint64_t, uint64_t))
+
+DEF_HELPER_1_3(iwmmxt_align, uint64_t, (uint64_t, uint64_t, uint32_t))
+DEF_HELPER_1_4(iwmmxt_insr, uint64_t, (uint64_t, uint32_t, uint32_t, uint32_t))
+
+DEF_HELPER_1_1(iwmmxt_bcstb, uint64_t, (uint32_t))
+DEF_HELPER_1_1(iwmmxt_bcstw, uint64_t, (uint32_t))
+DEF_HELPER_1_1(iwmmxt_bcstl, uint64_t, (uint32_t))
+
+DEF_HELPER_1_1(iwmmxt_addcb, uint64_t, (uint64_t))
+DEF_HELPER_1_1(iwmmxt_addcw, uint64_t, (uint64_t))
+DEF_HELPER_1_1(iwmmxt_addcl, uint64_t, (uint64_t))
+
+DEF_HELPER_1_1(iwmmxt_msbb, uint32_t, (uint64_t))
+DEF_HELPER_1_1(iwmmxt_msbw, uint32_t, (uint64_t))
+DEF_HELPER_1_1(iwmmxt_msbl, uint32_t, (uint64_t))
+
+DEF_HELPER_1_3(iwmmxt_srlw, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_srll, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_srlq, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_sllw, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_slll, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_sllq, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_sraw, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_sral, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_sraq, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_rorw, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_rorl, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_rorq, uint64_t, (CPUState *, uint64_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_shufh, uint64_t, (CPUState *, uint64_t, uint32_t))
+
+DEF_HELPER_1_3(iwmmxt_packuw, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_packul, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_packuq, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_packsw, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_packsl, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(iwmmxt_packsq, uint64_t, (CPUState *, uint64_t, uint64_t))
+
+DEF_HELPER_1_3(iwmmxt_muladdsl, uint64_t, (uint64_t, uint32_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_muladdsw, uint64_t, (uint64_t, uint32_t, uint32_t))
+DEF_HELPER_1_3(iwmmxt_muladdswl, uint64_t, (uint64_t, uint32_t, uint32_t))
+
+#undef DEF_HELPER
+#undef DEF_HELPER_0_0
+#undef DEF_HELPER_0_1
+#undef DEF_HELPER_0_2
+#undef DEF_HELPER_0_3
+#undef DEF_HELPER_1_0
+#undef DEF_HELPER_1_1
+#undef DEF_HELPER_1_2
+#undef DEF_HELPER_1_3
+#undef DEF_HELPER_1_4
+#undef GEN_HELPER
diff --git a/target-arm/iwmmxt_helper.c b/target-arm/iwmmxt_helper.c
new file mode 100644
index 0000000..6e801c8
--- /dev/null
+++ b/target-arm/iwmmxt_helper.c
@@ -0,0 +1,682 @@
+/*
+ * iwMMXt micro operations for XScale.
+ *
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ * Copyright (c) 2008 CodeSourcery
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "helpers.h"
+
+/* iwMMXt macros extracted from GNU gdb. */
+
+/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */
+#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n)))
+#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
+#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
+#define SIMD64_SET(v, n) ((v != 0) << (32 + (n)))
+/* Flags to pass as "n" above. */
+#define SIMD_NBIT -1
+#define SIMD_ZBIT -2
+#define SIMD_CBIT -3
+#define SIMD_VBIT -4
+/* Various status bit macros. */
+#define NBIT8(x) ((x) & 0x80)
+#define NBIT16(x) ((x) & 0x8000)
+#define NBIT32(x) ((x) & 0x80000000)
+#define NBIT64(x) ((x) & 0x8000000000000000ULL)
+#define ZBIT8(x) (((x) & 0xff) == 0)
+#define ZBIT16(x) (((x) & 0xffff) == 0)
+#define ZBIT32(x) (((x) & 0xffffffff) == 0)
+#define ZBIT64(x) ((x) == 0)
+/* Sign extension macros. */
+#define EXTEND8H(a) ((uint16_t) (int8_t) (a))
+#define EXTEND8(a) ((uint32_t) (int8_t) (a))
+#define EXTEND16(a) ((uint32_t) (int16_t) (a))
+#define EXTEND16S(a) ((int32_t) (int16_t) (a))
+#define EXTEND32(a) ((uint64_t) (int32_t) (a))
+
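The SIMDn_SET macros scatter per-lane flags into wCASF at a stride of 4,
8 or 16 bits per lane, using the negative indices above as offsets
within each field. For example, SIMD8_SET(v, SIMD_NBIT, 0) lands at bit
((0 + 1) * 4) - 1 = 3 and SIMD8_SET(v, SIMD_ZBIT, 0) at bit 2, while the
N flag of halfword lane 1 (SIMD16_SET) sits at bit ((1 + 1) * 8) - 1 = 15.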
+uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b)
+{
+ a = ((
+ EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) +
+ EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)
+ ) & 0xffffffff) | ((uint64_t) (
+ EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) +
+ EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff)
+ ) << 32);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b)
+{
+ a = ((
+ ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
+ ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)
+ ) & 0xffffffff) | ((
+ ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
+ ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)
+ ) << 32);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b)
+{
+#define abs(x) (((x) >= 0) ? (x) : -(x))
+#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff))
+ return
+ SADB(0) + SADB(8) + SADB(16) + SADB(24) +
+ SADB(32) + SADB(40) + SADB(48) + SADB(56);
+#undef SADB
+}
+
+uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b)
+{
+#define SADW(SHR) \
+ abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff))
+ return SADW(0) + SADW(16) + SADW(32) + SADW(48);
+#undef SADW
+}
+
+uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b)
+{
+#define MULS(SHR) ((uint64_t) ((( \
+ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
+ ) >> 0) & 0xffff) << SHR)
+ return MULS(0) | MULS(16) | MULS(32) | MULS(48);
+#undef MULS
+}
+
+uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b)
+{
+#define MULS(SHR) ((uint64_t) ((( \
+ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
+ ) >> 16) & 0xffff) << SHR)
+ return MULS(0) | MULS(16) | MULS(32) | MULS(48);
+#undef MULS
+}
+
+uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b)
+{
+#define MULU(SHR) ((uint64_t) ((( \
+ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
+ ) >> 0) & 0xffff) << SHR)
+ return MULU(0) | MULU(16) | MULU(32) | MULU(48);
+#undef MULU
+}
+
+uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b)
+{
+#define MULU(SHR) ((uint64_t) ((( \
+ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
+ ) >> 16) & 0xffff) << SHR)
+ return MULU(0) | MULU(16) | MULU(32) | MULU(48);
+#undef MULU
+}
+
+uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b)
+{
+#define MACS(SHR) ( \
+ EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff))
+ return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48));
+#undef MACS
+}
+
+uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b)
+{
+#define MACU(SHR) ( \
+ (uint32_t) ((a >> SHR) & 0xffff) * \
+ (uint32_t) ((b >> SHR) & 0xffff))
+ return MACU(0) + MACU(16) + MACU(32) + MACU(48);
+#undef MACU
+}
+
+#define NZBIT8(x, i) \
+ SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \
+ SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i)
+#define NZBIT16(x, i) \
+ SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \
+ SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i)
+#define NZBIT32(x, i) \
+ SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \
+ SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i)
+#define NZBIT64(x) \
+ SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
+ SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
+#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
+ (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
+ (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
+ (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xffff) << 0) | \
+ (((b >> SH0) & 0xffff) << 16) | \
+ (((a >> SH2) & 0xffff) << 32) | \
+ (((b >> SH2) & 0xffff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xffffffff) << 0) | \
+ (((b >> SH0) & 0xffffffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ (((x >> SH0) & 0xff) << 0) | \
+ (((x >> SH1) & 0xff) << 16) | \
+ (((x >> SH2) & 0xff) << 32) | \
+ (((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ (((x >> SH0) & 0xffff) << 0) | \
+ (((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUState *env, \
+ uint64_t x) \
+{ \
+ x = (((x >> SH0) & 0xffffffff) << 0); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
+ ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
+ ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
+ ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
+ ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUState *env, \
+ uint64_t x) \
+{ \
+ x = EXTEND32((x >> SH0) & 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+ return x; \
+}
+IWMMXT_OP_UNPACK(l, 0, 8, 16, 24)
+IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
+
+#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
+ CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
+ CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
+ CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
+ CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = CMP(0, Tl, O, 0xffffffff) | \
+ CMP(32, Tl, O, 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+ return a; \
+}
+#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
+ (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR)
+IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==)
+IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >)
+IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >)
+#undef CMP
+#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
+ (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR))
+IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <)
+IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <)
+IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >)
+IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >)
+#undef CMP
+#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
+ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
+IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -)
+IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +)
+#undef CMP
+/* TODO: Signed- and Unsigned-Saturation. */
+#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
+ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
+IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -)
+IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +)
+IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -)
+IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +)
+#undef CMP
+#undef IWMMXT_OP_CMP
+
+#define AVGB(SHR) ((( \
+ ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR)
+#define IWMMXT_OP_AVGB(r) \
+uint64_t HELPER(iwmmxt_avgb##r)(CPUState *env, uint64_t a, uint64_t b) \
+{ \
+ const int round = r; \
+ a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \
+ AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \
+ SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \
+ SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \
+ SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \
+ SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \
+ SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \
+ SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \
+ SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \
+ return a; \
+}
+IWMMXT_OP_AVGB(0)
+IWMMXT_OP_AVGB(1)
+#undef IWMMXT_OP_AVGB
+#undef AVGB
+
+#define AVGW(SHR) ((( \
+ ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR)
+#define IWMMXT_OP_AVGW(r) \
+uint64_t HELPER(iwmmxt_avgw##r)(CPUState *env, uint64_t a, uint64_t b) \
+{ \
+ const int round = r; \
+ a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \
+ SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \
+ SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \
+ SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \
+ return a; \
+}
+IWMMXT_OP_AVGW(0)
+IWMMXT_OP_AVGW(1)
+#undef IWMMXT_OP_AVGW
+#undef AVGW
+
+uint64_t HELPER(iwmmxt_msadb)(uint64_t a, uint64_t b)
+{
+ a = ((((a >> 0 ) & 0xffff) * ((b >> 0) & 0xffff) +
+ ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)) & 0xffffffff) |
+ ((((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
+ ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)) << 32);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n)
+{
+ a >>= n << 3;
+ a |= b << (64 - (n << 3));
+ return a;
+}
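+
+/* Example: n = 2 yields bytes 2..9 of the (b:a) pair, i.e. the top six
+ bytes of a followed by the low two bytes of b. Note that n == 0 would
+ shift b left by 64, which is undefined in C, so callers presumably
+ pass only 0 < n < 8 here. */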
+
+uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n)
+{
+ x &= ~((uint64_t) b << n);
+ x |= (uint64_t) (a & b) << n;
+ return x;
+}
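+
+/* insr replaces the field selected by mask b at bit offset n with the
+ corresponding bits of a; e.g. b = 0xffff, n = 32 inserts the low 16
+ bits of a into bits 47..32 of x. */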
+
+uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x)
+{
+ return SIMD64_SET((x == 0), SIMD_ZBIT) |
+ SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT);
+}
+
+uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg)
+{
+ arg &= 0xff;
+ return
+ ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) |
+ ((uint64_t) arg << 16) | ((uint64_t) arg << 24) |
+ ((uint64_t) arg << 32) | ((uint64_t) arg << 40) |
+ ((uint64_t) arg << 48) | ((uint64_t) arg << 56);
+}
+
+uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg)
+{
+ arg &= 0xffff;
+ return
+ ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) |
+ ((uint64_t) arg << 32) | ((uint64_t) arg << 48);
+}
+
+uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg)
+{
+ return arg | ((uint64_t) arg << 32);
+}
+
+uint64_t HELPER(iwmmxt_addcb)(uint64_t x)
+{
+ return
+ ((x >> 0) & 0xff) + ((x >> 8) & 0xff) +
+ ((x >> 16) & 0xff) + ((x >> 24) & 0xff) +
+ ((x >> 32) & 0xff) + ((x >> 40) & 0xff) +
+ ((x >> 48) & 0xff) + ((x >> 56) & 0xff);
+}
+
+uint64_t HELPER(iwmmxt_addcw)(uint64_t x)
+{
+ return
+ ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) +
+ ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff);
+}
+
+uint64_t HELPER(iwmmxt_addcl)(uint64_t x)
+{
+ return (x & 0xffffffff) + (x >> 32);
+}
+
+uint32_t HELPER(iwmmxt_msbb)(uint64_t x)
+{
+ return
+ ((x >> 7) & 0x01) | ((x >> 14) & 0x02) |
+ ((x >> 21) & 0x04) | ((x >> 28) & 0x08) |
+ ((x >> 35) & 0x10) | ((x >> 42) & 0x20) |
+ ((x >> 49) & 0x40) | ((x >> 56) & 0x80);
+}
+
+uint32_t HELPER(iwmmxt_msbw)(uint64_t x)
+{
+ return
+ ((x >> 15) & 0x01) | ((x >> 30) & 0x02) |
+ ((x >> 45) & 0x04) | ((x >> 60) & 0x08);
+}
+
+uint32_t HELPER(iwmmxt_msbl)(uint64_t x)
+{
+ return ((x >> 31) & 0x01) | ((x >> 62) & 0x02);
+}
+
+/* FIXME: Split wCASF setting into a separate op to avoid env use. */
+uint64_t HELPER(iwmmxt_srlw)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) |
+ (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) |
+ (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) |
+ (((x & (0xffffll << 48)) >> n) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_srll)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = ((x & (0xffffffffll << 0)) >> n) |
+ ((x >> n) & (0xffffffffll << 32));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_srlq)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x >>= n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sllw)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) |
+ (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) |
+ (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) |
+ (((x & (0xffffll << 48)) << n) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_slll)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = ((x << n) & (0xffffffffll << 0)) |
+ ((x & (0xffffffffll << 32)) << n);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sllq)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x <<= n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sraw)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) |
+ ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) |
+ ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) |
+ ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sral)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) |
+ (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sraq)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = (int64_t) x >> n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorw)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = ((((x & (0xffffll << 0)) >> n) |
+ ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) |
+ ((((x & (0xffffll << 16)) >> n) |
+ ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) |
+ ((((x & (0xffffll << 32)) >> n) |
+ ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) |
+ ((((x & (0xffffll << 48)) >> n) |
+ ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorl)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = ((x & (0xffffffffll << 0)) >> n) |
+ ((x >> n) & (0xffffffffll << 32)) |
+ ((x << (32 - n)) & (0xffffffffll << 0)) |
+ ((x & (0xffffffffll << 32)) << (32 - n));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorq)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = (x >> n) | (x << (64 - n));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_shufh)(CPUState *env, uint64_t x, uint32_t n)
+{
+ x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) |
+ (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) |
+ (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) |
+ (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+/* TODO: Unsigned-Saturation */
+uint64_t HELPER(iwmmxt_packuw)(CPUState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
+ (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
+ (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
+ (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packul)(CPUState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
+ (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packuq)(CPUState *env, uint64_t a, uint64_t b)
+{
+ a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
+ return a;
+}
+
+/* TODO: Signed-Saturation */
+uint64_t HELPER(iwmmxt_packsw)(CPUState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
+ (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
+ (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
+ (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packsl)(CPUState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
+ (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packsq)(CPUState *env, uint64_t a, uint64_t b)
+{
+ a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b)
+{
+ return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b));
+}
+
+uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b)
+{
+ c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) *
+ EXTEND16S((b >> 0) & 0xffff));
+ c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) *
+ EXTEND16S((b >> 16) & 0xffff));
+ return c;
+}
+
+uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b)
+{
+ return c + (EXTEND32(EXTEND16S(a & 0xffff) *
+ EXTEND16S(b & 0xffff)));
+}
diff --git a/target-arm/machine.c b/target-arm/machine.c
new file mode 100644
index 0000000..3368741
--- /dev/null
+++ b/target-arm/machine.c
@@ -0,0 +1,218 @@
+#include "hw/hw.h"
+#include "hw/boards.h"
+
+void register_machines(void)
+{
+#if 0 /* ANDROID */
+ qemu_register_machine(&integratorcp_machine);
+ qemu_register_machine(&versatilepb_machine);
+ qemu_register_machine(&versatileab_machine);
+ qemu_register_machine(&realview_machine);
+ qemu_register_machine(&akitapda_machine);
+ qemu_register_machine(&spitzpda_machine);
+ qemu_register_machine(&borzoipda_machine);
+ qemu_register_machine(&terrierpda_machine);
+ qemu_register_machine(&palmte_machine);
+ qemu_register_machine(&n800_machine);
+ qemu_register_machine(&n810_machine);
+ qemu_register_machine(&lm3s811evb_machine);
+ qemu_register_machine(&lm3s6965evb_machine);
+ qemu_register_machine(&connex_machine);
+ qemu_register_machine(&verdex_machine);
+ qemu_register_machine(&mainstone2_machine);
+ qemu_register_machine(&musicpal_machine);
+ qemu_register_machine(&tosapda_machine);
+#endif
+ qemu_register_machine(&android_arm_machine);
+}
+
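+/* NOTE: cpu_save() and cpu_load() must write and read exactly the same
+ fields in the same order; cpu_load() rejects any snapshot whose
+ version_id differs from CPU_SAVE_VERSION. */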
+void cpu_save(QEMUFile *f, void *opaque)
+{
+ int i;
+ CPUARMState *env = (CPUARMState *)opaque;
+
+ for (i = 0; i < 16; i++) {
+ qemu_put_be32(f, env->regs[i]);
+ }
+ qemu_put_be32(f, cpsr_read(env));
+ qemu_put_be32(f, env->spsr);
+ for (i = 0; i < 6; i++) {
+ qemu_put_be32(f, env->banked_spsr[i]);
+ qemu_put_be32(f, env->banked_r13[i]);
+ qemu_put_be32(f, env->banked_r14[i]);
+ }
+ for (i = 0; i < 5; i++) {
+ qemu_put_be32(f, env->usr_regs[i]);
+ qemu_put_be32(f, env->fiq_regs[i]);
+ }
+ qemu_put_be32(f, env->cp15.c0_cpuid);
+ qemu_put_be32(f, env->cp15.c0_cachetype);
+ qemu_put_be32(f, env->cp15.c1_sys);
+ qemu_put_be32(f, env->cp15.c1_coproc);
+ qemu_put_be32(f, env->cp15.c1_xscaleauxcr);
+ qemu_put_be32(f, env->cp15.c2_base0);
+ qemu_put_be32(f, env->cp15.c2_base1);
+ qemu_put_be32(f, env->cp15.c2_mask);
+ qemu_put_be32(f, env->cp15.c2_data);
+ qemu_put_be32(f, env->cp15.c2_insn);
+ qemu_put_be32(f, env->cp15.c3);
+ qemu_put_be32(f, env->cp15.c5_insn);
+ qemu_put_be32(f, env->cp15.c5_data);
+ for (i = 0; i < 8; i++) {
+ qemu_put_be32(f, env->cp15.c6_region[i]);
+ }
+ qemu_put_be32(f, env->cp15.c6_insn);
+ qemu_put_be32(f, env->cp15.c6_data);
+ qemu_put_be32(f, env->cp15.c9_insn);
+ qemu_put_be32(f, env->cp15.c9_data);
+ qemu_put_be32(f, env->cp15.c13_fcse);
+ qemu_put_be32(f, env->cp15.c13_context);
+ qemu_put_be32(f, env->cp15.c13_tls1);
+ qemu_put_be32(f, env->cp15.c13_tls2);
+ qemu_put_be32(f, env->cp15.c13_tls3);
+ qemu_put_be32(f, env->cp15.c15_cpar);
+
+ qemu_put_be32(f, env->features);
+
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ for (i = 0; i < 16; i++) {
+ CPU_DoubleU u;
+ u.d = env->vfp.regs[i];
+ qemu_put_be32(f, u.l.upper);
+ qemu_put_be32(f, u.l.lower);
+ }
+ for (i = 0; i < 16; i++) {
+ qemu_put_be32(f, env->vfp.xregs[i]);
+ }
+
+ /* TODO: Should use proper FPSCR access functions. */
+ qemu_put_be32(f, env->vfp.vec_len);
+ qemu_put_be32(f, env->vfp.vec_stride);
+
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ for (i = 16; i < 32; i++) {
+ CPU_DoubleU u;
+ u.d = env->vfp.regs[i];
+ qemu_put_be32(f, u.l.upper);
+ qemu_put_be32(f, u.l.lower);
+ }
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ for (i = 0; i < 16; i++) {
+ qemu_put_be64(f, env->iwmmxt.regs[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ qemu_put_be32(f, env->iwmmxt.cregs[i]);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ qemu_put_be32(f, env->v7m.other_sp);
+ qemu_put_be32(f, env->v7m.vecbase);
+ qemu_put_be32(f, env->v7m.basepri);
+ qemu_put_be32(f, env->v7m.control);
+ qemu_put_be32(f, env->v7m.current_sp);
+ qemu_put_be32(f, env->v7m.exception);
+ }
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ CPUARMState *env = (CPUARMState *)opaque;
+ int i;
+
+ if (version_id != CPU_SAVE_VERSION)
+ return -EINVAL;
+
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = qemu_get_be32(f);
+ }
+ cpsr_write(env, qemu_get_be32(f), 0xffffffff);
+ env->spsr = qemu_get_be32(f);
+ for (i = 0; i < 6; i++) {
+ env->banked_spsr[i] = qemu_get_be32(f);
+ env->banked_r13[i] = qemu_get_be32(f);
+ env->banked_r14[i] = qemu_get_be32(f);
+ }
+ for (i = 0; i < 5; i++) {
+ env->usr_regs[i] = qemu_get_be32(f);
+ env->fiq_regs[i] = qemu_get_be32(f);
+ }
+ env->cp15.c0_cpuid = qemu_get_be32(f);
+ env->cp15.c0_cachetype = qemu_get_be32(f);
+ env->cp15.c1_sys = qemu_get_be32(f);
+ env->cp15.c1_coproc = qemu_get_be32(f);
+ env->cp15.c1_xscaleauxcr = qemu_get_be32(f);
+ env->cp15.c2_base0 = qemu_get_be32(f);
+ env->cp15.c2_base1 = qemu_get_be32(f);
+ env->cp15.c2_mask = qemu_get_be32(f);
+ env->cp15.c2_data = qemu_get_be32(f);
+ env->cp15.c2_insn = qemu_get_be32(f);
+ env->cp15.c3 = qemu_get_be32(f);
+ env->cp15.c5_insn = qemu_get_be32(f);
+ env->cp15.c5_data = qemu_get_be32(f);
+ for (i = 0; i < 8; i++) {
+ env->cp15.c6_region[i] = qemu_get_be32(f);
+ }
+ env->cp15.c6_insn = qemu_get_be32(f);
+ env->cp15.c6_data = qemu_get_be32(f);
+ env->cp15.c9_insn = qemu_get_be32(f);
+ env->cp15.c9_data = qemu_get_be32(f);
+ env->cp15.c13_fcse = qemu_get_be32(f);
+ env->cp15.c13_context = qemu_get_be32(f);
+ env->cp15.c13_tls1 = qemu_get_be32(f);
+ env->cp15.c13_tls2 = qemu_get_be32(f);
+ env->cp15.c13_tls3 = qemu_get_be32(f);
+ env->cp15.c15_cpar = qemu_get_be32(f);
+
+ env->features = qemu_get_be32(f);
+
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ for (i = 0; i < 16; i++) {
+ CPU_DoubleU u;
+ u.l.upper = qemu_get_be32(f);
+ u.l.lower = qemu_get_be32(f);
+ env->vfp.regs[i] = u.d;
+ }
+ for (i = 0; i < 16; i++) {
+ env->vfp.xregs[i] = qemu_get_be32(f);
+ }
+
+ /* TODO: Should use proper FPSCR access functions. */
+ env->vfp.vec_len = qemu_get_be32(f);
+ env->vfp.vec_stride = qemu_get_be32(f);
+
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ for (i = 16; i < 32; i++) {
+ CPU_DoubleU u;
+ u.l.upper = qemu_get_be32(f);
+ u.l.lower = qemu_get_be32(f);
+ env->vfp.regs[i] = u.d;
+ }
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ for (i = 0; i < 16; i++) {
+ env->iwmmxt.regs[i] = qemu_get_be64(f);
+ }
+ for (i = 0; i < 16; i++) {
+ env->iwmmxt.cregs[i] = qemu_get_be32(f);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ env->v7m.other_sp = qemu_get_be32(f);
+ env->v7m.vecbase = qemu_get_be32(f);
+ env->v7m.basepri = qemu_get_be32(f);
+ env->v7m.control = qemu_get_be32(f);
+ env->v7m.current_sp = qemu_get_be32(f);
+ env->v7m.exception = qemu_get_be32(f);
+ }
+
+ return 0;
+}
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
new file mode 100644
index 0000000..4ee5658
--- /dev/null
+++ b/target-arm/neon_helper.c
@@ -0,0 +1,1457 @@
+/*
+ * ARM NEON vector operations.
+ *
+ * Copyright (c) 2007, 2008 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GNU GPL v2.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "helpers.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q
+
+static float_status neon_float_status;
+#define NFS &neon_float_status
+
+/* Helper routines to perform bitwise copies between float and int. */
+static inline float32 vfp_itos(uint32_t i)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.i = i;
+ return v.s;
+}
+
+static inline uint32_t vfp_stoi(float32 s)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.s = s;
+ return v.i;
+}
+
+#define NEON_TYPE1(name, type) \
+typedef struct \
+{ \
+ type v1; \
+} neon_##name;
+#ifdef WORDS_BIGENDIAN
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+ type v2; \
+ type v1; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+ type v4; \
+ type v3; \
+ type v2; \
+ type v1; \
+} neon_##name;
+#else
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+ type v1; \
+ type v2; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+ type v1; \
+ type v2; \
+ type v3; \
+ type v4; \
+} neon_##name;
+#endif
+
+NEON_TYPE4(s8, int8_t)
+NEON_TYPE4(u8, uint8_t)
+NEON_TYPE2(s16, int16_t)
+NEON_TYPE2(u16, uint16_t)
+NEON_TYPE1(s32, int32_t)
+NEON_TYPE1(u32, uint32_t)
+#undef NEON_TYPE4
+#undef NEON_TYPE2
+#undef NEON_TYPE1
+
+/* Copy from a uint32_t to a vector structure type. */
+#define NEON_UNPACK(vtype, dest, val) do { \
+ union { \
+ vtype v; \
+ uint32_t i; \
+ } conv_u; \
+ conv_u.i = (val); \
+ dest = conv_u.v; \
+ } while(0)
+
+/* Copy from a vector structure type to a uint32_t. */
+#define NEON_PACK(vtype, dest, val) do { \
+ union { \
+ vtype v; \
+ uint32_t i; \
+ } conv_u; \
+ conv_u.v = (val); \
+ dest = conv_u.i; \
+ } while(0)
+
+#define NEON_DO1 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
+#define NEON_DO2 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
+#define NEON_DO4 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
+ NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
+ NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
+
+#define NEON_VOP_BODY(vtype, n) \
+{ \
+ uint32_t res; \
+ vtype vsrc1; \
+ vtype vsrc2; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg1); \
+ NEON_UNPACK(vtype, vsrc2, arg2); \
+ NEON_DO##n; \
+ NEON_PACK(vtype, res, vdest); \
+ return res; \
+}
+
+#define NEON_VOP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+#define NEON_VOP_ENV(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+/* Pairwise operations. */
+/* For 32-bit elements each segment only contains a single element, so
+ the elementwise and pairwise operations are the same. */
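+/* Example with u8 lanes (low to high): padd_u8([a0 a1 a2 a3],
+ [b0 b1 b2 b3]) = [a0+a1, a2+a3, b0+b1, b2+b3]. */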
+#define NEON_PDO2 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+ NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
+#define NEON_PDO4 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+ NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
+ NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
+ NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \
+
+#define NEON_POP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+{ \
+ uint32_t res; \
+ vtype vsrc1; \
+ vtype vsrc2; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg1); \
+ NEON_UNPACK(vtype, vsrc2, arg2); \
+ NEON_PDO##n; \
+ NEON_PACK(vtype, res, vdest); \
+ return res; \
+}
+
+/* Unary operators. */
+#define NEON_VOP1(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
+{ \
+ vtype vsrc1; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg); \
+ NEON_DO##n; \
+ NEON_PACK(vtype, arg, vdest); \
+ return arg; \
+}
+
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+ uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = tmp; \
+ }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qadd_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qadd_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
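+
+/* Example: qadd_u8(0xff, 0x01) computes the 9-bit sum 0x100, sees that
+ it no longer fits in a uint8_t, sets the sticky Q flag and returns the
+ saturated value 0xff. */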
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+ int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ if (src2 > 0) { \
+ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+ } else { \
+ tmp = 1 << (sizeof(type) * 8 - 1); \
+ } \
+ } \
+ dest = tmp; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qadd_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qadd_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+ uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ dest = 0; \
+ } else { \
+ dest = tmp; \
+ }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qsub_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qsub_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+ int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ if (src2 < 0) { \
+ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+ } else { \
+ tmp = 1 << (sizeof(type) * 8 - 1); \
+ } \
+ } \
+ dest = tmp; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qsub_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qsub_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
+NEON_VOP(hadd_s8, neon_s8, 4)
+NEON_VOP(hadd_u8, neon_u8, 4)
+NEON_VOP(hadd_s16, neon_s16, 2)
+NEON_VOP(hadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if (src1 & src2 & 1)
+ dest++;
+ return dest;
+}
+
+uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if (src1 & src2 & 1)
+ dest++;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
+NEON_VOP(rhadd_s8, neon_s8, 4)
+NEON_VOP(rhadd_u8, neon_u8, 4)
+NEON_VOP(rhadd_s16, neon_s16, 2)
+NEON_VOP(rhadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if ((src1 | src2) & 1)
+ dest++;
+ return dest;
+}
+
+uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if ((src1 | src2) & 1)
+ dest++;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
+NEON_VOP(hsub_s8, neon_s8, 4)
+NEON_VOP(hsub_u8, neon_u8, 4)
+NEON_VOP(hsub_s16, neon_s16, 2)
+NEON_VOP(hsub_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) - (src2 >> 1);
+ if ((~src1) & src2 & 1)
+ dest--;
+ return dest;
+}
+
+uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) - (src2 >> 1);
+ if ((~src1) & src2 & 1)
+ dest--;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
+NEON_VOP(cgt_s8, neon_s8, 4)
+NEON_VOP(cgt_u8, neon_u8, 4)
+NEON_VOP(cgt_s16, neon_s16, 2)
+NEON_VOP(cgt_u16, neon_u16, 2)
+NEON_VOP(cgt_s32, neon_s32, 1)
+NEON_VOP(cgt_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
+NEON_VOP(cge_s8, neon_s8, 4)
+NEON_VOP(cge_u8, neon_u8, 4)
+NEON_VOP(cge_s16, neon_s16, 2)
+NEON_VOP(cge_u16, neon_u16, 2)
+NEON_VOP(cge_s32, neon_s32, 1)
+NEON_VOP(cge_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
+NEON_VOP(min_s8, neon_s8, 4)
+NEON_VOP(min_u8, neon_u8, 4)
+NEON_VOP(min_s16, neon_s16, 2)
+NEON_VOP(min_u16, neon_u16, 2)
+NEON_VOP(min_s32, neon_s32, 1)
+NEON_VOP(min_u32, neon_u32, 1)
+NEON_POP(pmin_s8, neon_s8, 4)
+NEON_POP(pmin_u8, neon_u8, 4)
+NEON_POP(pmin_s16, neon_s16, 2)
+NEON_POP(pmin_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
+NEON_VOP(max_s8, neon_s8, 4)
+NEON_VOP(max_u8, neon_u8, 4)
+NEON_VOP(max_s16, neon_s16, 2)
+NEON_VOP(max_u16, neon_u16, 2)
+NEON_VOP(max_s32, neon_s32, 1)
+NEON_VOP(max_u32, neon_u32, 1)
+NEON_POP(pmax_s8, neon_s8, 4)
+NEON_POP(pmax_u8, neon_u8, 4)
+NEON_POP(pmax_s16, neon_s16, 2)
+NEON_POP(pmax_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
+NEON_VOP(abd_s8, neon_s8, 4)
+NEON_VOP(abd_u8, neon_u8, 4)
+NEON_VOP(abd_s16, neon_s16, 2)
+NEON_VOP(abd_u16, neon_u16, 2)
+NEON_VOP(abd_s32, neon_s32, 1)
+NEON_VOP(abd_u32, neon_u32, 1)
+#undef NEON_FN
+
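+/* NEON shifts by a register take the count from the bottom byte of the
+ second operand as a signed value: a positive count shifts left, a
+ negative count shifts right. */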
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8 || \
+ tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(shl_u8, neon_u8, 4)
+NEON_VOP(shl_u16, neon_u16, 2)
+NEON_VOP(shl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 64 || shift <= -64) {
+ val = 0;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> (sizeof(src1) * 8 - 1); \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(shl_s8, neon_s8, 4)
+NEON_VOP(shl_s16, neon_s16, 2)
+NEON_VOP(shl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ int64_t val = valop;
+ if (shift >= 64) {
+ val = 0;
+ } else if (shift <= -64) {
+ val >>= 63;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> (sizeof(src1) * 8 - 1); \
+ } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> (-tmp - 1); \
+ dest++; \
+ dest >>= 1; \
+ } else if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(rshl_s8, neon_s8, 4)
+NEON_VOP(rshl_s16, neon_s16, 2)
+NEON_VOP(rshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ int64_t val = valop;
+ if (shift >= 64) {
+ val = 0;
+ } else if (shift < -64) {
+ val >>= 63;
+ } else if (shift == -64) {
+ val >>= 63;
+ val++;
+ val >>= 1;
+ } else if (shift < 0) {
+ val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8 || \
+ tmp < -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
+ /* Rounding a 1-bit result just preserves that bit. */ \
+ dest = src1 >> (-tmp - 1); \
+ } else if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(rshl_u8, neon_u8, 4)
+NEON_VOP(rshl_u16, neon_u16, 2)
+NEON_VOP(rshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (uint8_t)shiftop;
+ if (shift >= 64 || shift < -64) {
+ val = 0;
+ } else if (shift == -64) {
+ /* Rounding a 1-bit result just preserves that bit. */
+ val >>= 63;
+ } else if (shift < 0) {
+ val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = 0; \
+ } \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 64) {
+ if (val) {
+ val = ~(uint64_t)0;
+ SET_QC();
+ } else {
+ val = 0;
+ }
+ } else if (shift <= -64) {
+ val = 0;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ uint64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = ~(uint64_t)0;
+ }
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = (src1 >> 31) ^ ((1u << (sizeof(src1) * 8 - 1)) - 1); \
+ } else { \
+ dest = 0; \
+ } \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> 31; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = (src1 >> 31) ^ ((1u << (sizeof(src1) * 8 - 1)) - 1); \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (uint8_t)shiftop;
+ int64_t val = valop;
+ if (shift >= 64) {
+ if (val) {
+ SET_QC();
+ val = (val >> 63) ^ ~SIGNBIT64;
+ }
+ } else if (shift <= -64) {
+ val >>= 63;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ int64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = (tmp >> 63) ^ ~SIGNBIT64;
+ }
+ }
+ return val;
+}
+
+
+/* FIXME: This is wrong. */
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qrshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ if (shift < 0) {
+ val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
+ } else {
+ uint64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = ~0;
+ }
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = src1 >> 31; \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qrshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (uint8_t)shiftop;
+ int64_t val = valop;
+
+ if (shift < 0) {
+ val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
+ } else {
+ int64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = (tmp >> 63) ^ ~SIGNBIT64;
+ }
+ }
+ return val;
+}
+
+uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+ mask = (a ^ b) & 0x80808080u;
+ a &= ~0x80808080u;
+ b &= ~0x80808080u;
+ return (a + b) ^ mask;
+}
+
+uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+ mask = (a ^ b) & 0x80008000u;
+ a &= ~0x80008000u;
+ b &= ~0x80008000u;
+ return (a + b) ^ mask;
+}
+
+#define NEON_FN(dest, src1, src2) dest = src1 + src2
+NEON_POP(padd_u8, neon_u8, 4)
+NEON_POP(padd_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 - src2
+NEON_VOP(sub_u8, neon_u8, 4)
+NEON_VOP(sub_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 * src2
+NEON_VOP(mul_u8, neon_u8, 4)
+NEON_VOP(mul_u16, neon_u16, 2)
+#undef NEON_FN
+
+/* Polynomial multiplication is like integer multiplication except the
+ partial products are XORed, not added. */
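+/* Example over one byte lane: 0x03 * 0x03 = 0b11 ^ 0b110 = 0b101 = 0x05,
+ where plain integer multiplication would give 9. */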
+uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
+{
+ uint32_t mask;
+ uint32_t result;
+ result = 0;
+ while (op1) {
+ mask = 0;
+ if (op1 & 1)
+ mask |= 0xff;
+ if (op1 & (1 << 8))
+ mask |= (0xff << 8);
+ if (op1 & (1 << 16))
+ mask |= (0xff << 16);
+ if (op1 & (1 << 24))
+ mask |= (0xff << 24);
+ result ^= op2 & mask;
+ op1 = (op1 >> 1) & 0x7f7f7f7f;
+ op2 = (op2 << 1) & 0xfefefefe;
+ }
+ return result;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
+NEON_VOP(tst_u8, neon_u8, 4)
+NEON_VOP(tst_u16, neon_u16, 2)
+NEON_VOP(tst_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
+NEON_VOP(ceq_u8, neon_u8, 4)
+NEON_VOP(ceq_u16, neon_u16, 2)
+NEON_VOP(ceq_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
+NEON_VOP1(abs_s8, neon_s8, 4)
+NEON_VOP1(abs_s16, neon_s16, 2)
+#undef NEON_FN
+
+/* Count Leading Sign/Zero Bits. */
+static inline int do_clz8(uint8_t x)
+{
+ int n;
+ for (n = 8; x; n--)
+ x >>= 1;
+ return n;
+}
+
+static inline int do_clz16(uint16_t x)
+{
+ int n;
+ for (n = 16; x; n--)
+ x >>= 1;
+ return n;
+}
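+
+/* Example: do_clz8(0x20) shifts out 0x20,0x10,8,4,2,1 in six iterations,
+ leaving n = 8 - 6 = 2 leading zeros. */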
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
+NEON_VOP1(clz_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
+NEON_VOP1(clz_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_cls_s32)(uint32_t x)
+{
+ int count;
+ if ((int32_t)x < 0)
+ x = ~x;
+ for (count = 32; x; count--)
+ x = x >> 1;
+ return count - 1;
+}
+
+/* Bit count. */
+uint32_t HELPER(neon_cnt_u8)(uint32_t x)
+{
+ x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
+ return x;
+}
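+
+/* The three steps above fold 1-, 2- and 4-bit partial sums in parallel:
+ a 0xff byte reduces 0xff -> 0xaa -> 0x44 -> 0x08, i.e. eight set bits
+ in that lane. */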
+
+#define NEON_QDMULH16(dest, src1, src2, round) do { \
+ uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
+ if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
+ SET_QC(); \
+ tmp = (tmp >> 31) ^ ~SIGNBIT; \
+ } else { \
+ tmp <<= 1; \
+ } \
+ if (round) { \
+ int32_t old = tmp; \
+ tmp += 1 << 15; \
+ if ((int32_t)tmp < old) { \
+ SET_QC(); \
+ tmp = SIGNBIT - 1; \
+ } \
+ } \
+ dest = tmp >> 16; \
+ } while(0)
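+/* Q15 example: qdmulh_s16(0x4000, 0x4000), i.e. 0.5 * 0.5, doubles the
+ product 0x10000000 to 0x20000000 and returns the high half 0x2000
+ (0.25); 0x8000 * 0x8000 is the only doubling overflow and saturates to
+ 0x7fff with Q set. */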
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_QDMULH16
+
+#define NEON_QDMULH32(dest, src1, src2, round) do { \
+ uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
+ if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
+ SET_QC(); \
+ tmp = (tmp >> 63) ^ ~SIGNBIT64; \
+ } else { \
+ tmp <<= 1; \
+ } \
+ if (round) { \
+ int64_t old = tmp; \
+ tmp += (int64_t)1 << 31; \
+ if ((int64_t)tmp < old) { \
+ SET_QC(); \
+ tmp = SIGNBIT64 - 1; \
+ } \
+ } \
+ dest = tmp >> 32; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#undef NEON_QDMULH32
+
+uint32_t HELPER(neon_narrow_u8)(uint64_t x)
+{
+ return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
+ | ((x >> 24) & 0xff000000u);
+}
+
+uint32_t HELPER(neon_narrow_u16)(uint64_t x)
+{
+ return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
+}
+
+uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
+{
+ return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+ | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
+{
+ return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
+{
+ x &= 0xff80ff80ff80ff80ull;
+ x += 0x0080008000800080ull;
+ return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+ | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
+{
+ x &= 0xffff8000ffff8000ull;
+ x += 0x0000800000008000ull;
+ return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
+{
+ uint16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s > 0xff) { \
+ d = 0xff; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2);
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
+{
+ int16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s != (int8_t)s) { \
+ d = (s >> 15) ^ 0x7f; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2);
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
+{
+ uint32_t high;
+ uint32_t low;
+ low = x;
+ if (low > 0xffff) {
+ low = 0xffff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high > 0xffff) {
+ high = 0xffff;
+ SET_QC();
+ }
+ return low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
+{
+ int32_t low;
+ int32_t high;
+ low = x;
+ if (low != (int16_t)low) {
+ low = (low >> 31) ^ 0x7fff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high != (int16_t)high) {
+ high = (high >> 31) ^ 0x7fff;
+ SET_QC();
+ }
+ return (uint16_t)low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
+{
+ if (x > 0xffffffffu) {
+ SET_QC();
+ return 0xffffffffu;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
+{
+ if ((int64_t)x != (int32_t)x) {
+ SET_QC();
+ return (x >> 63) ^ 0x7fffffff;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_widen_u8)(uint32_t x)
+{
+ uint64_t tmp;
+ uint64_t ret;
+ ret = (uint8_t)x;
+ tmp = (uint8_t)(x >> 8);
+ ret |= tmp << 16;
+ tmp = (uint8_t)(x >> 16);
+ ret |= tmp << 32;
+ tmp = (uint8_t)(x >> 24);
+ ret |= tmp << 48;
+ return ret;
+}
+
+uint64_t HELPER(neon_widen_s8)(uint32_t x)
+{
+ uint64_t tmp;
+ uint64_t ret;
+ ret = (uint16_t)(int8_t)x;
+ tmp = (uint16_t)(int8_t)(x >> 8);
+ ret |= tmp << 16;
+ tmp = (uint16_t)(int8_t)(x >> 16);
+ ret |= tmp << 32;
+ tmp = (uint16_t)(int8_t)(x >> 24);
+ ret |= tmp << 48;
+ return ret;
+}
+
+uint64_t HELPER(neon_widen_u16)(uint32_t x)
+{
+ uint64_t high = (uint16_t)(x >> 16);
+ return ((uint16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_widen_s16)(uint32_t x)
+{
+ uint64_t high = (int16_t)(x >> 16);
+ return ((uint32_t)(int16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ b) & 0x8000800080008000ull;
+ a &= ~0x8000800080008000ull;
+ b &= ~0x8000800080008000ull;
+ return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ b) & 0x8000000080000000ull;
+ a &= ~0x8000000080000000ull;
+ b &= ~0x8000000080000000ull;
+ return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t tmp;
+ uint64_t tmp2;
+
+ tmp = a & 0x0000ffff0000ffffull;
+ tmp += (a >> 16) & 0x0000ffff0000ffffull;
+ tmp2 = b & 0xffff0000ffff0000ull;
+ tmp2 += (b << 16) & 0xffff0000ffff0000ull;
+ return ( tmp & 0xffff)
+ | ((tmp >> 16) & 0xffff0000ull)
+ | ((tmp2 << 16) & 0xffff00000000ull)
+ | ( tmp2 & 0xffff000000000000ull);
+}
+
+uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
+{
+ uint32_t low = a + (a >> 32);
+ uint32_t high = b + (b >> 32);
+ return low + ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ ~b) & 0x8000800080008000ull;
+ a |= 0x8000800080008000ull;
+ b &= ~0x8000800080008000ull;
+ return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ ~b) & 0x8000000080000000ull;
+ a |= 0x8000000080000000ull;
+ b &= ~0x8000000080000000ull;
+ return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
+{
+ uint32_t x, y;
+ uint32_t low, high;
+
+ x = a;
+ y = b;
+ low = x + y;
+ if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+ SET_QC();
+ low = ((int32_t)x >> 31) ^ ~SIGNBIT;
+ }
+ x = a >> 32;
+ y = b >> 32;
+ high = x + y;
+ if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+ SET_QC();
+ high = ((int32_t)x >> 31) ^ ~SIGNBIT;
+ }
+ return low | ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
+{
+ uint64_t result;
+
+ result = a + b;
+ if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
+ SET_QC();
+ result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
+ }
+ return result;
+}
+
+#define DO_ABD(dest, x, y, type) do { \
+ type tmp_x = x; \
+ type tmp_y = y; \
+ dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
+ } while(0)
+
+uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, uint8_t);
+ DO_ABD(tmp, a >> 8, b >> 8, uint8_t);
+ result |= tmp << 16;
+ DO_ABD(tmp, a >> 16, b >> 16, uint8_t);
+ result |= tmp << 32;
+ DO_ABD(tmp, a >> 24, b >> 24, uint8_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, int8_t);
+ DO_ABD(tmp, a >> 8, b >> 8, int8_t);
+ result |= tmp << 16;
+ DO_ABD(tmp, a >> 16, b >> 16, int8_t);
+ result |= tmp << 32;
+ DO_ABD(tmp, a >> 24, b >> 24, int8_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, uint16_t);
+ DO_ABD(tmp, a >> 16, b >> 16, uint16_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, int16_t);
+ DO_ABD(tmp, a >> 16, b >> 16, int16_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
+{
+ uint64_t result;
+ DO_ABD(result, a, b, uint32_t);
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
+{
+ uint64_t result;
+ DO_ABD(result, a, b, int32_t);
+ return result;
+}
+#undef DO_ABD
+
+/* Widening multiply. Named type is the source type. */
+#define DO_MULL(dest, x, y, type1, type2) do { \
+ type1 tmp_x = x; \
+ type1 tmp_y = y; \
+ dest = (type2)((type2)tmp_x * (type2)tmp_y); \
+ } while(0)
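+
+/* Example: neon_mull_u8(0x00000002, 0x000000ff) widens each u8 lane pair
+ to a u16 product, giving 0x00000000000001fe. */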
+
+uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, uint8_t, uint16_t);
+ DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
+ result |= tmp << 16;
+ DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
+ result |= tmp << 32;
+ DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, int8_t, uint16_t);
+ DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
+ result |= tmp << 16;
+ DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
+ result |= tmp << 32;
+ DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, uint16_t, uint32_t);
+ DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, int16_t, uint32_t);
+ DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_negl_u16)(uint64_t x)
+{
+ uint16_t tmp;
+ uint64_t result;
+ result = (uint16_t)-x;
+ tmp = -(x >> 16);
+ result |= (uint64_t)tmp << 16;
+ tmp = -(x >> 32);
+ result |= (uint64_t)tmp << 32;
+ tmp = -(x >> 48);
+ result |= (uint64_t)tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_negl_u32)(uint64_t x)
+{
+ uint32_t low = -x;
+ uint32_t high = -(x >> 32);
+ return low | ((uint64_t)high << 32);
+}
+
+/* FIXME: There should be a native op for this. */
+uint64_t HELPER(neon_negl_u64)(uint64_t x)
+{
+ return -x;
+}
+
+/* Saturating sign manipulation. */
+/* ??? Make these use NEON_VOP1 */
+#define DO_QABS8(x) do { \
+ if (x == (int8_t)0x80) { \
+ x = 0x7f; \
+ SET_QC(); \
+ } else if (x < 0) { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
+{
+ neon_s8 vec;
+ NEON_UNPACK(neon_s8, vec, x);
+ DO_QABS8(vec.v1);
+ DO_QABS8(vec.v2);
+ DO_QABS8(vec.v3);
+ DO_QABS8(vec.v4);
+ NEON_PACK(neon_s8, x, vec);
+ return x;
+}
+#undef DO_QABS8
+
+#define DO_QNEG8(x) do { \
+ if (x == (int8_t)0x80) { \
+ x = 0x7f; \
+ SET_QC(); \
+ } else { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
+{
+ neon_s8 vec;
+ NEON_UNPACK(neon_s8, vec, x);
+ DO_QNEG8(vec.v1);
+ DO_QNEG8(vec.v2);
+ DO_QNEG8(vec.v3);
+ DO_QNEG8(vec.v4);
+ NEON_PACK(neon_s8, x, vec);
+ return x;
+}
+#undef DO_QNEG8
+
+#define DO_QABS16(x) do { \
+ if (x == (int16_t)0x8000) { \
+ x = 0x7fff; \
+ SET_QC(); \
+ } else if (x < 0) { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
+{
+ neon_s16 vec;
+ NEON_UNPACK(neon_s16, vec, x);
+ DO_QABS16(vec.v1);
+ DO_QABS16(vec.v2);
+ NEON_PACK(neon_s16, x, vec);
+ return x;
+}
+#undef DO_QABS16
+
+#define DO_QNEG16(x) do { \
+ if (x == (int16_t)0x8000) { \
+ x = 0x7fff; \
+ SET_QC(); \
+ } else { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
+{
+ neon_s16 vec;
+ NEON_UNPACK(neon_s16, vec, x);
+ DO_QNEG16(vec.v1);
+ DO_QNEG16(vec.v2);
+ NEON_PACK(neon_s16, x, vec);
+ return x;
+}
+#undef DO_QNEG16
+
+uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
+{
+ if (x == SIGNBIT) {
+ SET_QC();
+ x = ~SIGNBIT;
+ } else if ((int32_t)x < 0) {
+ x = -x;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
+{
+ if (x == SIGNBIT) {
+ SET_QC();
+ x = ~SIGNBIT;
+ } else {
+ x = -x;
+ }
+ return x;
+}
+
+/* NEON Float helpers. */
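+/* float32_compare_quiet() returns the softfloat relation codes
+ (-1 less, 0 equal, 1 greater, 2 unordered), so NaN operands never
+ satisfy the comparisons below. */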
+uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
+{
+ float32 f0 = vfp_itos(a);
+ float32 f1 = vfp_itos(b);
+ return (float32_compare_quiet(f0, f1, NFS) == -1) ? a : b;
+}
+
+uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
+{
+ float32 f0 = vfp_itos(a);
+ float32 f1 = vfp_itos(b);
+ return (float32_compare_quiet(f0, f1, NFS) == 1) ? a : b;
+}
+
+uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
+{
+ float32 f0 = vfp_itos(a);
+ float32 f1 = vfp_itos(b);
+ return vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
+ ? float32_sub(f0, f1, NFS)
+ : float32_sub(f1, f0, NFS));
+}
+
+uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
+{
+ return vfp_stoi(float32_add(vfp_itos(a), vfp_itos(b), NFS));
+}
+
+uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
+{
+ return vfp_stoi(float32_sub(vfp_itos(a), vfp_itos(b), NFS));
+}
+
+uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
+{
+ return vfp_stoi(float32_mul(vfp_itos(a), vfp_itos(b), NFS));
+}
+
+/* Floating point comparisons produce an integer result. */
+#define NEON_VOP_FCMP(name, cmp) \
+uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
+{ \
+ if (float32_compare_quiet(vfp_itos(a), vfp_itos(b), NFS) cmp 0) \
+ return ~0; \
+ else \
+ return 0; \
+}
+
+NEON_VOP_FCMP(ceq_f32, ==)
+NEON_VOP_FCMP(cge_f32, >=)
+NEON_VOP_FCMP(cgt_f32, >)
+
+uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
+{
+ float32 f0 = float32_abs(vfp_itos(a));
+ float32 f1 = float32_abs(vfp_itos(b));
+ return (float32_compare_quiet(f0, f1, NFS) >= 0) ? ~0 : 0;
+}
+
+uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
+{
+ float32 f0 = float32_abs(vfp_itos(a));
+ float32 f1 = float32_abs(vfp_itos(b));
+ return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
+}
diff --git a/target-arm/nwfpe/double_cpdo.c b/target-arm/nwfpe/double_cpdo.c
deleted file mode 100644
index 944083a..0000000
--- a/target-arm/nwfpe/double_cpdo.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "softfloat.h"
-#include "fpopcode.h"
-
-float64 float64_exp(float64 Fm);
-float64 float64_ln(float64 Fm);
-float64 float64_sin(float64 rFm);
-float64 float64_cos(float64 rFm);
-float64 float64_arcsin(float64 rFm);
-float64 float64_arctan(float64 rFm);
-float64 float64_log(float64 rFm);
-float64 float64_tan(float64 rFm);
-float64 float64_arccos(float64 rFm);
-float64 float64_pow(float64 rFn,float64 rFm);
-float64 float64_pol(float64 rFn,float64 rFm);
-
-unsigned int DoubleCPDO(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
- float64 rFm, rFn = 0;
- unsigned int Fd, Fm, Fn, nRc = 1;
-
- //printk("DoubleCPDO(0x%08x)\n",opcode);
-
- Fm = getFm(opcode);
- if (CONSTANT_FM(opcode))
- {
- rFm = getDoubleConstant(Fm);
- }
- else
- {
- switch (fpa11->fType[Fm])
- {
- case typeSingle:
- rFm = float32_to_float64(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- rFm = fpa11->fpreg[Fm].fDouble;
- break;
-
- case typeExtended:
- // !! patb
- //printk("not implemented! why not?\n");
- //!! ScottB
- // should never get here; if an extended operand
- // is involved, the other operand should be promoted
- // and ExtendedCPDO called instead.
- break;
-
- default: return 0;
- }
- }
-
- if (!MONADIC_INSTRUCTION(opcode))
- {
- Fn = getFn(opcode);
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- rFn = float32_to_float64(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- rFn = fpa11->fpreg[Fn].fDouble;
- break;
-
- default: return 0;
- }
- }
-
- Fd = getFd(opcode);
- /* !! this switch isn't optimized; better (opcode & MASK_ARITHMETIC_OPCODE)>>24, sort of */
- switch (opcode & MASK_ARITHMETIC_OPCODE)
- {
- /* dyadic opcodes */
- case ADF_CODE:
- fpa11->fpreg[Fd].fDouble = float64_add(rFn,rFm, &fpa11->fp_status);
- break;
-
- case MUF_CODE:
- case FML_CODE:
- fpa11->fpreg[Fd].fDouble = float64_mul(rFn,rFm, &fpa11->fp_status);
- break;
-
- case SUF_CODE:
- fpa11->fpreg[Fd].fDouble = float64_sub(rFn,rFm, &fpa11->fp_status);
- break;
-
- case RSF_CODE:
- fpa11->fpreg[Fd].fDouble = float64_sub(rFm,rFn, &fpa11->fp_status);
- break;
-
- case DVF_CODE:
- case FDV_CODE:
- fpa11->fpreg[Fd].fDouble = float64_div(rFn,rFm, &fpa11->fp_status);
- break;
-
- case RDF_CODE:
- case FRD_CODE:
- fpa11->fpreg[Fd].fDouble = float64_div(rFm,rFn, &fpa11->fp_status);
- break;
-
-#if 0
- case POW_CODE:
- fpa11->fpreg[Fd].fDouble = float64_pow(rFn,rFm);
- break;
-
- case RPW_CODE:
- fpa11->fpreg[Fd].fDouble = float64_pow(rFm,rFn);
- break;
-#endif
-
- case RMF_CODE:
- fpa11->fpreg[Fd].fDouble = float64_rem(rFn,rFm, &fpa11->fp_status);
- break;
-
-#if 0
- case POL_CODE:
- fpa11->fpreg[Fd].fDouble = float64_pol(rFn,rFm);
- break;
-#endif
-
- /* monadic opcodes */
- case MVF_CODE:
- fpa11->fpreg[Fd].fDouble = rFm;
- break;
-
- case MNF_CODE:
- {
- unsigned int *p = (unsigned int*)&rFm;
-#ifdef WORDS_BIGENDIAN
- p[0] ^= 0x80000000;
-#else
- p[1] ^= 0x80000000;
-#endif
- fpa11->fpreg[Fd].fDouble = rFm;
- }
- break;
-
- case ABS_CODE:
- {
- unsigned int *p = (unsigned int*)&rFm;
-#ifdef WORDS_BIGENDIAN
- p[0] &= 0x7fffffff;
-#else
- p[1] &= 0x7fffffff;
-#endif
- fpa11->fpreg[Fd].fDouble = rFm;
- }
- break;
-
- case RND_CODE:
- case URD_CODE:
- fpa11->fpreg[Fd].fDouble = float64_round_to_int(rFm, &fpa11->fp_status);
- break;
-
- case SQT_CODE:
- fpa11->fpreg[Fd].fDouble = float64_sqrt(rFm, &fpa11->fp_status);
- break;
-
-#if 0
- case LOG_CODE:
- fpa11->fpreg[Fd].fDouble = float64_log(rFm);
- break;
-
- case LGN_CODE:
- fpa11->fpreg[Fd].fDouble = float64_ln(rFm);
- break;
-
- case EXP_CODE:
- fpa11->fpreg[Fd].fDouble = float64_exp(rFm);
- break;
-
- case SIN_CODE:
- fpa11->fpreg[Fd].fDouble = float64_sin(rFm);
- break;
-
- case COS_CODE:
- fpa11->fpreg[Fd].fDouble = float64_cos(rFm);
- break;
-
- case TAN_CODE:
- fpa11->fpreg[Fd].fDouble = float64_tan(rFm);
- break;
-
- case ASN_CODE:
- fpa11->fpreg[Fd].fDouble = float64_arcsin(rFm);
- break;
-
- case ACS_CODE:
- fpa11->fpreg[Fd].fDouble = float64_arccos(rFm);
- break;
-
- case ATN_CODE:
- fpa11->fpreg[Fd].fDouble = float64_arctan(rFm);
- break;
-#endif
-
- case NRM_CODE:
- break;
-
- default:
- {
- nRc = 0;
- }
- }
-
- if (0 != nRc) fpa11->fType[Fd] = typeDouble;
- return nRc;
-}
-
-#if 0
-float64 float64_exp(float64 rFm)
-{
- return rFm;
-//series
-}
-
-float64 float64_ln(float64 rFm)
-{
- return rFm;
-//series
-}
-
-float64 float64_sin(float64 rFm)
-{
- return rFm;
-//series
-}
-
-float64 float64_cos(float64 rFm)
-{
- return rFm;
- //series
-}
-
-#if 0
-float64 float64_arcsin(float64 rFm)
-{
-//series
-}
-
-float64 float64_arctan(float64 rFm)
-{
- //series
-}
-#endif
-
-float64 float64_log(float64 rFm)
-{
- return float64_div(float64_ln(rFm),getDoubleConstant(7));
-}
-
-float64 float64_tan(float64 rFm)
-{
- return float64_div(float64_sin(rFm),float64_cos(rFm));
-}
-
-float64 float64_arccos(float64 rFm)
-{
-return rFm;
- //return float64_sub(halfPi,float64_arcsin(rFm));
-}
-
-float64 float64_pow(float64 rFn,float64 rFm)
-{
- return float64_exp(float64_mul(rFm,float64_ln(rFn)));
-}
-
-float64 float64_pol(float64 rFn,float64 rFm)
-{
- return float64_arctan(float64_div(rFn,rFm));
-}
-#endif
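
The MNF and ABS cases above negate or clear the float64 sign bit by poking the high 32-bit word directly, which is why the code selects p[0] or p[1] based on WORDS_BIGENDIAN. A standalone sketch of the same trick, assuming a little-endian host (the #else branch):

/* Sketch of the MNF sign flip on a 64-bit float held as two 32-bit words.
   On a little-endian host the sign and exponent live in the high word w[1]. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    double d = 3.25;
    uint32_t w[2];
    memcpy(w, &d, sizeof(w));
    w[1] ^= 0x80000000u;   /* flip the sign bit (MNF); &= 0x7fffffff is ABS */
    memcpy(&d, w, sizeof(d));
    printf("%f\n", d);     /* -3.250000 */
    return 0;
}
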
diff --git a/target-arm/nwfpe/extended_cpdo.c b/target-arm/nwfpe/extended_cpdo.c
deleted file mode 100644
index f5ef623..0000000
--- a/target-arm/nwfpe/extended_cpdo.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "softfloat.h"
-#include "fpopcode.h"
-
-floatx80 floatx80_exp(floatx80 Fm);
-floatx80 floatx80_ln(floatx80 Fm);
-floatx80 floatx80_sin(floatx80 rFm);
-floatx80 floatx80_cos(floatx80 rFm);
-floatx80 floatx80_arcsin(floatx80 rFm);
-floatx80 floatx80_arctan(floatx80 rFm);
-floatx80 floatx80_log(floatx80 rFm);
-floatx80 floatx80_tan(floatx80 rFm);
-floatx80 floatx80_arccos(floatx80 rFm);
-floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm);
-floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm);
-
-unsigned int ExtendedCPDO(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
- floatx80 rFm, rFn;
- unsigned int Fd, Fm, Fn, nRc = 1;
-
- //printk("ExtendedCPDO(0x%08x)\n",opcode);
-
- Fm = getFm(opcode);
- if (CONSTANT_FM(opcode))
- {
- rFm = getExtendedConstant(Fm);
- }
- else
- {
- switch (fpa11->fType[Fm])
- {
- case typeSingle:
- rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble, &fpa11->fp_status);
- break;
-
- case typeExtended:
- rFm = fpa11->fpreg[Fm].fExtended;
- break;
-
- default: return 0;
- }
- }
-
- if (!MONADIC_INSTRUCTION(opcode))
- {
- Fn = getFn(opcode);
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status);
- break;
-
- case typeExtended:
- rFn = fpa11->fpreg[Fn].fExtended;
- break;
-
- default: return 0;
- }
- }
-
- Fd = getFd(opcode);
- switch (opcode & MASK_ARITHMETIC_OPCODE)
- {
- /* dyadic opcodes */
- case ADF_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_add(rFn,rFm, &fpa11->fp_status);
- break;
-
- case MUF_CODE:
- case FML_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_mul(rFn,rFm, &fpa11->fp_status);
- break;
-
- case SUF_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_sub(rFn,rFm, &fpa11->fp_status);
- break;
-
- case RSF_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_sub(rFm,rFn, &fpa11->fp_status);
- break;
-
- case DVF_CODE:
- case FDV_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_div(rFn,rFm, &fpa11->fp_status);
- break;
-
- case RDF_CODE:
- case FRD_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_div(rFm,rFn, &fpa11->fp_status);
- break;
-
-#if 0
- case POW_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_pow(rFn,rFm);
- break;
-
- case RPW_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_pow(rFm,rFn);
- break;
-#endif
-
- case RMF_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_rem(rFn,rFm, &fpa11->fp_status);
- break;
-
-#if 0
- case POL_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_pol(rFn,rFm);
- break;
-#endif
-
- /* monadic opcodes */
- case MVF_CODE:
- fpa11->fpreg[Fd].fExtended = rFm;
- break;
-
- case MNF_CODE:
- rFm.high ^= 0x8000;
- fpa11->fpreg[Fd].fExtended = rFm;
- break;
-
- case ABS_CODE:
- rFm.high &= 0x7fff;
- fpa11->fpreg[Fd].fExtended = rFm;
- break;
-
- case RND_CODE:
- case URD_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_round_to_int(rFm, &fpa11->fp_status);
- break;
-
- case SQT_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_sqrt(rFm, &fpa11->fp_status);
- break;
-
-#if 0
- case LOG_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_log(rFm);
- break;
-
- case LGN_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_ln(rFm);
- break;
-
- case EXP_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_exp(rFm);
- break;
-
- case SIN_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_sin(rFm);
- break;
-
- case COS_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_cos(rFm);
- break;
-
- case TAN_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_tan(rFm);
- break;
-
- case ASN_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_arcsin(rFm);
- break;
-
- case ACS_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_arccos(rFm);
- break;
-
- case ATN_CODE:
- fpa11->fpreg[Fd].fExtended = floatx80_arctan(rFm);
- break;
-#endif
-
- case NRM_CODE:
- break;
-
- default:
- {
- nRc = 0;
- }
- }
-
- if (0 != nRc) fpa11->fType[Fd] = typeExtended;
- return nRc;
-}
-
-#if 0
-floatx80 floatx80_exp(floatx80 Fm)
-{
-//series
-}
-
-floatx80 floatx80_ln(floatx80 Fm)
-{
-//series
-}
-
-floatx80 floatx80_sin(floatx80 rFm)
-{
-//series
-}
-
-floatx80 floatx80_cos(floatx80 rFm)
-{
-//series
-}
-
-floatx80 floatx80_arcsin(floatx80 rFm)
-{
-//series
-}
-
-floatx80 floatx80_arctan(floatx80 rFm)
-{
- //series
-}
-
-floatx80 floatx80_log(floatx80 rFm)
-{
- return floatx80_div(floatx80_ln(rFm),getExtendedConstant(7));
-}
-
-floatx80 floatx80_tan(floatx80 rFm)
-{
- return floatx80_div(floatx80_sin(rFm),floatx80_cos(rFm));
-}
-
-floatx80 floatx80_arccos(floatx80 rFm)
-{
- //return floatx80_sub(halfPi,floatx80_arcsin(rFm));
-}
-
-floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm)
-{
- return floatx80_exp(floatx80_mul(rFm,floatx80_ln(rFn)));
-}
-
-floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm)
-{
- return floatx80_arctan(floatx80_div(rFn,rFm));
-}
-#endif
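
For the extended format, MNF and ABS flip or clear bit 15 of the 16-bit high field, where the softfloat floatx80 layout keeps the sign and exponent. A small sketch, reusing the "extended 1.0" encoding from the constant table in fpopcode.c:

/* Sketch of the floatx80 sign flip used by MNF above. The struct mirrors
   softfloat's floatx80: low = mantissa, high = sign (bit 15) + exponent. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t low; uint16_t high; } fx80_sketch;

int main(void)
{
    fx80_sketch one = { 0x8000000000000000ULL, 0x3fff };  /* extended 1.0 */
    one.high ^= 0x8000;                                   /* now -1.0 */
    printf("high=0x%04x\n", one.high);                    /* 0xbfff */
    return 0;
}
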
diff --git a/target-arm/nwfpe/fpa11.c b/target-arm/nwfpe/fpa11.c
deleted file mode 100644
index a8141e7..0000000
--- a/target-arm/nwfpe/fpa11.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-
-#include "fpopcode.h"
-
-//#include "fpmodule.h"
-//#include "fpmodule.inl"
-
-//#include <asm/system.h>
-
-#include <stdio.h>
-
-/* forward declarations */
-unsigned int EmulateCPDO(const unsigned int);
-unsigned int EmulateCPDT(const unsigned int);
-unsigned int EmulateCPRT(const unsigned int);
-
-FPA11* qemufpa=0;
-CPUARMState* user_registers;
-
-/* Reset the FPA11 chip. Called to initialize and reset the emulator. */
-void resetFPA11(void)
-{
- int i;
- FPA11 *fpa11 = GET_FPA11();
-
- /* initialize the register type array */
- for (i=0;i<=7;i++)
- {
- fpa11->fType[i] = typeNone;
- }
-
- /* FPSR: set system id to FP_EMULATOR, set AC, clear all other bits */
- fpa11->fpsr = FP_EMULATOR | BIT_AC;
-
- /* FPCR: set SB, AB and DA bits, clear all others */
-#if MAINTAIN_FPCR
- fpa11->fpcr = MASK_RESET;
-#endif
-}
-
-void SetRoundingMode(const unsigned int opcode)
-{
- int rounding_mode;
- FPA11 *fpa11 = GET_FPA11();
-
-#if MAINTAIN_FPCR
- fpa11->fpcr &= ~MASK_ROUNDING_MODE;
-#endif
- switch (opcode & MASK_ROUNDING_MODE)
- {
- default:
- case ROUND_TO_NEAREST:
- rounding_mode = float_round_nearest_even;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_TO_NEAREST;
-#endif
- break;
-
- case ROUND_TO_PLUS_INFINITY:
- rounding_mode = float_round_up;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_TO_PLUS_INFINITY;
-#endif
- break;
-
- case ROUND_TO_MINUS_INFINITY:
- rounding_mode = float_round_down;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_TO_MINUS_INFINITY;
-#endif
- break;
-
- case ROUND_TO_ZERO:
- rounding_mode = float_round_to_zero;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_TO_ZERO;
-#endif
- break;
- }
- set_float_rounding_mode(rounding_mode, &fpa11->fp_status);
-}
-
-void SetRoundingPrecision(const unsigned int opcode)
-{
- int rounding_precision;
- FPA11 *fpa11 = GET_FPA11();
-#if MAINTAIN_FPCR
- fpa11->fpcr &= ~MASK_ROUNDING_PRECISION;
-#endif
- switch (opcode & MASK_ROUNDING_PRECISION)
- {
- case ROUND_SINGLE:
- rounding_precision = 32;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_SINGLE;
-#endif
- break;
-
- case ROUND_DOUBLE:
- rounding_precision = 64;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_DOUBLE;
-#endif
- break;
-
- case ROUND_EXTENDED:
- rounding_precision = 80;
-#if MAINTAIN_FPCR
- fpa11->fpcr |= ROUND_EXTENDED;
-#endif
- break;
-
- default: rounding_precision = 80;
- }
- set_floatx80_rounding_precision(rounding_precision, &fpa11->fp_status);
-}
-
-/* Emulate the instruction in the opcode. */
-/* ??? This is not thread safe. */
-unsigned int EmulateAll(unsigned int opcode, FPA11* qfpa, CPUARMState* qregs)
-{
- unsigned int nRc = 0;
-// unsigned long flags;
- FPA11 *fpa11;
-// save_flags(flags); sti();
-
- qemufpa=qfpa;
- user_registers=qregs;
-
-#if 0
- fprintf(stderr,"emulating FP insn 0x%08x, PC=0x%08x\n",
- opcode, qregs[REG_PC]);
-#endif
- fpa11 = GET_FPA11();
-
- if (fpa11->initflag == 0) /* good place for __builtin_expect */
- {
- resetFPA11();
- SetRoundingMode(ROUND_TO_NEAREST);
- SetRoundingPrecision(ROUND_EXTENDED);
- fpa11->initflag = 1;
- }
-
- if (TEST_OPCODE(opcode,MASK_CPRT))
- {
- //fprintf(stderr,"emulating CPRT\n");
- /* Emulate conversion opcodes. */
- /* Emulate register transfer opcodes. */
- /* Emulate comparison opcodes. */
- nRc = EmulateCPRT(opcode);
- }
- else if (TEST_OPCODE(opcode,MASK_CPDO))
- {
- //fprintf(stderr,"emulating CPDO\n");
- /* Emulate monadic arithmetic opcodes. */
- /* Emulate dyadic arithmetic opcodes. */
- nRc = EmulateCPDO(opcode);
- }
- else if (TEST_OPCODE(opcode,MASK_CPDT))
- {
- //fprintf(stderr,"emulating CPDT\n");
- /* Emulate load/store opcodes. */
- /* Emulate load/store multiple opcodes. */
- nRc = EmulateCPDT(opcode);
- }
- else
- {
- /* Invalid instruction detected. Return FALSE. */
- nRc = 0;
- }
-
-// restore_flags(flags);
-
- //printf("returning %d\n",nRc);
- return(nRc);
-}
-
-#if 0
-unsigned int EmulateAll1(unsigned int opcode)
-{
- switch ((opcode >> 24) & 0xf)
- {
- case 0xc:
- case 0xd:
- if ((opcode >> 20) & 0x1)
- {
- switch ((opcode >> 8) & 0xf)
- {
- case 0x1: return PerformLDF(opcode); break;
- case 0x2: return PerformLFM(opcode); break;
- default: return 0;
- }
- }
- else
- {
- switch ((opcode >> 8) & 0xf)
- {
- case 0x1: return PerformSTF(opcode); break;
- case 0x2: return PerformSFM(opcode); break;
- default: return 0;
- }
- }
- break;
-
- case 0xe:
- if (opcode & 0x10)
- return EmulateCPDO(opcode);
- else
- return EmulateCPRT(opcode);
- break;
-
- default: return 0;
- }
-}
-#endif
-
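
EmulateAll recognizes the three instruction classes by checking that every bit of a class mask is set, and it must test CPRT before CPDO because MASK_CPRT is a strict superset of MASK_CPDO. A sketch of the dispatch, using the mask values from fpopcode.h:

/* Sketch of the EmulateAll dispatch: TEST_OPCODE passes only when all
   mask bits are present, so the most specific mask must be tried first. */
#include <stdio.h>

#define MASK_CPDT 0x0c000000
#define MASK_CPDO 0x0e000000
#define MASK_CPRT 0x0e000010
#define TEST_OPCODE(op, mask) (((op) & (mask)) == (mask))

static const char *classify(unsigned int op)
{
    if (TEST_OPCODE(op, MASK_CPRT)) return "CPRT";
    if (TEST_OPCODE(op, MASK_CPDO)) return "CPDO";
    if (TEST_OPCODE(op, MASK_CPDT)) return "CPDT";
    return "invalid";
}

int main(void)
{
    printf("%s\n", classify(0x0e000010));  /* CPRT */
    printf("%s\n", classify(0x0e000000));  /* CPDO */
    printf("%s\n", classify(0x0c000000));  /* CPDT */
    return 0;
}
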
diff --git a/target-arm/nwfpe/fpa11.h b/target-arm/nwfpe/fpa11.h
deleted file mode 100644
index 8751696..0000000
--- a/target-arm/nwfpe/fpa11.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.com, 1998-1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef __FPA11_H__
-#define __FPA11_H__
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-
-#include <cpu.h>
-
-#define GET_FPA11() (qemufpa)
-
-/*
- * The process's registers are always at the very top of the 8K
- * stack+task struct. Use the same method as 'current' uses to
- * reach them.
- */
-extern CPUARMState *user_registers;
-
-#define GET_USERREG() (user_registers)
-
-/* Need task_struct */
-//#include <linux/sched.h>
-
-/* includes */
-#include "fpsr.h" /* FP control and status register definitions */
-#include "softfloat.h"
-
-#define typeNone 0x00
-#define typeSingle 0x01
-#define typeDouble 0x02
-#define typeExtended 0x03
-
-/*
- * This must be no more and no less than 12 bytes.
- */
-typedef union tagFPREG {
- floatx80 fExtended;
- float64 fDouble;
- float32 fSingle;
-} FPREG;
-
-/*
- * FPA11 device model.
- *
- * This structure is exported to user space. Do not re-order.
- * Only add new stuff to the end, and do not change the size of
- * any element. Elements of this structure are used by user
- * space, and must match struct user_fp in include/asm-arm/user.h.
- * We include the byte offsets below for documentation purposes.
- *
- * The size of this structure and FPREG are checked by fpmodule.c
- * on initialisation. If the rules have been broken, NWFPE will
- * not initialise.
- */
-typedef struct tagFPA11 {
-/* 0 */ FPREG fpreg[8]; /* 8 floating point registers */
-/* 96 */ FPSR fpsr; /* floating point status register */
-/* 100 */ FPCR fpcr; /* floating point control register */
-/* 104 */ unsigned char fType[8]; /* type of floating point value held in
- floating point registers. One of none,
- single, double or extended. */
-/* 112 */ int initflag; /* this is special. The kernel guarantees
- to set it to 0 when a thread is launched,
- so we can use it to detect whether this
- instance of the emulator needs to be
- initialised. */
- float_status fp_status; /* QEMU float emulator status */
-} FPA11;
-
-extern FPA11* qemufpa;
-
-extern void resetFPA11(void);
-extern void SetRoundingMode(const unsigned int);
-extern void SetRoundingPrecision(const unsigned int);
-
-static inline unsigned int readRegister(unsigned int reg)
-{
- return (user_registers->regs[(reg)]);
-}
-
-static inline void writeRegister(unsigned int x, unsigned int y)
-{
-#if 0
- printf("writing %d to r%d\n",y,x);
-#endif
- user_registers->regs[(x)]=(y);
-}
-
-static inline void writeConditionCodes(unsigned int x)
-{
- cpsr_write(user_registers,x,CPSR_NZCV);
-}
-
-#define REG_PC 15
-
-unsigned int EmulateAll(unsigned int opcode, FPA11* qfpa, CPUARMState* qregs);
-
-/* included only for get_user/put_user macros */
-#include "qemu.h"
-
-#endif
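
The FPREG union gives each of the eight registers one storage slot that can hold any of the three formats, with the active format tracked out-of-band in fType[]. A sketch of the idea with host types standing in for the softfloat ones:

/* Sketch of the FPREG layout: one slot per register, format recorded
   separately. Host long double/double/float stand in for floatx80/
   float64/float32, so sizes differ from the real 12-byte layout. */
#include <stdio.h>

typedef union {
    long double fExtended;
    double      fDouble;
    float       fSingle;
} FPREG_sketch;

int main(void)
{
    FPREG_sketch r;
    r.fDouble = 2.5;
    printf("sizeof=%zu fDouble=%f\n", sizeof(FPREG_sketch), r.fDouble);
    return 0;
}
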
diff --git a/target-arm/nwfpe/fpa11.inl b/target-arm/nwfpe/fpa11.inl
deleted file mode 100644
index 7183ec9..0000000
--- a/target-arm/nwfpe/fpa11.inl
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-
-/* Read and write floating point status register */
-static inline unsigned int readFPSR(void)
-{
- FPA11 *fpa11 = GET_FPA11();
- return(fpa11->fpsr);
-}
-
-static inline void writeFPSR(FPSR reg)
-{
- FPA11 *fpa11 = GET_FPA11();
- /* the sysid byte in the status register is readonly */
- fpa11->fpsr = (fpa11->fpsr & MASK_SYSID) | (reg & ~MASK_SYSID);
-}
-
-/* Read and write floating point control register */
-static inline FPCR readFPCR(void)
-{
- FPA11 *fpa11 = GET_FPA11();
- /* clear SB, AB and DA bits before returning FPCR */
- return(fpa11->fpcr & ~MASK_RFC);
-}
-
-static inline void writeFPCR(FPCR reg)
-{
- FPA11 *fpa11 = GET_FPA11();
- fpa11->fpcr &= ~MASK_WFC; /* clear SB, AB and DA bits */
- fpa11->fpcr |= (reg & MASK_WFC); /* write SB, AB and DA bits */
-}
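
Both accessor pairs above implement protected bit fields with nothing but masks: writeFPSR merges new bits everywhere except the read-only sysid byte, and writeFPCR touches only the SB/AB/DA bits. A sketch of the merge, with an illustrative stand-in mask value:

/* Sketch of the read-only-field merge used by writeFPSR. MASK_PROTECTED
   is a stand-in for MASK_SYSID; the protected byte survives the write. */
#include <stdint.h>
#include <stdio.h>

#define MASK_PROTECTED 0xff000000u

int main(void)
{
    uint32_t reg = 0x81000000u;   /* protected byte currently 0x81 */
    uint32_t wr  = 0x12345678u;   /* value the guest tries to write */
    reg = (reg & MASK_PROTECTED) | (wr & ~MASK_PROTECTED);
    printf("0x%08x\n", reg);      /* 0x81345678 */
    return 0;
}
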
diff --git a/target-arm/nwfpe/fpa11_cpdo.c b/target-arm/nwfpe/fpa11_cpdo.c
deleted file mode 100644
index cc8aa87..0000000
--- a/target-arm/nwfpe/fpa11_cpdo.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "fpopcode.h"
-
-unsigned int SingleCPDO(const unsigned int opcode);
-unsigned int DoubleCPDO(const unsigned int opcode);
-unsigned int ExtendedCPDO(const unsigned int opcode);
-
-unsigned int EmulateCPDO(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
- unsigned int Fd, nType, nDest, nRc = 1;
-
- //printk("EmulateCPDO(0x%08x)\n",opcode);
-
- /* Get the destination size. If not valid, let Linux perform
- an invalid instruction trap. */
- nDest = getDestinationSize(opcode);
- if (typeNone == nDest) return 0;
-
- SetRoundingMode(opcode);
-
- /* Compare the size of the operands in Fn and Fm.
- Choose the largest size and perform operations in that size,
- in order to make use of all the precision of the operands.
- If Fm is a constant, we just grab a constant of a size
- matching the size of the operand in Fn. */
- if (MONADIC_INSTRUCTION(opcode))
- nType = nDest;
- else
- nType = fpa11->fType[getFn(opcode)];
-
- if (!CONSTANT_FM(opcode))
- {
- register unsigned int Fm = getFm(opcode);
- if (nType < fpa11->fType[Fm])
- {
- nType = fpa11->fType[Fm];
- }
- }
-
- switch (nType)
- {
- case typeSingle : nRc = SingleCPDO(opcode); break;
- case typeDouble : nRc = DoubleCPDO(opcode); break;
- case typeExtended : nRc = ExtendedCPDO(opcode); break;
- default : nRc = 0;
- }
-
- /* If the operation succeeded, check to see if the result in the
- destination register is the correct size. If not force it
- to be. */
- Fd = getFd(opcode);
- nType = fpa11->fType[Fd];
- if ((0 != nRc) && (nDest != nType))
- {
- switch (nDest)
- {
- case typeSingle:
- {
- if (typeDouble == nType)
- fpa11->fpreg[Fd].fSingle =
- float64_to_float32(fpa11->fpreg[Fd].fDouble, &fpa11->fp_status);
- else
- fpa11->fpreg[Fd].fSingle =
- floatx80_to_float32(fpa11->fpreg[Fd].fExtended, &fpa11->fp_status);
- }
- break;
-
- case typeDouble:
- {
- if (typeSingle == nType)
- fpa11->fpreg[Fd].fDouble =
- float32_to_float64(fpa11->fpreg[Fd].fSingle, &fpa11->fp_status);
- else
- fpa11->fpreg[Fd].fDouble =
- floatx80_to_float64(fpa11->fpreg[Fd].fExtended, &fpa11->fp_status);
- }
- break;
-
- case typeExtended:
- {
- if (typeSingle == nType)
- fpa11->fpreg[Fd].fExtended =
- float32_to_floatx80(fpa11->fpreg[Fd].fSingle, &fpa11->fp_status);
- else
- fpa11->fpreg[Fd].fExtended =
- float64_to_floatx80(fpa11->fpreg[Fd].fDouble, &fpa11->fp_status);
- }
- break;
- }
-
- fpa11->fType[Fd] = nDest;
- }
-
- return nRc;
-}
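
The promotion rule in EmulateCPDO works because the type codes are ordered: typeNone < typeSingle < typeDouble < typeExtended, so taking the numerically larger code selects the wider format. A sketch:

/* Sketch of EmulateCPDO's operand-width promotion: the larger type code
   is the wider format, so max() over the codes picks the working size. */
#include <stdio.h>

enum { typeNone, typeSingle, typeDouble, typeExtended };

static int promote(int tn, int tm)
{
    return (tm > tn) ? tm : tn;
}

int main(void)
{
    printf("%d\n", promote(typeSingle, typeDouble));    /* 2: double */
    printf("%d\n", promote(typeExtended, typeSingle));  /* 3: extended */
    return 0;
}
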
diff --git a/target-arm/nwfpe/fpa11_cpdt.c b/target-arm/nwfpe/fpa11_cpdt.c
deleted file mode 100644
index 914a86f..0000000
--- a/target-arm/nwfpe/fpa11_cpdt.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.com, 1998-1999
- (c) Philip Blundell, 1998
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "softfloat.h"
-#include "fpopcode.h"
-//#include "fpmodule.h"
-//#include "fpmodule.inl"
-
-//#include <asm/uaccess.h>
-
-static inline
-void loadSingle(const unsigned int Fn,const unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- fpa11->fType[Fn] = typeSingle;
- fpa11->fpreg[Fn].fSingle = tget32(addr);
-}
-
-static inline
-void loadDouble(const unsigned int Fn,const unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- unsigned int *p;
- p = (unsigned int*)&fpa11->fpreg[Fn].fDouble;
- fpa11->fType[Fn] = typeDouble;
-#ifdef WORDS_BIGENDIAN
- p[0] = tget32(addr); /* sign & exponent */
- p[1] = tget32(addr + 4);
-#else
- p[0] = tget32(addr + 4);
- p[1] = tget32(addr); /* sign & exponent */
-#endif
-}
-
-static inline
-void loadExtended(const unsigned int Fn,const unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- unsigned int *p;
- p = (unsigned int*)&fpa11->fpreg[Fn].fExtended;
- fpa11->fType[Fn] = typeExtended;
- p[0] = tget32(addr); /* sign & exponent */
- p[1] = tget32(addr + 8); /* ls bits */
- p[2] = tget32(addr + 4); /* ms bits */
-}
-
-static inline
-void loadMultiple(const unsigned int Fn,const unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- register unsigned int *p;
- unsigned long x;
-
- p = (unsigned int*)&(fpa11->fpreg[Fn]);
- x = tget32(addr);
- fpa11->fType[Fn] = (x >> 14) & 0x00000003;
-
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- case typeDouble:
- {
- p[0] = tget32(addr + 8); /* Single */
- p[1] = tget32(addr + 4); /* double msw */
- p[2] = 0; /* empty */
- }
- break;
-
- case typeExtended:
- {
- p[1] = tget32(addr + 8);
- p[2] = tget32(addr + 4); /* msw */
- p[0] = (x & 0x80003fff);
- }
- break;
- }
-}
-
-static inline
-void storeSingle(const unsigned int Fn,unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- float32 val;
- register unsigned int *p = (unsigned int*)&val;
-
- switch (fpa11->fType[Fn])
- {
- case typeDouble:
- val = float64_to_float32(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status);
- break;
-
- case typeExtended:
- val = floatx80_to_float32(fpa11->fpreg[Fn].fExtended, &fpa11->fp_status);
- break;
-
- default: val = fpa11->fpreg[Fn].fSingle;
- }
-
- tput32(addr, p[0]);
-}
-
-static inline
-void storeDouble(const unsigned int Fn,unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- float64 val;
- register unsigned int *p = (unsigned int*)&val;
-
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- val = float32_to_float64(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status);
- break;
-
- case typeExtended:
- val = floatx80_to_float64(fpa11->fpreg[Fn].fExtended, &fpa11->fp_status);
- break;
-
- default: val = fpa11->fpreg[Fn].fDouble;
- }
-#ifdef WORDS_BIGENDIAN
- tput32(addr, p[0]); /* msw */
- tput32(addr + 4, p[1]); /* lsw */
-#else
- tput32(addr, p[1]); /* msw */
- tput32(addr + 4, p[0]); /* lsw */
-#endif
-}
-
-static inline
-void storeExtended(const unsigned int Fn,unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- floatx80 val;
- register unsigned int *p = (unsigned int*)&val;
-
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- val = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- val = float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status);
- break;
-
- default: val = fpa11->fpreg[Fn].fExtended;
- }
-
- tput32(addr, p[0]); /* sign & exp */
- tput32(addr + 8, p[1]);
- tput32(addr + 4, p[2]); /* msw */
-}
-
-static inline
-void storeMultiple(const unsigned int Fn,unsigned int *pMem)
-{
- target_ulong addr = (target_ulong)(long)pMem;
- FPA11 *fpa11 = GET_FPA11();
- register unsigned int nType, *p;
-
- p = (unsigned int*)&(fpa11->fpreg[Fn]);
- nType = fpa11->fType[Fn];
-
- switch (nType)
- {
- case typeSingle:
- case typeDouble:
- {
- tput32(addr + 8, p[0]); /* single */
- tput32(addr + 4, p[1]); /* double msw */
- tput32(addr, nType << 14);
- }
- break;
-
- case typeExtended:
- {
- tput32(addr + 4, p[2]); /* msw */
- tput32(addr + 8, p[1]);
- tput32(addr, (p[0] & 0x80003fff) | (nType << 14));
- }
- break;
- }
-}
-
-unsigned int PerformLDF(const unsigned int opcode)
-{
- unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
- write_back = WRITE_BACK(opcode);
-
- //printk("PerformLDF(0x%08x), Fd = 0x%08x\n",opcode,getFd(opcode));
-
- pBase = (unsigned int*)readRegister(getRn(opcode));
- if (REG_PC == getRn(opcode))
- {
- pBase += 2;
- write_back = 0;
- }
-
- pFinal = pBase;
- if (BIT_UP_SET(opcode))
- pFinal += getOffset(opcode);
- else
- pFinal -= getOffset(opcode);
-
- if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase;
-
- switch (opcode & MASK_TRANSFER_LENGTH)
- {
- case TRANSFER_SINGLE : loadSingle(getFd(opcode),pAddress); break;
- case TRANSFER_DOUBLE : loadDouble(getFd(opcode),pAddress); break;
- case TRANSFER_EXTENDED: loadExtended(getFd(opcode),pAddress); break;
- default: nRc = 0;
- }
-
- if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal);
- return nRc;
-}
-
-unsigned int PerformSTF(const unsigned int opcode)
-{
- unsigned int *pBase, *pAddress, *pFinal, nRc = 1,
- write_back = WRITE_BACK(opcode);
-
- //printk("PerformSTF(0x%08x), Fd = 0x%08x\n",opcode,getFd(opcode));
- SetRoundingMode(ROUND_TO_NEAREST);
-
- pBase = (unsigned int*)readRegister(getRn(opcode));
- if (REG_PC == getRn(opcode))
- {
- pBase += 2;
- write_back = 0;
- }
-
- pFinal = pBase;
- if (BIT_UP_SET(opcode))
- pFinal += getOffset(opcode);
- else
- pFinal -= getOffset(opcode);
-
- if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase;
-
- switch (opcode & MASK_TRANSFER_LENGTH)
- {
- case TRANSFER_SINGLE : storeSingle(getFd(opcode),pAddress); break;
- case TRANSFER_DOUBLE : storeDouble(getFd(opcode),pAddress); break;
- case TRANSFER_EXTENDED: storeExtended(getFd(opcode),pAddress); break;
- default: nRc = 0;
- }
-
- if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal);
- return nRc;
-}
-
-unsigned int PerformLFM(const unsigned int opcode)
-{
- unsigned int i, Fd, *pBase, *pAddress, *pFinal,
- write_back = WRITE_BACK(opcode);
-
- pBase = (unsigned int*)readRegister(getRn(opcode));
- if (REG_PC == getRn(opcode))
- {
- pBase += 2;
- write_back = 0;
- }
-
- pFinal = pBase;
- if (BIT_UP_SET(opcode))
- pFinal += getOffset(opcode);
- else
- pFinal -= getOffset(opcode);
-
- if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase;
-
- Fd = getFd(opcode);
- for (i=getRegisterCount(opcode);i>0;i--)
- {
- loadMultiple(Fd,pAddress);
- pAddress += 3; Fd++;
- if (Fd == 8) Fd = 0;
- }
-
- if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal);
- return 1;
-}
-
-unsigned int PerformSFM(const unsigned int opcode)
-{
- unsigned int i, Fd, *pBase, *pAddress, *pFinal,
- write_back = WRITE_BACK(opcode);
-
- pBase = (unsigned int*)readRegister(getRn(opcode));
- if (REG_PC == getRn(opcode))
- {
- pBase += 2;
- write_back = 0;
- }
-
- pFinal = pBase;
- if (BIT_UP_SET(opcode))
- pFinal += getOffset(opcode);
- else
- pFinal -= getOffset(opcode);
-
- if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase;
-
- Fd = getFd(opcode);
- for (i=getRegisterCount(opcode);i>0;i--)
- {
- storeMultiple(Fd,pAddress);
- pAddress += 3; Fd++;
- if (Fd == 8) Fd = 0;
- }
-
- if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal);
- return 1;
-}
-
-#if 1
-unsigned int EmulateCPDT(const unsigned int opcode)
-{
- unsigned int nRc = 0;
-
- //printk("EmulateCPDT(0x%08x)\n",opcode);
-
- if (LDF_OP(opcode))
- {
- nRc = PerformLDF(opcode);
- }
- else if (LFM_OP(opcode))
- {
- nRc = PerformLFM(opcode);
- }
- else if (STF_OP(opcode))
- {
- nRc = PerformSTF(opcode);
- }
- else if (SFM_OP(opcode))
- {
- nRc = PerformSFM(opcode);
- }
- else
- {
- nRc = 0;
- }
-
- return nRc;
-}
-#endif
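
All four transfer routines share the same address arithmetic: the 8-bit offset counts words, the U bit selects up or down, pre-indexing decides whether the transfer uses the updated address, and write-back (suppressed when Rn is the PC) stores the final address back into Rn. A sketch of just that computation:

/* Sketch of the LDF/STF effective-address calculation. Offsets are in
   words (the real code does pointer arithmetic on unsigned int *). */
#include <stdio.h>

static unsigned int transfer_addr(unsigned int base, unsigned int offset,
                                  int up, int preindexed,
                                  unsigned int *final)
{
    *final = up ? base + 4 * offset : base - 4 * offset;
    return preindexed ? *final : base;
}

int main(void)
{
    unsigned int final;
    unsigned int addr = transfer_addr(0x1000, 2, 1, 1, &final);
    printf("addr=0x%x final=0x%x\n", addr, final);  /* both 0x1008 */
    return 0;
}
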
diff --git a/target-arm/nwfpe/fpa11_cprt.c b/target-arm/nwfpe/fpa11_cprt.c
deleted file mode 100644
index 91f2d80..0000000
--- a/target-arm/nwfpe/fpa11_cprt.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
- (c) Philip Blundell, 1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "softfloat.h"
-#include "fpopcode.h"
-#include "fpa11.inl"
-//#include "fpmodule.h"
-//#include "fpmodule.inl"
-
-extern flag floatx80_is_nan(floatx80);
-extern flag float64_is_nan( float64);
-extern flag float32_is_nan( float32);
-
-void SetRoundingMode(const unsigned int opcode);
-
-unsigned int PerformFLT(const unsigned int opcode);
-unsigned int PerformFIX(const unsigned int opcode);
-
-static unsigned int
-PerformComparison(const unsigned int opcode);
-
-unsigned int EmulateCPRT(const unsigned int opcode)
-{
- unsigned int nRc = 1;
-
- //printk("EmulateCPRT(0x%08x)\n",opcode);
-
- if (opcode & 0x800000)
- {
- /* This is some variant of a comparison (PerformComparison will
- sort out which one). Since most of the other CPRT
- instructions are oddball cases of some sort or other it makes
- sense to pull this out into a fast path. */
- return PerformComparison(opcode);
- }
-
- /* Hint to GCC that we'd like a jump table rather than a load of CMPs */
- switch ((opcode & 0x700000) >> 20)
- {
- case FLT_CODE >> 20: nRc = PerformFLT(opcode); break;
- case FIX_CODE >> 20: nRc = PerformFIX(opcode); break;
-
- case WFS_CODE >> 20: writeFPSR(readRegister(getRd(opcode))); break;
- case RFS_CODE >> 20: writeRegister(getRd(opcode),readFPSR()); break;
-
-#if 0 /* We currently have no use for the FPCR, so there's no point
- in emulating it. */
- case WFC_CODE >> 20: writeFPCR(readRegister(getRd(opcode)));
- case RFC_CODE >> 20: writeRegister(getRd(opcode),readFPCR()); break;
-#endif
-
- default: nRc = 0;
- }
-
- return nRc;
-}
-
-unsigned int PerformFLT(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
-
- unsigned int nRc = 1;
- SetRoundingMode(opcode);
-
- switch (opcode & MASK_ROUNDING_PRECISION)
- {
- case ROUND_SINGLE:
- {
- fpa11->fType[getFn(opcode)] = typeSingle;
- fpa11->fpreg[getFn(opcode)].fSingle =
- int32_to_float32(readRegister(getRd(opcode)), &fpa11->fp_status);
- }
- break;
-
- case ROUND_DOUBLE:
- {
- fpa11->fType[getFn(opcode)] = typeDouble;
- fpa11->fpreg[getFn(opcode)].fDouble =
- int32_to_float64(readRegister(getRd(opcode)), &fpa11->fp_status);
- }
- break;
-
- case ROUND_EXTENDED:
- {
- fpa11->fType[getFn(opcode)] = typeExtended;
- fpa11->fpreg[getFn(opcode)].fExtended =
- int32_to_floatx80(readRegister(getRd(opcode)), &fpa11->fp_status);
- }
- break;
-
- default: nRc = 0;
- }
-
- return nRc;
-}
-
-unsigned int PerformFIX(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
- unsigned int nRc = 1;
- unsigned int Fn = getFm(opcode);
-
- SetRoundingMode(opcode);
-
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- {
- writeRegister(getRd(opcode),
- float32_to_int32(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status));
- }
- break;
-
- case typeDouble:
- {
- //printf("F%d is 0x%" PRIx64 "\n",Fn,fpa11->fpreg[Fn].fDouble);
- writeRegister(getRd(opcode),
- float64_to_int32(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status));
- }
- break;
-
- case typeExtended:
- {
- writeRegister(getRd(opcode),
- floatx80_to_int32(fpa11->fpreg[Fn].fExtended, &fpa11->fp_status));
- }
- break;
-
- default: nRc = 0;
- }
-
- return nRc;
-}
-
-
-static unsigned int __inline__
-PerformComparisonOperation(floatx80 Fn, floatx80 Fm)
-{
- FPA11 *fpa11 = GET_FPA11();
- unsigned int flags = 0;
-
- /* test for less than condition */
- if (floatx80_lt(Fn,Fm, &fpa11->fp_status))
- {
- flags |= CC_NEGATIVE;
- }
-
- /* test for equal condition */
- if (floatx80_eq(Fn,Fm, &fpa11->fp_status))
- {
- flags |= CC_ZERO;
- }
-
- /* test for greater than or equal condition */
- if (floatx80_lt(Fm,Fn, &fpa11->fp_status))
- {
- flags |= CC_CARRY;
- }
-
- writeConditionCodes(flags);
- return 1;
-}
-
-/* This instruction sets the flags N, Z, C, V in the FPSR. */
-
-static unsigned int PerformComparison(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
- unsigned int Fn, Fm;
- floatx80 rFn, rFm;
- int e_flag = opcode & 0x400000; /* 1 if CxFE */
- int n_flag = opcode & 0x200000; /* 1 if CNxx */
- unsigned int flags = 0;
-
- //printk("PerformComparison(0x%08x)\n",opcode);
-
- Fn = getFn(opcode);
- Fm = getFm(opcode);
-
- /* Check for unordered condition and convert all operands to 80-bit
- format.
- ?? Might be some mileage in avoiding this conversion if possible.
- Eg, if both operands are 32-bit, detect this and do a 32-bit
- comparison (cheaper than an 80-bit one). */
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- //printk("single.\n");
- if (float32_is_nan(fpa11->fpreg[Fn].fSingle))
- goto unordered;
- rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- //printk("double.\n");
- if (float64_is_nan(fpa11->fpreg[Fn].fDouble))
- goto unordered;
- rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status);
- break;
-
- case typeExtended:
- //printk("extended.\n");
- if (floatx80_is_nan(fpa11->fpreg[Fn].fExtended))
- goto unordered;
- rFn = fpa11->fpreg[Fn].fExtended;
- break;
-
- default: return 0;
- }
-
- if (CONSTANT_FM(opcode))
- {
- //printk("Fm is a constant: #%d.\n",Fm);
- rFm = getExtendedConstant(Fm);
- if (floatx80_is_nan(rFm))
- goto unordered;
- }
- else
- {
- //printk("Fm = r%d which contains a ",Fm);
- switch (fpa11->fType[Fm])
- {
- case typeSingle:
- //printk("single.\n");
- if (float32_is_nan(fpa11->fpreg[Fm].fSingle))
- goto unordered;
- rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status);
- break;
-
- case typeDouble:
- //printk("double.\n");
- if (float64_is_nan(fpa11->fpreg[Fm].fDouble))
- goto unordered;
- rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble, &fpa11->fp_status);
- break;
-
- case typeExtended:
- //printk("extended.\n");
- if (floatx80_is_nan(fpa11->fpreg[Fm].fExtended))
- goto unordered;
- rFm = fpa11->fpreg[Fm].fExtended;
- break;
-
- default: return 0;
- }
- }
-
- if (n_flag)
- {
- rFm.high ^= 0x8000;
- }
-
- return PerformComparisonOperation(rFn,rFm);
-
- unordered:
- /* ?? The FPA data sheet is pretty vague about this, in particular
- about whether the non-E comparisons can ever raise exceptions.
- This implementation is based on a combination of what it says in
- the data sheet, observation of how the Acorn emulator actually
- behaves (and how programs expect it to) and guesswork. */
- flags |= CC_OVERFLOW;
- flags &= ~(CC_ZERO | CC_NEGATIVE);
-
- if (BIT_AC & readFPSR()) flags |= CC_CARRY;
-
- if (e_flag) float_raise(float_flag_invalid, &fpa11->fp_status);
-
- writeConditionCodes(flags);
- return 1;
-}
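
PerformComparisonOperation maps the three ordered outcomes onto the NZCV flags: N for Fn < Fm, Z for equality, and C when Fm < Fn; the unordered path sets V instead. A host-float sketch of the same mapping (NaN handling omitted):

/* Sketch of the CMF flag mapping: N = less-than, Z = equal, C set when
   Fm < Fn, mirroring the three floatx80 tests in the deleted code. */
#include <stdio.h>

#define CC_NEGATIVE 0x80000000u
#define CC_ZERO     0x40000000u
#define CC_CARRY    0x20000000u

static unsigned int compare_flags(double fn, double fm)
{
    unsigned int flags = 0;
    if (fn < fm)  flags |= CC_NEGATIVE;
    if (fn == fm) flags |= CC_ZERO;
    if (fm < fn)  flags |= CC_CARRY;
    return flags;
}

int main(void)
{
    printf("0x%08x\n", compare_flags(1.0, 2.0));  /* 0x80000000: N */
    printf("0x%08x\n", compare_flags(2.0, 2.0));  /* 0x40000000: Z */
    return 0;
}
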
diff --git a/target-arm/nwfpe/fpopcode.c b/target-arm/nwfpe/fpopcode.c
deleted file mode 100644
index d29e913..0000000
--- a/target-arm/nwfpe/fpopcode.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "softfloat.h"
-#include "fpopcode.h"
-#include "fpsr.h"
-//#include "fpmodule.h"
-//#include "fpmodule.inl"
-
-const floatx80 floatx80Constant[] = {
- { 0x0000000000000000ULL, 0x0000}, /* extended 0.0 */
- { 0x8000000000000000ULL, 0x3fff}, /* extended 1.0 */
- { 0x8000000000000000ULL, 0x4000}, /* extended 2.0 */
- { 0xc000000000000000ULL, 0x4000}, /* extended 3.0 */
- { 0x8000000000000000ULL, 0x4001}, /* extended 4.0 */
- { 0xa000000000000000ULL, 0x4001}, /* extended 5.0 */
- { 0x8000000000000000ULL, 0x3ffe}, /* extended 0.5 */
- { 0xa000000000000000ULL, 0x4002} /* extended 10.0 */
-};
-
-const float64 float64Constant[] = {
- 0x0000000000000000ULL, /* double 0.0 */
- 0x3ff0000000000000ULL, /* double 1.0 */
- 0x4000000000000000ULL, /* double 2.0 */
- 0x4008000000000000ULL, /* double 3.0 */
- 0x4010000000000000ULL, /* double 4.0 */
- 0x4014000000000000ULL, /* double 5.0 */
- 0x3fe0000000000000ULL, /* double 0.5 */
- 0x4024000000000000ULL /* double 10.0 */
-};
-
-const float32 float32Constant[] = {
- 0x00000000, /* single 0.0 */
- 0x3f800000, /* single 1.0 */
- 0x40000000, /* single 2.0 */
- 0x40400000, /* single 3.0 */
- 0x40800000, /* single 4.0 */
- 0x40a00000, /* single 5.0 */
- 0x3f000000, /* single 0.5 */
- 0x41200000 /* single 10.0 */
-};
-
-unsigned int getTransferLength(const unsigned int opcode)
-{
- unsigned int nRc;
-
- switch (opcode & MASK_TRANSFER_LENGTH)
- {
- case 0x00000000: nRc = 1; break; /* single precision */
- case 0x00008000: nRc = 2; break; /* double precision */
- case 0x00400000: nRc = 3; break; /* extended precision */
- default: nRc = 0;
- }
-
- return(nRc);
-}
-
-unsigned int getRegisterCount(const unsigned int opcode)
-{
- unsigned int nRc;
-
- switch (opcode & MASK_REGISTER_COUNT)
- {
- case 0x00000000: nRc = 4; break;
- case 0x00008000: nRc = 1; break;
- case 0x00400000: nRc = 2; break;
- case 0x00408000: nRc = 3; break;
- default: nRc = 0;
- }
-
- return(nRc);
-}
-
-unsigned int getRoundingPrecision(const unsigned int opcode)
-{
- unsigned int nRc;
-
- switch (opcode & MASK_ROUNDING_PRECISION)
- {
- case 0x00000000: nRc = 1; break;
- case 0x00000080: nRc = 2; break;
- case 0x00080000: nRc = 3; break;
- default: nRc = 0;
- }
-
- return(nRc);
-}
-
-unsigned int getDestinationSize(const unsigned int opcode)
-{
- unsigned int nRc;
-
- switch (opcode & MASK_DESTINATION_SIZE)
- {
- case 0x00000000: nRc = typeSingle; break;
- case 0x00000080: nRc = typeDouble; break;
- case 0x00080000: nRc = typeExtended; break;
- default: nRc = typeNone;
- }
-
- return(nRc);
-}
-
-/* condition code lookup table
- index into the table is test code: EQ, NE, ... LT, GT, AL, NV
- bit position in short is condition code: NZCV */
-static const unsigned short aCC[16] = {
- 0xF0F0, // EQ == Z set
- 0x0F0F, // NE
- 0xCCCC, // CS == C set
- 0x3333, // CC
- 0xFF00, // MI == N set
- 0x00FF, // PL
- 0xAAAA, // VS == V set
- 0x5555, // VC
- 0x0C0C, // HI == C set && Z clear
- 0xF3F3, // LS == C clear || Z set
- 0xAA55, // GE == (N==V)
- 0x55AA, // LT == (N!=V)
- 0x0A05, // GT == (!Z && (N==V))
- 0xF5FA, // LE == (Z || (N!=V))
- 0xFFFF, // AL always
- 0 // NV
-};
-
-unsigned int checkCondition(const unsigned int opcode, const unsigned int ccodes)
-{
- return (aCC[opcode>>28] >> (ccodes>>28)) & 1;
-}
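
checkCondition needs no branches because each 16-bit aCC entry packs one pass/fail bit for every possible NZCV nibble: the condition selects the entry, the current flags select the bit. A worked sketch for EQ:

/* Sketch of the condition-code lookup. EQ's entry is 0xF0F0: its bit n is
   set exactly for the NZCV nibbles n that have Z (value 4) set. */
#include <stdio.h>

static const unsigned short aCC_eq = 0xF0F0;

static int check_eq(unsigned int cpsr)
{
    return (aCC_eq >> (cpsr >> 28)) & 1;   /* same shape as checkCondition */
}

int main(void)
{
    printf("%d\n", check_eq(0x40000000));  /* Z set -> 1 */
    printf("%d\n", check_eq(0x80000000));  /* N set -> 0 */
    return 0;
}
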
diff --git a/target-arm/nwfpe/fpopcode.h b/target-arm/nwfpe/fpopcode.h
deleted file mode 100644
index 13c7419..0000000
--- a/target-arm/nwfpe/fpopcode.h
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef __FPOPCODE_H__
-#define __FPOPCODE_H__
-
-/*
-ARM Floating Point Instruction Classes
-| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-|c o n d|1 1 0 P|U|u|W|L| Rn |v| Fd |0|0|0|1| o f f s e t | CPDT
-|c o n d|1 1 0 P|U|w|W|L| Rn |x| Fd |0|0|0|1| o f f s e t | CPDT
-| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-|c o n d|1 1 1 0|a|b|c|d|e| Fn |j| Fd |0|0|0|1|f|g|h|0|i| Fm | CPDO
-|c o n d|1 1 1 0|a|b|c|L|e| Fn | Rd |0|0|0|1|f|g|h|1|i| Fm | CPRT
-|c o n d|1 1 1 0|a|b|c|1|e| Fn |1|1|1|1|0|0|0|1|f|g|h|1|i| Fm | comparisons
-| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
-
-CPDT data transfer instructions
- LDF, STF, LFM, SFM
-
-CPDO dyadic arithmetic instructions
- ADF, MUF, SUF, RSF, DVF, RDF,
- POW, RPW, RMF, FML, FDV, FRD, POL
-
-CPDO monadic arithmetic instructions
- MVF, MNF, ABS, RND, SQT, LOG, LGN, EXP,
- SIN, COS, TAN, ASN, ACS, ATN, URD, NRM
-
-CPRT joint arithmetic/data transfer instructions
- FIX (arithmetic followed by load/store)
- FLT (load/store followed by arithmetic)
- CMF, CNF CMFE, CNFE (comparisons)
- WFS, RFS (write/read floating point status register)
- WFC, RFC (write/read floating point control register)
-
-cond condition codes
-P pre/post index bit: 0 = postindex, 1 = preindex
-U up/down bit: 0 = stack grows down, 1 = stack grows up
-W write back bit: 1 = update base register (Rn)
-L load/store bit: 0 = store, 1 = load
-Rn base register
-Rd destination/source register
-Fd floating point destination register
-Fn floating point source register
-Fm floating point source register or floating point constant
-
-uv transfer length (TABLE 1)
-wx register count (TABLE 2)
-abcd arithmetic opcode (TABLES 3 & 4)
-ef destination size (rounding precision) (TABLE 5)
-gh rounding mode (TABLE 6)
-j dyadic/monadic bit: 0 = dyadic, 1 = monadic
-i constant bit: 1 = constant (TABLE 6)
-*/
-
-/*
-TABLE 1
-+-------------------------+---+---+---------+---------+
-| Precision | u | v | FPSR.EP | length |
-+-------------------------+---+---+---------+---------+
-| Single | 0 | 0 | x | 1 words |
-| Double | 1 | 1 | x | 2 words |
-| Extended | 1 | 1 | x | 3 words |
-| Packed decimal | 1 | 1 | 0 | 3 words |
-| Expanded packed decimal | 1 | 1 | 1 | 4 words |
-+-------------------------+---+---+---------+---------+
-Note: x = don't care
-*/
-
-/*
-TABLE 2
-+---+---+---------------------------------+
-| w | x | Number of registers to transfer |
-+---+---+---------------------------------+
-| 0 | 1 | 1 |
-| 1 | 0 | 2 |
-| 1 | 1 | 3 |
-| 0 | 0 | 4 |
-+---+---+---------------------------------+
-*/
-
-/*
-TABLE 3: Dyadic Floating Point Opcodes
-+---+---+---+---+----------+-----------------------+-----------------------+
-| a | b | c | d | Mnemonic | Description | Operation |
-+---+---+---+---+----------+-----------------------+-----------------------+
-| 0 | 0 | 0 | 0 | ADF | Add | Fd := Fn + Fm |
-| 0 | 0 | 0 | 1 | MUF | Multiply | Fd := Fn * Fm |
-| 0 | 0 | 1 | 0 | SUF | Subtract | Fd := Fn - Fm |
-| 0 | 0 | 1 | 1 | RSF | Reverse subtract | Fd := Fm - Fn |
-| 0 | 1 | 0 | 0 | DVF | Divide | Fd := Fn / Fm |
-| 0 | 1 | 0 | 1 | RDF | Reverse divide | Fd := Fm / Fn |
-| 0 | 1 | 1 | 0 | POW | Power | Fd := Fn ^ Fm |
-| 0 | 1 | 1 | 1 | RPW | Reverse power | Fd := Fm ^ Fn |
-| 1 | 0 | 0 | 0 | RMF | Remainder | Fd := IEEE rem(Fn/Fm) |
-| 1 | 0 | 0 | 1 | FML | Fast Multiply | Fd := Fn * Fm |
-| 1 | 0 | 1 | 0 | FDV | Fast Divide | Fd := Fn / Fm |
-| 1 | 0 | 1 | 1 | FRD | Fast reverse divide | Fd := Fm / Fn |
-| 1 | 1 | 0 | 0 | POL | Polar angle (ArcTan2) | Fd := arctan2(Fn,Fm) |
-| 1 | 1 | 0 | 1 | | undefined instruction | trap |
-| 1 | 1 | 1 | 0 | | undefined instruction | trap |
-| 1 | 1 | 1 | 1 | | undefined instruction | trap |
-+---+---+---+---+----------+-----------------------+-----------------------+
-Note: POW, RPW, POL are deprecated, and are available for backwards
- compatibility only.
-*/
-
-/*
-TABLE 4: Monadic Floating Point Opcodes
-+---+---+---+---+----------+-----------------------+-----------------------+
-| a | b | c | d | Mnemonic | Description | Operation |
-+---+---+---+---+----------+-----------------------+-----------------------+
-| 0 | 0 | 0 | 0 | MVF | Move | Fd := Fm |
-| 0 | 0 | 0 | 1 | MNF | Move negated | Fd := - Fm |
-| 0 | 0 | 1 | 0 | ABS | Absolute value | Fd := abs(Fm) |
-| 0 | 0 | 1 | 1 | RND | Round to integer | Fd := int(Fm) |
-| 0 | 1 | 0 | 0 | SQT | Square root | Fd := sqrt(Fm) |
-| 0 | 1 | 0 | 1 | LOG | Log base 10 | Fd := log10(Fm) |
-| 0 | 1 | 1 | 0 | LGN | Log base e | Fd := ln(Fm) |
-| 0 | 1 | 1 | 1 | EXP | Exponent | Fd := e ^ Fm |
-| 1 | 0 | 0 | 0 | SIN | Sine | Fd := sin(Fm) |
-| 1 | 0 | 0 | 1 | COS | Cosine | Fd := cos(Fm) |
-| 1 | 0 | 1 | 0 | TAN | Tangent | Fd := tan(Fm) |
-| 1 | 0 | 1 | 1 | ASN | Arc Sine | Fd := arcsin(Fm) |
-| 1 | 1 | 0 | 0 | ACS | Arc Cosine | Fd := arccos(Fm) |
-| 1 | 1 | 0 | 1 | ATN | Arc Tangent | Fd := arctan(Fm) |
-| 1 | 1 | 1 | 0 | URD | Unnormalized round | Fd := int(Fm) |
-| 1 | 1 | 1 | 1 | NRM | Normalize | Fd := norm(Fm) |
-+---+---+---+---+----------+-----------------------+-----------------------+
-Note: LOG, LGN, EXP, SIN, COS, TAN, ASN, ACS, ATN are deprecated, and are
- available for backwards compatibility only.
-*/
-
-/*
-TABLE 5
-+-------------------------+---+---+
-| Rounding Precision | e | f |
-+-------------------------+---+---+
-| IEEE Single precision | 0 | 0 |
-| IEEE Double precision | 0 | 1 |
-| IEEE Extended precision | 1 | 0 |
-| undefined (trap) | 1 | 1 |
-+-------------------------+---+---+
-*/
-
-/*
-TABLE 6
-+---------------------------------+---+---+
-| Rounding Mode | g | h |
-+---------------------------------+---+---+
-| Round to nearest (default) | 0 | 0 |
-| Round toward plus infinity | 0 | 1 |
-| Round toward negative infinity | 1 | 0 |
-| Round toward zero | 1 | 1 |
-+---------------------------------+---+---+
-*/
-
-/*
-===
-=== Definitions for load and store instructions
-===
-*/
-
-/* bit masks */
-#define BIT_PREINDEX 0x01000000
-#define BIT_UP 0x00800000
-#define BIT_WRITE_BACK 0x00200000
-#define BIT_LOAD 0x00100000
-
-/* masks for load/store */
-#define MASK_CPDT 0x0c000000 /* data processing opcode */
-#define MASK_OFFSET 0x000000ff
-#define MASK_TRANSFER_LENGTH 0x00408000
-#define MASK_REGISTER_COUNT MASK_TRANSFER_LENGTH
-#define MASK_COPROCESSOR 0x00000f00
-
-/* Tests for transfer length */
-#define TRANSFER_SINGLE 0x00000000
-#define TRANSFER_DOUBLE 0x00008000
-#define TRANSFER_EXTENDED 0x00400000
-#define TRANSFER_PACKED MASK_TRANSFER_LENGTH
-
-/* Get the coprocessor number from the opcode. */
-#define getCoprocessorNumber(opcode) ((opcode & MASK_COPROCESSOR) >> 8)
-
-/* Get the offset from the opcode. */
-#define getOffset(opcode) (opcode & MASK_OFFSET)
-
-/* Tests for specific data transfer load/store opcodes. */
-#define TEST_OPCODE(opcode,mask) (((opcode) & (mask)) == (mask))
-
-#define LOAD_OP(opcode) TEST_OPCODE((opcode),MASK_CPDT | BIT_LOAD)
-#define STORE_OP(opcode) ((opcode & (MASK_CPDT | BIT_LOAD)) == MASK_CPDT)
-
-#define LDF_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 1))
-#define LFM_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 2))
-#define STF_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 1))
-#define SFM_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 2))
-
-#define PREINDEXED(opcode) ((opcode & BIT_PREINDEX) != 0)
-#define POSTINDEXED(opcode) ((opcode & BIT_PREINDEX) == 0)
-#define BIT_UP_SET(opcode) ((opcode & BIT_UP) != 0)
-#define BIT_UP_CLEAR(opcode) ((opcode & BIT_UP) == 0)
-#define WRITE_BACK(opcode) ((opcode & BIT_WRITE_BACK) != 0)
-#define LOAD(opcode) ((opcode & BIT_LOAD) != 0)
-#define STORE(opcode) ((opcode & BIT_LOAD) == 0)
-
-/*
-===
-=== Definitions for arithmetic instructions
-===
-*/
-/* bit masks */
-#define BIT_MONADIC 0x00008000
-#define BIT_CONSTANT 0x00000008
-
-#define CONSTANT_FM(opcode) ((opcode & BIT_CONSTANT) != 0)
-#define MONADIC_INSTRUCTION(opcode) ((opcode & BIT_MONADIC) != 0)
-
-/* instruction identification masks */
-#define MASK_CPDO 0x0e000000 /* arithmetic opcode */
-#define MASK_ARITHMETIC_OPCODE 0x00f08000
-#define MASK_DESTINATION_SIZE 0x00080080
-
-/* dyadic arithmetic opcodes. */
-#define ADF_CODE 0x00000000
-#define MUF_CODE 0x00100000
-#define SUF_CODE 0x00200000
-#define RSF_CODE 0x00300000
-#define DVF_CODE 0x00400000
-#define RDF_CODE 0x00500000
-#define POW_CODE 0x00600000
-#define RPW_CODE 0x00700000
-#define RMF_CODE 0x00800000
-#define FML_CODE 0x00900000
-#define FDV_CODE 0x00a00000
-#define FRD_CODE 0x00b00000
-#define POL_CODE 0x00c00000
-/* 0x00d00000 is an invalid dyadic arithmetic opcode */
-/* 0x00e00000 is an invalid dyadic arithmetic opcode */
-/* 0x00f00000 is an invalid dyadic arithmetic opcode */
-
-/* monadic arithmetic opcodes. */
-#define MVF_CODE 0x00008000
-#define MNF_CODE 0x00108000
-#define ABS_CODE 0x00208000
-#define RND_CODE 0x00308000
-#define SQT_CODE 0x00408000
-#define LOG_CODE 0x00508000
-#define LGN_CODE 0x00608000
-#define EXP_CODE 0x00708000
-#define SIN_CODE 0x00808000
-#define COS_CODE 0x00908000
-#define TAN_CODE 0x00a08000
-#define ASN_CODE 0x00b08000
-#define ACS_CODE 0x00c08000
-#define ATN_CODE 0x00d08000
-#define URD_CODE 0x00e08000
-#define NRM_CODE 0x00f08000
-
-/*
-===
-=== Definitions for register transfer and comparison instructions
-===
-*/
-
-#define MASK_CPRT 0x0e000010 /* register transfer opcode */
-#define MASK_CPRT_CODE 0x00f00000
-#define FLT_CODE 0x00000000
-#define FIX_CODE 0x00100000
-#define WFS_CODE 0x00200000
-#define RFS_CODE 0x00300000
-#define WFC_CODE 0x00400000
-#define RFC_CODE 0x00500000
-#define CMF_CODE 0x00900000
-#define CNF_CODE 0x00b00000
-#define CMFE_CODE 0x00d00000
-#define CNFE_CODE 0x00f00000
-
-/*
-===
-=== Common definitions
-===
-*/
-
-/* register masks */
-#define MASK_Rd 0x0000f000
-#define MASK_Rn 0x000f0000
-#define MASK_Fd 0x00007000
-#define MASK_Fm 0x00000007
-#define MASK_Fn 0x00070000
-
-/* condition code masks */
-#define CC_MASK 0xf0000000
-#define CC_NEGATIVE 0x80000000
-#define CC_ZERO 0x40000000
-#define CC_CARRY 0x20000000
-#define CC_OVERFLOW 0x10000000
-#define CC_EQ 0x00000000
-#define CC_NE 0x10000000
-#define CC_CS 0x20000000
-#define CC_HS CC_CS
-#define CC_CC 0x30000000
-#define CC_LO CC_CC
-#define CC_MI 0x40000000
-#define CC_PL 0x50000000
-#define CC_VS 0x60000000
-#define CC_VC 0x70000000
-#define CC_HI 0x80000000
-#define CC_LS 0x90000000
-#define CC_GE 0xa0000000
-#define CC_LT 0xb0000000
-#define CC_GT 0xc0000000
-#define CC_LE 0xd0000000
-#define CC_AL 0xe0000000
-#define CC_NV 0xf0000000
-
-/* rounding masks/values */
-#define MASK_ROUNDING_MODE 0x00000060
-#define ROUND_TO_NEAREST 0x00000000
-#define ROUND_TO_PLUS_INFINITY 0x00000020
-#define ROUND_TO_MINUS_INFINITY 0x00000040
-#define ROUND_TO_ZERO 0x00000060
-
-#define MASK_ROUNDING_PRECISION 0x00080080
-#define ROUND_SINGLE 0x00000000
-#define ROUND_DOUBLE 0x00000080
-#define ROUND_EXTENDED 0x00080000
-
-/* Get the condition code from the opcode. */
-#define getCondition(opcode) (opcode >> 28)
-
-/* Get the source register from the opcode. */
-#define getRn(opcode) ((opcode & MASK_Rn) >> 16)
-
-/* Get the destination floating point register from the opcode. */
-#define getFd(opcode) ((opcode & MASK_Fd) >> 12)
-
-/* Get the first source floating point register from the opcode. */
-#define getFn(opcode) ((opcode & MASK_Fn) >> 16)
-
-/* Get the second source floating point register from the opcode. */
-#define getFm(opcode) (opcode & MASK_Fm)
-
-/* Get the destination register from the opcode. */
-#define getRd(opcode) ((opcode & MASK_Rd) >> 12)
-
-/* Get the rounding mode from the opcode. */
-#define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5)
-
-static inline const floatx80 getExtendedConstant(const unsigned int nIndex)
-{
- extern const floatx80 floatx80Constant[];
- return floatx80Constant[nIndex];
-}
-
-static inline const float64 getDoubleConstant(const unsigned int nIndex)
-{
- extern const float64 float64Constant[];
- return float64Constant[nIndex];
-}
-
-static inline const float32 getSingleConstant(const unsigned int nIndex)
-{
- extern const float32 float32Constant[];
- return float32Constant[nIndex];
-}
-
-extern unsigned int getRegisterCount(const unsigned int opcode);
-extern unsigned int getDestinationSize(const unsigned int opcode);
-
-#endif
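For orientation, the field-extraction macros above combine in a decoder
roughly as follows.  This is a minimal sketch, not code from this tree,
and the helper name is made up for illustration:

    /* Pull the operand fields out of an FPA CPDO instruction word. */
    static void decode_cpdo_fields(const unsigned int opcode)
    {
        unsigned int Fd = getFd(opcode);           /* destination, bits 14:12 */
        unsigned int Fm = getFm(opcode);           /* second source, bits 2:0 */
        unsigned int rm = getRoundingMode(opcode); /* rounding mode, bits 6:5 */
        if (!MONADIC_INSTRUCTION(opcode)) {
            /* Dyadic ops also have a first source register. */
            unsigned int Fn = getFn(opcode);       /* bits 18:16 */
        }
    }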
diff --git a/target-arm/nwfpe/fpsr.h b/target-arm/nwfpe/fpsr.h
deleted file mode 100644
index 6dafb0f..0000000
--- a/target-arm/nwfpe/fpsr.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.com, 1998-1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef __FPSR_H__
-#define __FPSR_H__
-
-/*
-The FPSR is a 32 bit register consisting of 4 parts, each exactly
-one byte.
-
- SYSTEM ID
- EXCEPTION TRAP ENABLE BYTE
- SYSTEM CONTROL BYTE
- CUMULATIVE EXCEPTION FLAGS BYTE
-
-The FPCR is a 32 bit register consisting of bit flags.
-*/
-
-/* SYSTEM ID
-------------
-Note: the system id byte is read only */
-
-typedef unsigned int FPSR; /* type for floating point status register */
-typedef unsigned int FPCR; /* type for floating point control register */
-
-#define MASK_SYSID 0xff000000
-#define BIT_HARDWARE 0x80000000
-#define FP_EMULATOR 0x01000000 /* System ID for emulator */
-#define FP_ACCELERATOR 0x81000000 /* System ID for FPA11 */
-
-/* EXCEPTION TRAP ENABLE BYTE
------------------------------ */
-
-#define MASK_TRAP_ENABLE 0x00ff0000
-#define MASK_TRAP_ENABLE_STRICT 0x001f0000
-#define BIT_IXE 0x00100000 /* inexact exception enable */
-#define BIT_UFE 0x00080000 /* underflow exception enable */
-#define BIT_OFE 0x00040000 /* overflow exception enable */
-#define BIT_DZE 0x00020000 /* divide by zero exception enable */
-#define BIT_IOE 0x00010000 /* invalid operation exception enable */
-
-/* SYSTEM CONTROL BYTE
----------------------- */
-
-#define MASK_SYSTEM_CONTROL 0x0000ff00
-#define MASK_TRAP_STRICT 0x00001f00
-
-#define BIT_AC 0x00001000 /* use alternative C-flag definition
- for compares */
-#define BIT_EP 0x00000800 /* use expanded packed decimal format */
-#define BIT_SO 0x00000400 /* select synchronous operation of FPA */
-#define BIT_NE 0x00000200 /* NaN exception bit */
-#define BIT_ND 0x00000100 /* no denormalized numbers bit */
-
-/* CUMULATIVE EXCEPTION FLAGS BYTE
----------------------------------- */
-
-#define MASK_EXCEPTION_FLAGS 0x000000ff
-#define MASK_EXCEPTION_FLAGS_STRICT 0x0000001f
-
-#define BIT_IXC 0x00000010 /* inexact exception flag */
-#define BIT_UFC 0x00000008 /* underflow exception flag */
-#define BIT_OFC		0x00000004	/* overflow exception flag */
-#define BIT_DZC 0x00000002 /* divide by zero exception flag */
-#define BIT_IOC 0x00000001 /* invalid operation exception flag */
-
-/* Floating Point Control Register
-----------------------------------*/
-
-#define BIT_RU 0x80000000 /* rounded up bit */
-#define BIT_IE 0x10000000 /* inexact bit */
-#define BIT_MO 0x08000000 /* mantissa overflow bit */
-#define BIT_EO 0x04000000 /* exponent overflow bit */
-#define BIT_SB 0x00000800 /* store bounce */
-#define BIT_AB 0x00000400 /* arithmetic bounce */
-#define BIT_RE 0x00000200 /* rounding exception */
-#define BIT_DA 0x00000100 /* disable FPA */
-
-#define MASK_OP 0x00f08010 /* AU operation code */
-#define MASK_PR 0x00080080 /* AU precision */
-#define MASK_S1 0x00070000 /* AU source register 1 */
-#define MASK_S2 0x00000007 /* AU source register 2 */
-#define MASK_DS 0x00007000 /* AU destination register */
-#define MASK_RM 0x00000060 /* AU rounding mode */
-#define MASK_ALU 0x9cfff2ff /* only ALU can write these bits */
-#define MASK_RESET 0x00000d00 /* bits set on reset, all others cleared */
-#define MASK_WFC MASK_RESET
-#define MASK_RFC ~MASK_RESET
-
-#endif
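As a quick illustration of how the trap-enable and cumulative-flag bytes
line up: for all five exceptions above, the enable bit sits exactly 16
bits above the matching flag bit.  A sketch under that assumption, not
code from the emulator:

    /* Record a cumulative exception flag, e.g. BIT_DZC; return non-zero
       if the corresponding trap (BIT_DZE in that case) is enabled. */
    static int fpsr_raise(FPSR *fpsr, const unsigned int flag)
    {
        *fpsr |= flag & MASK_EXCEPTION_FLAGS;
        return (*fpsr >> 16) & flag;
    }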
diff --git a/target-arm/nwfpe/single_cpdo.c b/target-arm/nwfpe/single_cpdo.c
deleted file mode 100644
index 7dd2620..0000000
--- a/target-arm/nwfpe/single_cpdo.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- NetWinder Floating Point Emulator
- (c) Rebel.COM, 1998,1999
-
- Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#include "fpa11.h"
-#include "softfloat.h"
-#include "fpopcode.h"
-
-float32 float32_exp(float32 Fm);
-float32 float32_ln(float32 Fm);
-float32 float32_sin(float32 rFm);
-float32 float32_cos(float32 rFm);
-float32 float32_arcsin(float32 rFm);
-float32 float32_arctan(float32 rFm);
-float32 float32_log(float32 rFm);
-float32 float32_tan(float32 rFm);
-float32 float32_arccos(float32 rFm);
-float32 float32_pow(float32 rFn,float32 rFm);
-float32 float32_pol(float32 rFn,float32 rFm);
-
-unsigned int SingleCPDO(const unsigned int opcode)
-{
- FPA11 *fpa11 = GET_FPA11();
- float32 rFm, rFn = 0;
- unsigned int Fd, Fm, Fn, nRc = 1;
-
- Fm = getFm(opcode);
- if (CONSTANT_FM(opcode))
- {
- rFm = getSingleConstant(Fm);
- }
- else
- {
- switch (fpa11->fType[Fm])
- {
- case typeSingle:
- rFm = fpa11->fpreg[Fm].fSingle;
- break;
-
- default: return 0;
- }
- }
-
- if (!MONADIC_INSTRUCTION(opcode))
- {
- Fn = getFn(opcode);
- switch (fpa11->fType[Fn])
- {
- case typeSingle:
- rFn = fpa11->fpreg[Fn].fSingle;
- break;
-
- default: return 0;
- }
- }
-
- Fd = getFd(opcode);
- switch (opcode & MASK_ARITHMETIC_OPCODE)
- {
- /* dyadic opcodes */
- case ADF_CODE:
- fpa11->fpreg[Fd].fSingle = float32_add(rFn,rFm, &fpa11->fp_status);
- break;
-
- case MUF_CODE:
- case FML_CODE:
- fpa11->fpreg[Fd].fSingle = float32_mul(rFn,rFm, &fpa11->fp_status);
- break;
-
- case SUF_CODE:
- fpa11->fpreg[Fd].fSingle = float32_sub(rFn,rFm, &fpa11->fp_status);
- break;
-
- case RSF_CODE:
- fpa11->fpreg[Fd].fSingle = float32_sub(rFm,rFn, &fpa11->fp_status);
- break;
-
- case DVF_CODE:
- case FDV_CODE:
- fpa11->fpreg[Fd].fSingle = float32_div(rFn,rFm, &fpa11->fp_status);
- break;
-
- case RDF_CODE:
- case FRD_CODE:
- fpa11->fpreg[Fd].fSingle = float32_div(rFm,rFn, &fpa11->fp_status);
- break;
-
-#if 0
- case POW_CODE:
- fpa11->fpreg[Fd].fSingle = float32_pow(rFn,rFm);
- break;
-
- case RPW_CODE:
- fpa11->fpreg[Fd].fSingle = float32_pow(rFm,rFn);
- break;
-#endif
-
- case RMF_CODE:
- fpa11->fpreg[Fd].fSingle = float32_rem(rFn,rFm, &fpa11->fp_status);
- break;
-
-#if 0
- case POL_CODE:
- fpa11->fpreg[Fd].fSingle = float32_pol(rFn,rFm);
- break;
-#endif
-
- /* monadic opcodes */
- case MVF_CODE:
- fpa11->fpreg[Fd].fSingle = rFm;
- break;
-
- case MNF_CODE:
- rFm ^= 0x80000000;
- fpa11->fpreg[Fd].fSingle = rFm;
- break;
-
- case ABS_CODE:
- rFm &= 0x7fffffff;
- fpa11->fpreg[Fd].fSingle = rFm;
- break;
-
- case RND_CODE:
- case URD_CODE:
- fpa11->fpreg[Fd].fSingle = float32_round_to_int(rFm, &fpa11->fp_status);
- break;
-
- case SQT_CODE:
- fpa11->fpreg[Fd].fSingle = float32_sqrt(rFm, &fpa11->fp_status);
- break;
-
-#if 0
- case LOG_CODE:
- fpa11->fpreg[Fd].fSingle = float32_log(rFm);
- break;
-
- case LGN_CODE:
- fpa11->fpreg[Fd].fSingle = float32_ln(rFm);
- break;
-
- case EXP_CODE:
- fpa11->fpreg[Fd].fSingle = float32_exp(rFm);
- break;
-
- case SIN_CODE:
- fpa11->fpreg[Fd].fSingle = float32_sin(rFm);
- break;
-
- case COS_CODE:
- fpa11->fpreg[Fd].fSingle = float32_cos(rFm);
- break;
-
- case TAN_CODE:
- fpa11->fpreg[Fd].fSingle = float32_tan(rFm);
- break;
-
- case ASN_CODE:
- fpa11->fpreg[Fd].fSingle = float32_arcsin(rFm);
- break;
-
- case ACS_CODE:
- fpa11->fpreg[Fd].fSingle = float32_arccos(rFm);
- break;
-
- case ATN_CODE:
- fpa11->fpreg[Fd].fSingle = float32_arctan(rFm);
- break;
-#endif
-
- case NRM_CODE:
- break;
-
- default:
- {
- nRc = 0;
- }
- }
-
- if (0 != nRc) fpa11->fType[Fd] = typeSingle;
- return nRc;
-}
-
-#if 0
-float32 float32_exp(float32 Fm)
-{
-//series
-}
-
-float32 float32_ln(float32 Fm)
-{
-//series
-}
-
-float32 float32_sin(float32 rFm)
-{
-//series
-}
-
-float32 float32_cos(float32 rFm)
-{
-//series
-}
-
-float32 float32_arcsin(float32 rFm)
-{
-//series
-}
-
-float32 float32_arctan(float32 rFm)
-{
- //series
-}
-
-float32 float32_arccos(float32 rFm)
-{
- //return float32_sub(halfPi,float32_arcsin(rFm));
-}
-
-float32 float32_log(float32 rFm)
-{
- return float32_div(float32_ln(rFm),getSingleConstant(7));
-}
-
-float32 float32_tan(float32 rFm)
-{
- return float32_div(float32_sin(rFm),float32_cos(rFm));
-}
-
-float32 float32_pow(float32 rFn,float32 rFm)
-{
- return float32_exp(float32_mul(rFm,float32_ln(rFn)));
-}
-
-float32 float32_pol(float32 rFn,float32 rFm)
-{
- return float32_arctan(float32_div(rFn,rFm));
-}
-#endif
diff --git a/target-arm/op.c b/target-arm/op.c
deleted file mode 100644
index 48a81de..0000000
--- a/target-arm/op.c
+++ /dev/null
@@ -1,1230 +0,0 @@
-/*
- * ARM micro operations
- *
- * Copyright (c) 2003 Fabrice Bellard
- * Copyright (c) 2005 CodeSourcery, LLC
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include "exec.h"
-#ifdef GEN_TRACE
-#include "trace.h"
-#endif
-
-#define REGNAME r0
-#define REG (env->regs[0])
-#include "op_template.h"
-
-#define REGNAME r1
-#define REG (env->regs[1])
-#include "op_template.h"
-
-#define REGNAME r2
-#define REG (env->regs[2])
-#include "op_template.h"
-
-#define REGNAME r3
-#define REG (env->regs[3])
-#include "op_template.h"
-
-#define REGNAME r4
-#define REG (env->regs[4])
-#include "op_template.h"
-
-#define REGNAME r5
-#define REG (env->regs[5])
-#include "op_template.h"
-
-#define REGNAME r6
-#define REG (env->regs[6])
-#include "op_template.h"
-
-#define REGNAME r7
-#define REG (env->regs[7])
-#include "op_template.h"
-
-#define REGNAME r8
-#define REG (env->regs[8])
-#include "op_template.h"
-
-#define REGNAME r9
-#define REG (env->regs[9])
-#include "op_template.h"
-
-#define REGNAME r10
-#define REG (env->regs[10])
-#include "op_template.h"
-
-#define REGNAME r11
-#define REG (env->regs[11])
-#include "op_template.h"
-
-#define REGNAME r12
-#define REG (env->regs[12])
-#include "op_template.h"
-
-#define REGNAME r13
-#define REG (env->regs[13])
-#include "op_template.h"
-
-#define REGNAME r14
-#define REG (env->regs[14])
-#include "op_template.h"
-
-#define REGNAME r15
-#define REG (env->regs[15])
-#define SET_REG(x) REG = x & ~(uint32_t)1
-#include "op_template.h"
-
-#ifdef GEN_TRACE
-void OPPROTO op_shutdown(void)
-{
- extern void qemu_system_shutdown_request(void);
- qemu_system_shutdown_request();
- EXIT_TB();
-}
-
-void OPPROTO op_trace_bb(void)
-{
- trace_bb_helper(PARAM1, (TranslationBlock *)PARAM2);
-}
-
-void OPPROTO op_trace_insn(void)
-{
- trace_insn_helper();
-}
-
-void OPPROTO op_add_to_sim_time(void)
-{
- sim_time += PARAM1;
-}
-#endif
-
-void OPPROTO op_bx_T0(void)
-{
- env->regs[15] = T0 & ~(uint32_t)1;
- env->thumb = (T0 & 1) != 0;
-}
-
-void OPPROTO op_movl_T0_0(void)
-{
- T0 = 0;
-}
-
-void OPPROTO op_movl_T0_im(void)
-{
- T0 = PARAM1;
-}
-
-void OPPROTO op_movl_T0_T1(void)
-{
- T0 = T1;
-}
-
-void OPPROTO op_movl_T1_im(void)
-{
- T1 = PARAM1;
-}
-
-void OPPROTO op_mov_CF_T1(void)
-{
- env->CF = ((uint32_t)T1) >> 31;
-}
-
-void OPPROTO op_movl_T2_im(void)
-{
- T2 = PARAM1;
-}
-
-void OPPROTO op_addl_T1_im(void)
-{
- T1 += PARAM1;
-}
-
-void OPPROTO op_addl_T1_T2(void)
-{
- T1 += T2;
-}
-
-void OPPROTO op_subl_T1_T2(void)
-{
- T1 -= T2;
-}
-
-void OPPROTO op_addl_T0_T1(void)
-{
- T0 += T1;
-}
-
-void OPPROTO op_addl_T0_T1_cc(void)
-{
- unsigned int src1;
- src1 = T0;
- T0 += T1;
- env->NZF = T0;
- env->CF = T0 < src1;
- env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
-}
-
-void OPPROTO op_adcl_T0_T1(void)
-{
- T0 += T1 + env->CF;
-}
-
-void OPPROTO op_adcl_T0_T1_cc(void)
-{
- unsigned int src1;
- src1 = T0;
- if (!env->CF) {
- T0 += T1;
- env->CF = T0 < src1;
- } else {
- T0 += T1 + 1;
- env->CF = T0 <= src1;
- }
- env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
- env->NZF = T0;
- FORCE_RET();
-}
-
-#define OPSUB(sub, sbc, res, T0, T1) \
- \
-void OPPROTO op_ ## sub ## l_T0_T1(void) \
-{ \
- res = T0 - T1; \
-} \
- \
-void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
-{ \
- unsigned int src1; \
- src1 = T0; \
- T0 -= T1; \
- env->NZF = T0; \
- env->CF = src1 >= T1; \
- env->VF = (src1 ^ T1) & (src1 ^ T0); \
- res = T0; \
-} \
- \
-void OPPROTO op_ ## sbc ## l_T0_T1(void) \
-{ \
- res = T0 - T1 + env->CF - 1; \
-} \
- \
-void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
-{ \
- unsigned int src1; \
- src1 = T0; \
- if (!env->CF) { \
- T0 = T0 - T1 - 1; \
- env->CF = src1 > T1; \
- } else { \
- T0 = T0 - T1; \
- env->CF = src1 >= T1; \
- } \
- env->VF = (src1 ^ T1) & (src1 ^ T0); \
- env->NZF = T0; \
- res = T0; \
- FORCE_RET(); \
-}
-
-OPSUB(sub, sbc, T0, T0, T1)
-
-OPSUB(rsb, rsc, T0, T1, T0)
-
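Because the last three OPSUB parameters shadow the result and operand
names, each invocation stamps out the four micro-ops with operands
already swapped where needed.  Expanded by hand, the non-flag-setting
cases are:

    /* From OPSUB(sub, sbc, T0, T0, T1): */
    void OPPROTO op_subl_T0_T1(void) { T0 = T0 - T1; }
    /* From OPSUB(rsb, rsc, T0, T1, T0) -- reverse subtract: */
    void OPPROTO op_rsbl_T0_T1(void) { T0 = T1 - T0; }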
-void OPPROTO op_andl_T0_T1(void)
-{
- T0 &= T1;
-}
-
-void OPPROTO op_xorl_T0_T1(void)
-{
- T0 ^= T1;
-}
-
-void OPPROTO op_orl_T0_T1(void)
-{
- T0 |= T1;
-}
-
-void OPPROTO op_bicl_T0_T1(void)
-{
- T0 &= ~T1;
-}
-
-void OPPROTO op_notl_T1(void)
-{
- T1 = ~T1;
-}
-
-void OPPROTO op_logic_T0_cc(void)
-{
- env->NZF = T0;
-}
-
-void OPPROTO op_logic_T1_cc(void)
-{
- env->NZF = T1;
-}
-
-#define EIP (env->regs[15])
-
-void OPPROTO op_test_eq(void)
-{
- if (env->NZF == 0)
-        GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_ne(void)
-{
- if (env->NZF != 0)
-        GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_cs(void)
-{
- if (env->CF != 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_cc(void)
-{
- if (env->CF == 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_mi(void)
-{
- if ((env->NZF & 0x80000000) != 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_pl(void)
-{
- if ((env->NZF & 0x80000000) == 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_vs(void)
-{
- if ((env->VF & 0x80000000) != 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_vc(void)
-{
- if ((env->VF & 0x80000000) == 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_hi(void)
-{
- if (env->CF != 0 && env->NZF != 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_ls(void)
-{
- if (env->CF == 0 || env->NZF == 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_ge(void)
-{
- if (((env->VF ^ env->NZF) & 0x80000000) == 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_lt(void)
-{
- if (((env->VF ^ env->NZF) & 0x80000000) != 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_gt(void)
-{
- if (env->NZF != 0 && ((env->VF ^ env->NZF) & 0x80000000) == 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_test_le(void)
-{
- if (env->NZF == 0 || ((env->VF ^ env->NZF) & 0x80000000) != 0)
- GOTO_LABEL_PARAM(1);
- FORCE_RET();
-}
-
-void OPPROTO op_goto_tb0(void)
-{
- GOTO_TB(op_goto_tb0, PARAM1, 0);
-}
-
-void OPPROTO op_goto_tb1(void)
-{
- GOTO_TB(op_goto_tb1, PARAM1, 1);
-}
-
-void OPPROTO op_exit_tb(void)
-{
- EXIT_TB();
-}
-
-void OPPROTO op_movl_T0_cpsr(void)
-{
- T0 = cpsr_read(env);
- FORCE_RET();
-}
-
-void OPPROTO op_movl_T0_spsr(void)
-{
- T0 = env->spsr;
-}
-
-void OPPROTO op_movl_spsr_T0(void)
-{
- uint32_t mask = PARAM1;
- env->spsr = (env->spsr & ~mask) | (T0 & mask);
-}
-
-void OPPROTO op_movl_cpsr_T0(void)
-{
- cpsr_write(env, T0, PARAM1);
- FORCE_RET();
-}
-
-void OPPROTO op_mul_T0_T1(void)
-{
- T0 = T0 * T1;
-}
-
-/* 64 bit unsigned mul */
-void OPPROTO op_mull_T0_T1(void)
-{
- uint64_t res;
- res = (uint64_t)T0 * (uint64_t)T1;
- T1 = res >> 32;
- T0 = res;
-}
-
-/* 64 bit signed mul */
-void OPPROTO op_imull_T0_T1(void)
-{
- uint64_t res;
- res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
- T1 = res >> 32;
- T0 = res;
-}
-
-/* 48 bit signed mul, top 32 bits */
-void OPPROTO op_imulw_T0_T1(void)
-{
- uint64_t res;
- res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
- T0 = res >> 16;
-}
-
-void OPPROTO op_addq_T0_T1(void)
-{
- uint64_t res;
- res = ((uint64_t)T1 << 32) | T0;
- res += ((uint64_t)(env->regs[PARAM2]) << 32) | (env->regs[PARAM1]);
- T1 = res >> 32;
- T0 = res;
-}
-
-void OPPROTO op_addq_lo_T0_T1(void)
-{
- uint64_t res;
- res = ((uint64_t)T1 << 32) | T0;
- res += (uint64_t)(env->regs[PARAM1]);
- T1 = res >> 32;
- T0 = res;
-}
-
-void OPPROTO op_logicq_cc(void)
-{
- env->NZF = (T1 & 0x80000000) | ((T0 | T1) != 0);
-}
-
-/* memory access */
-
-#define MEMSUFFIX _raw
-#include "op_mem.h"
-
-#if !defined(CONFIG_USER_ONLY)
-#define MEMSUFFIX _user
-#include "op_mem.h"
-#define MEMSUFFIX _kernel
-#include "op_mem.h"
-#endif
-
-/* shifts */
-
-/* T1 based */
-
-void OPPROTO op_shll_T1_im(void)
-{
- T1 = T1 << PARAM1;
-}
-
-void OPPROTO op_shrl_T1_im(void)
-{
- T1 = (uint32_t)T1 >> PARAM1;
-}
-
-void OPPROTO op_shrl_T1_0(void)
-{
- T1 = 0;
-}
-
-void OPPROTO op_sarl_T1_im(void)
-{
- T1 = (int32_t)T1 >> PARAM1;
-}
-
-void OPPROTO op_sarl_T1_0(void)
-{
- T1 = (int32_t)T1 >> 31;
-}
-
-void OPPROTO op_rorl_T1_im(void)
-{
- int shift;
- shift = PARAM1;
- T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
-}
-
-void OPPROTO op_rrxl_T1(void)
-{
- T1 = ((uint32_t)T1 >> 1) | ((uint32_t)env->CF << 31);
-}
-
-/* T1 based, set C flag */
-void OPPROTO op_shll_T1_im_cc(void)
-{
- env->CF = (T1 >> (32 - PARAM1)) & 1;
- T1 = T1 << PARAM1;
-}
-
-void OPPROTO op_shrl_T1_im_cc(void)
-{
- env->CF = (T1 >> (PARAM1 - 1)) & 1;
- T1 = (uint32_t)T1 >> PARAM1;
-}
-
-void OPPROTO op_shrl_T1_0_cc(void)
-{
- env->CF = (T1 >> 31) & 1;
- T1 = 0;
-}
-
-void OPPROTO op_sarl_T1_im_cc(void)
-{
- env->CF = (T1 >> (PARAM1 - 1)) & 1;
- T1 = (int32_t)T1 >> PARAM1;
-}
-
-void OPPROTO op_sarl_T1_0_cc(void)
-{
- env->CF = (T1 >> 31) & 1;
- T1 = (int32_t)T1 >> 31;
-}
-
-void OPPROTO op_rorl_T1_im_cc(void)
-{
- int shift;
- shift = PARAM1;
- env->CF = (T1 >> (shift - 1)) & 1;
- T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
-}
-
-void OPPROTO op_rrxl_T1_cc(void)
-{
- uint32_t c;
- c = T1 & 1;
- T1 = ((uint32_t)T1 >> 1) | ((uint32_t)env->CF << 31);
- env->CF = c;
-}
-
-/* T2 based */
-void OPPROTO op_shll_T2_im(void)
-{
- T2 = T2 << PARAM1;
-}
-
-void OPPROTO op_shrl_T2_im(void)
-{
- T2 = (uint32_t)T2 >> PARAM1;
-}
-
-void OPPROTO op_shrl_T2_0(void)
-{
- T2 = 0;
-}
-
-void OPPROTO op_sarl_T2_im(void)
-{
- T2 = (int32_t)T2 >> PARAM1;
-}
-
-void OPPROTO op_sarl_T2_0(void)
-{
- T2 = (int32_t)T2 >> 31;
-}
-
-void OPPROTO op_rorl_T2_im(void)
-{
- int shift;
- shift = PARAM1;
- T2 = ((uint32_t)T2 >> shift) | (T2 << (32 - shift));
-}
-
-void OPPROTO op_rrxl_T2(void)
-{
- T2 = ((uint32_t)T2 >> 1) | ((uint32_t)env->CF << 31);
-}
-
-/* T1 based, use T0 as shift count */
-
-void OPPROTO op_shll_T1_T0(void)
-{
- int shift;
- shift = T0 & 0xff;
- if (shift >= 32)
- T1 = 0;
- else
- T1 = T1 << shift;
- FORCE_RET();
-}
-
-void OPPROTO op_shrl_T1_T0(void)
-{
- int shift;
- shift = T0 & 0xff;
- if (shift >= 32)
- T1 = 0;
- else
- T1 = (uint32_t)T1 >> shift;
- FORCE_RET();
-}
-
-void OPPROTO op_sarl_T1_T0(void)
-{
- int shift;
- shift = T0 & 0xff;
- if (shift >= 32)
- shift = 31;
- T1 = (int32_t)T1 >> shift;
-}
-
-void OPPROTO op_rorl_T1_T0(void)
-{
- int shift;
- shift = T0 & 0x1f;
- if (shift) {
- T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
- }
- FORCE_RET();
-}
-
-/* T1 based, use T0 as shift count and compute CF */
-
-void OPPROTO op_shll_T1_T0_cc(void)
-{
- int shift;
- shift = T0 & 0xff;
- if (shift >= 32) {
- if (shift == 32)
- env->CF = T1 & 1;
- else
- env->CF = 0;
- T1 = 0;
- } else if (shift != 0) {
- env->CF = (T1 >> (32 - shift)) & 1;
- T1 = T1 << shift;
- }
- FORCE_RET();
-}
-
-void OPPROTO op_shrl_T1_T0_cc(void)
-{
- int shift;
- shift = T0 & 0xff;
- if (shift >= 32) {
- if (shift == 32)
- env->CF = (T1 >> 31) & 1;
- else
- env->CF = 0;
- T1 = 0;
- } else if (shift != 0) {
- env->CF = (T1 >> (shift - 1)) & 1;
- T1 = (uint32_t)T1 >> shift;
- }
- FORCE_RET();
-}
-
-void OPPROTO op_sarl_T1_T0_cc(void)
-{
- int shift;
- shift = T0 & 0xff;
- if (shift >= 32) {
- env->CF = (T1 >> 31) & 1;
- T1 = (int32_t)T1 >> 31;
- } else {
- env->CF = (T1 >> (shift - 1)) & 1;
- T1 = (int32_t)T1 >> shift;
- }
- FORCE_RET();
-}
-
-void OPPROTO op_rorl_T1_T0_cc(void)
-{
- int shift1, shift;
- shift1 = T0 & 0xff;
- shift = shift1 & 0x1f;
- if (shift == 0) {
- if (shift1 != 0)
- env->CF = (T1 >> 31) & 1;
- } else {
- env->CF = (T1 >> (shift - 1)) & 1;
- T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
- }
- FORCE_RET();
-}
-
-/* misc */
-void OPPROTO op_clz_T0(void)
-{
- int count;
- for (count = 32; T0 > 0; count--)
- T0 = T0 >> 1;
- T0 = count;
- FORCE_RET();
-}
-
-void OPPROTO op_sarl_T0_im(void)
-{
- T0 = (int32_t)T0 >> PARAM1;
-}
-
-/* Sign/zero extend */
-void OPPROTO op_sxth_T0(void)
-{
- T0 = (int16_t)T0;
-}
-
-void OPPROTO op_sxth_T1(void)
-{
- T1 = (int16_t)T1;
-}
-
-void OPPROTO op_sxtb_T1(void)
-{
- T1 = (int8_t)T1;
-}
-
-void OPPROTO op_uxtb_T1(void)
-{
- T1 = (uint8_t)T1;
-}
-
-void OPPROTO op_uxth_T1(void)
-{
- T1 = (uint16_t)T1;
-}
-
-void OPPROTO op_sxtb16_T1(void)
-{
- uint32_t res;
- res = (uint16_t)(int8_t)T1;
- res |= (uint32_t)(int8_t)(T1 >> 16) << 16;
- T1 = res;
-}
-
-void OPPROTO op_uxtb16_T1(void)
-{
- uint32_t res;
- res = (uint16_t)(uint8_t)T1;
- res |= (uint32_t)(uint8_t)(T1 >> 16) << 16;
- T1 = res;
-}
-
-#define SIGNBIT (uint32_t)0x80000000
-/* saturating arithmetic */
-void OPPROTO op_addl_T0_T1_setq(void)
-{
- uint32_t res;
-
- res = T0 + T1;
- if (((res ^ T0) & SIGNBIT) && !((T0 ^ T1) & SIGNBIT))
- env->QF = 1;
-
- T0 = res;
- FORCE_RET();
-}
-
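The sign-bit test used by these saturating ops is the classic one:
signed overflow happened iff the operands had the same sign and the
result's sign differs from them.  A self-contained restatement,
illustrative only and detached from CPUState:

    #include <stdint.h>
    #define SIGNBIT 0x80000000u

    /* Returns 1 when a + b overflows the 32-bit signed range. */
    static int adds_overflows(uint32_t a, uint32_t b)
    {
        /* e.g. adds_overflows(0x7fffffff, 1) == 1 (wraps negative);
           adds_overflows(0x7fffffff, 0xffffffff) == 0 (signs differ). */
        uint32_t res = a + b;
        return ((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT);
    }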
-void OPPROTO op_addl_T0_T1_saturate(void)
-{
- uint32_t res;
-
- res = T0 + T1;
- if (((res ^ T0) & SIGNBIT) && !((T0 ^ T1) & SIGNBIT)) {
- env->QF = 1;
- if (T0 & SIGNBIT)
- T0 = 0x80000000;
- else
- T0 = 0x7fffffff;
- }
- else
- T0 = res;
-
- FORCE_RET();
-}
-
-void OPPROTO op_subl_T0_T1_saturate(void)
-{
- uint32_t res;
-
- res = T0 - T1;
- if (((res ^ T0) & SIGNBIT) && ((T0 ^ T1) & SIGNBIT)) {
- env->QF = 1;
- if (T0 & SIGNBIT)
- T0 = 0x80000000;
- else
- T0 = 0x7fffffff;
- }
- else
- T0 = res;
-
- FORCE_RET();
-}
-
-void OPPROTO op_double_T1_saturate(void)
-{
- int32_t val;
-
- val = T1;
- if (val >= 0x40000000) {
- T1 = 0x7fffffff;
- env->QF = 1;
- } else if (val <= (int32_t)0xc0000000) {
- T1 = 0x80000000;
- env->QF = 1;
- } else {
- T1 = val << 1;
- }
- FORCE_RET();
-}
-
-/* thumb shift by immediate */
-void OPPROTO op_shll_T0_im_thumb(void)
-{
- int shift;
- shift = PARAM1;
- if (shift != 0) {
-        env->CF = (T0 >> (32 - shift)) & 1;
- T0 = T0 << shift;
- }
- env->NZF = T0;
- FORCE_RET();
-}
-
-void OPPROTO op_shrl_T0_im_thumb(void)
-{
- int shift;
-
- shift = PARAM1;
- if (shift == 0) {
-        env->CF = ((uint32_t)T0) >> 31;
- T0 = 0;
- } else {
- env->CF = (T0 >> (shift - 1)) & 1;
- T0 = T0 >> shift;
- }
- env->NZF = T0;
- FORCE_RET();
-}
-
-void OPPROTO op_sarl_T0_im_thumb(void)
-{
- int shift;
-
- shift = PARAM1;
- if (shift == 0) {
- T0 = ((int32_t)T0) >> 31;
- env->CF = T0 & 1;
- } else {
- env->CF = (T0 >> (shift - 1)) & 1;
- T0 = ((int32_t)T0) >> shift;
- }
- env->NZF = T0;
- FORCE_RET();
-}
-
-/* exceptions */
-
-void OPPROTO op_swi(void)
-{
- env->exception_index = EXCP_SWI;
- cpu_loop_exit();
-}
-
-void OPPROTO op_undef_insn(void)
-{
- env->exception_index = EXCP_UDEF;
- cpu_loop_exit();
-}
-
-void OPPROTO op_debug(void)
-{
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit();
-}
-
-void OPPROTO op_wfi(void)
-{
- env->exception_index = EXCP_HLT;
- env->halted = 1;
- cpu_loop_exit();
-}
-
-void OPPROTO op_bkpt(void)
-{
- env->exception_index = EXCP_BKPT;
- cpu_loop_exit();
-}
-
-/* VFP support.  We follow the convention used for VFP instructions:
-   single-precision routines have an "s" suffix, double-precision a
-   "d" suffix.  */
-
-#define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void)
-
-#define VFP_BINOP(name) \
-VFP_OP(name, s) \
-{ \
- FT0s = float32_ ## name (FT0s, FT1s, &env->vfp.fp_status); \
-} \
-VFP_OP(name, d) \
-{ \
- FT0d = float64_ ## name (FT0d, FT1d, &env->vfp.fp_status); \
-}
-VFP_BINOP(add)
-VFP_BINOP(sub)
-VFP_BINOP(mul)
-VFP_BINOP(div)
-#undef VFP_BINOP
-
-#define VFP_HELPER(name) \
-VFP_OP(name, s) \
-{ \
- do_vfp_##name##s(); \
-} \
-VFP_OP(name, d) \
-{ \
- do_vfp_##name##d(); \
-}
-VFP_HELPER(abs)
-VFP_HELPER(sqrt)
-VFP_HELPER(cmp)
-VFP_HELPER(cmpe)
-#undef VFP_HELPER
-
-/* XXX: Will this do the right thing for NaNs?  It should invert the sign
-   bit without looking at the rest of the value.  */
-VFP_OP(neg, s)
-{
- FT0s = float32_chs(FT0s);
-}
-
-VFP_OP(neg, d)
-{
- FT0d = float64_chs(FT0d);
-}
-
-VFP_OP(F1_ld0, s)
-{
- union {
- uint32_t i;
- float32 s;
- } v;
- v.i = 0;
- FT1s = v.s;
-}
-
-VFP_OP(F1_ld0, d)
-{
- union {
- uint64_t i;
- float64 d;
- } v;
- v.i = 0;
- FT1d = v.d;
-}
-
-/* Helper routines to perform bitwise copies between float and int. */
-static inline float32 vfp_itos(uint32_t i)
-{
- union {
- uint32_t i;
- float32 s;
- } v;
-
- v.i = i;
- return v.s;
-}
-
-static inline uint32_t vfp_stoi(float32 s)
-{
- union {
- uint32_t i;
- float32 s;
- } v;
-
- v.s = s;
- return v.i;
-}
-
-/* Integer to float conversion. */
-VFP_OP(uito, s)
-{
- FT0s = uint32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status);
-}
-
-VFP_OP(uito, d)
-{
- FT0d = uint32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status);
-}
-
-VFP_OP(sito, s)
-{
- FT0s = int32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status);
-}
-
-VFP_OP(sito, d)
-{
- FT0d = int32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status);
-}
-
-/* Float to integer conversion. */
-VFP_OP(toui, s)
-{
- FT0s = vfp_itos(float32_to_uint32(FT0s, &env->vfp.fp_status));
-}
-
-VFP_OP(toui, d)
-{
- FT0s = vfp_itos(float64_to_uint32(FT0d, &env->vfp.fp_status));
-}
-
-VFP_OP(tosi, s)
-{
- FT0s = vfp_itos(float32_to_int32(FT0s, &env->vfp.fp_status));
-}
-
-VFP_OP(tosi, d)
-{
- FT0s = vfp_itos(float64_to_int32(FT0d, &env->vfp.fp_status));
-}
-
-/* TODO: Set rounding mode properly. */
-VFP_OP(touiz, s)
-{
- FT0s = vfp_itos(float32_to_uint32_round_to_zero(FT0s, &env->vfp.fp_status));
-}
-
-VFP_OP(touiz, d)
-{
- FT0s = vfp_itos(float64_to_uint32_round_to_zero(FT0d, &env->vfp.fp_status));
-}
-
-VFP_OP(tosiz, s)
-{
- FT0s = vfp_itos(float32_to_int32_round_to_zero(FT0s, &env->vfp.fp_status));
-}
-
-VFP_OP(tosiz, d)
-{
- FT0s = vfp_itos(float64_to_int32_round_to_zero(FT0d, &env->vfp.fp_status));
-}
-
-/* floating point conversion */
-VFP_OP(fcvtd, s)
-{
- FT0d = float32_to_float64(FT0s, &env->vfp.fp_status);
-}
-
-VFP_OP(fcvts, d)
-{
- FT0s = float64_to_float32(FT0d, &env->vfp.fp_status);
-}
-
-/* Get and Put values from registers. */
-VFP_OP(getreg_F0, d)
-{
- FT0d = *(float64 *)((char *) env + PARAM1);
-}
-
-VFP_OP(getreg_F0, s)
-{
- FT0s = *(float32 *)((char *) env + PARAM1);
-}
-
-VFP_OP(getreg_F1, d)
-{
- FT1d = *(float64 *)((char *) env + PARAM1);
-}
-
-VFP_OP(getreg_F1, s)
-{
- FT1s = *(float32 *)((char *) env + PARAM1);
-}
-
-VFP_OP(setreg_F0, d)
-{
- *(float64 *)((char *) env + PARAM1) = FT0d;
-}
-
-VFP_OP(setreg_F0, s)
-{
- *(float32 *)((char *) env + PARAM1) = FT0s;
-}
-
-void OPPROTO op_vfp_movl_T0_fpscr(void)
-{
- do_vfp_get_fpscr ();
-}
-
-void OPPROTO op_vfp_movl_T0_fpscr_flags(void)
-{
- T0 = env->vfp.xregs[ARM_VFP_FPSCR] & (0xf << 28);
-}
-
-void OPPROTO op_vfp_movl_fpscr_T0(void)
-{
- do_vfp_set_fpscr();
-}
-
-void OPPROTO op_vfp_movl_T0_xreg(void)
-{
- T0 = env->vfp.xregs[PARAM1];
-}
-
-void OPPROTO op_vfp_movl_xreg_T0(void)
-{
- env->vfp.xregs[PARAM1] = T0;
-}
-
-/* Move between FT0s and T0 */
-void OPPROTO op_vfp_mrs(void)
-{
- T0 = vfp_stoi(FT0s);
-}
-
-void OPPROTO op_vfp_msr(void)
-{
- FT0s = vfp_itos(T0);
-}
-
-/* Move between FT0d and {T0,T1} */
-void OPPROTO op_vfp_mrrd(void)
-{
- CPU_DoubleU u;
-
- u.d = FT0d;
- T0 = u.l.lower;
- T1 = u.l.upper;
-}
-
-void OPPROTO op_vfp_mdrr(void)
-{
- CPU_DoubleU u;
-
- u.l.lower = T0;
- u.l.upper = T1;
- FT0d = u.d;
-}
-
-/* Copy the most significant bit of T0 to all bits of T1.  */
-void OPPROTO op_signbit_T1_T0(void)
-{
- T1 = (int32_t)T0 >> 31;
-}
-
-void OPPROTO op_movl_cp15_T0(void)
-{
- helper_set_cp15(env, PARAM1, T0);
- FORCE_RET();
-}
-
-void OPPROTO op_movl_T0_cp15(void)
-{
- T0 = helper_get_cp15(env, PARAM1);
- FORCE_RET();
-}
-
-/* Access to user mode registers from privileged modes. */
-void OPPROTO op_movl_T0_user(void)
-{
- int regno = PARAM1;
- if (regno == 13) {
- T0 = env->banked_r13[0];
- } else if (regno == 14) {
- T0 = env->banked_r14[0];
- } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
- T0 = env->usr_regs[regno - 8];
- } else {
- T0 = env->regs[regno];
- }
- FORCE_RET();
-}
-
-
-void OPPROTO op_movl_user_T0(void)
-{
- int regno = PARAM1;
- if (regno == 13) {
- env->banked_r13[0] = T0;
- } else if (regno == 14) {
- env->banked_r14[0] = T0;
- } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
- env->usr_regs[regno - 8] = T0;
- } else {
- env->regs[regno] = T0;
- }
- FORCE_RET();
-}
-
-void OPPROTO op_movl_T2_T0(void)
-{
- T2 = T0;
-}
-
-void OPPROTO op_movl_T0_T2(void)
-{
- T0 = T2;
-}
diff --git a/target-arm/op_addsub.h b/target-arm/op_addsub.h
new file mode 100644
index 0000000..376ee27
--- /dev/null
+++ b/target-arm/op_addsub.h
@@ -0,0 +1,103 @@
+/*
+ * ARMv6 integer SIMD operations.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifdef ARITH_GE
+#define GE_ARG , uint32_t *gep
+#define DECLARE_GE uint32_t ge = 0
+#define SET_GE *gep = ge
+#else
+#define GE_ARG
+#define DECLARE_GE do{}while(0)
+#define SET_GE do{}while(0)
+#endif
+
+#define RESULT(val, n, width) \
+ res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width)
+
+uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD16(a, b, 0);
+ ADD16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD8(a, b, 0);
+ ADD8(a >> 8, b >> 8, 1);
+ ADD8(a >> 16, b >> 16, 2);
+ ADD8(a >> 24, b >> 24, 3);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB16(a, b, 0);
+ SUB16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB8(a, b, 0);
+ SUB8(a >> 8, b >> 8, 1);
+ SUB8(a >> 16, b >> 16, 2);
+ SUB8(a >> 24, b >> 24, 3);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD16(a, b, 0);
+ SUB16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB16(a, b, 0);
+ ADD16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+#undef GE_ARG
+#undef DECLARE_GE
+#undef SET_GE
+#undef RESULT
+
+#undef ARITH_GE
+#undef PFX
+#undef ADD16
+#undef SUB16
+#undef ADD8
+#undef SUB8
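This header is a template: the includer supplies the per-lane primitives
and a helper-name prefix before including it.  A sketch of the intended
instantiation (the exact include site may differ in this tree):

    /* Plain modulo lane arithmetic, generating helper_uadd16 etc. */
    #define ADD16(a, b, n) RESULT(a + b, n, 16);
    #define SUB16(a, b, n) RESULT(a - b, n, 16);
    #define ADD8(a, b, n)  RESULT(a + b, n, 8);
    #define SUB8(a, b, n)  RESULT(a - b, n, 8);
    #define PFX u
    #include "op_addsub.h"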
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index acc83ba..36de55b 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -1,7 +1,7 @@
/*
* ARM helper routines
- *
- * Copyright (c) 2005 CodeSourcery, LLC
+ *
+ * Copyright (c) 2005-2007 CodeSourcery, LLC
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -18,6 +18,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "exec.h"
+#include "helpers.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
void raise_exception(int tt)
{
@@ -39,140 +43,26 @@ void cpu_unlock(void)
spin_unlock(&global_cpu_lock);
}
-/* VFP support. */
-
-void do_vfp_abss(void)
-{
- FT0s = float32_abs(FT0s);
-}
-
-void do_vfp_absd(void)
+uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
+ uint32_t rn, uint32_t maxindex)
{
- FT0d = float64_abs(FT0d);
-}
-
-void do_vfp_sqrts(void)
-{
- FT0s = float32_sqrt(FT0s, &env->vfp.fp_status);
-}
-
-void do_vfp_sqrtd(void)
-{
- FT0d = float64_sqrt(FT0d, &env->vfp.fp_status);
-}
-
-/* XXX: check quiet/signaling case */
-#define DO_VFP_cmp(p, size) \
-void do_vfp_cmp##p(void) \
-{ \
- uint32_t flags; \
- switch(float ## size ## _compare_quiet(FT0##p, FT1##p, &env->vfp.fp_status)) {\
- case 0: flags = 0x6; break;\
- case -1: flags = 0x8; break;\
- case 1: flags = 0x2; break;\
- default: case 2: flags = 0x3; break;\
- }\
- env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28)\
- | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
- FORCE_RET(); \
-}\
-\
-void do_vfp_cmpe##p(void) \
-{ \
- uint32_t flags; \
- switch(float ## size ## _compare(FT0##p, FT1##p, &env->vfp.fp_status)) {\
- case 0: flags = 0x6; break;\
- case -1: flags = 0x8; break;\
- case 1: flags = 0x2; break;\
- default: case 2: flags = 0x3; break;\
- }\
- env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28)\
- | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
- FORCE_RET(); \
-}
-DO_VFP_cmp(s, 32)
-DO_VFP_cmp(d, 64)
-#undef DO_VFP_cmp
-
-/* Convert host exception flags to vfp form. */
-static inline int vfp_exceptbits_from_host(int host_bits)
-{
- int target_bits = 0;
-
- if (host_bits & float_flag_invalid)
- target_bits |= 1;
- if (host_bits & float_flag_divbyzero)
- target_bits |= 2;
- if (host_bits & float_flag_overflow)
- target_bits |= 4;
- if (host_bits & float_flag_underflow)
- target_bits |= 8;
- if (host_bits & float_flag_inexact)
- target_bits |= 0x10;
- return target_bits;
-}
-
-/* Convert vfp exception flags to target form. */
-static inline int vfp_exceptbits_to_host(int target_bits)
-{
- int host_bits = 0;
-
- if (target_bits & 1)
- host_bits |= float_flag_invalid;
- if (target_bits & 2)
- host_bits |= float_flag_divbyzero;
- if (target_bits & 4)
- host_bits |= float_flag_overflow;
- if (target_bits & 8)
- host_bits |= float_flag_underflow;
- if (target_bits & 0x10)
- host_bits |= float_flag_inexact;
- return host_bits;
-}
-
-void do_vfp_set_fpscr(void)
-{
- int i;
- uint32_t changed;
-
- changed = env->vfp.xregs[ARM_VFP_FPSCR];
- env->vfp.xregs[ARM_VFP_FPSCR] = (T0 & 0xffc8ffff);
- env->vfp.vec_len = (T0 >> 16) & 7;
- env->vfp.vec_stride = (T0 >> 20) & 3;
-
- changed ^= T0;
- if (changed & (3 << 22)) {
- i = (T0 >> 22) & 3;
- switch (i) {
- case 0:
- i = float_round_nearest_even;
- break;
- case 1:
- i = float_round_up;
- break;
- case 2:
- i = float_round_down;
- break;
- case 3:
- i = float_round_to_zero;
- break;
+ uint32_t val;
+ uint32_t tmp;
+ int index;
+ int shift;
+ uint64_t *table;
+ table = (uint64_t *)&env->vfp.regs[rn];
+ val = 0;
+ for (shift = 0; shift < 32; shift += 8) {
+ index = (ireg >> shift) & 0xff;
+ if (index < maxindex) {
+            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
+ val |= tmp << shift;
+ } else {
+ val |= def & (0xff << shift);
}
- set_float_rounding_mode(i, &env->vfp.fp_status);
}
-
- i = vfp_exceptbits_to_host((T0 >> 8) & 0x1f);
- set_float_exception_flags(i, &env->vfp.fp_status);
- /* XXX: FZ and DN are not implemented. */
-}
-
-void do_vfp_get_fpscr(void)
-{
- int i;
-
- T0 = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) | (env->vfp.vec_len << 16)
- | (env->vfp.vec_stride << 20);
- i = get_float_exception_flags(&env->vfp.fp_status);
- T0 |= vfp_exceptbits_from_host(i);
+ return val;
}
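neon_tbl implements the NEON VTBL/VTBX byte lookup: each index byte of
ireg selects a byte from the table registers, and out-of-range indices
fall back to the matching byte of def.  A single lane, modelled in
isolation (illustrative, detached from CPUState):

    #include <stdint.h>

    static uint8_t tbl_lane(const uint64_t *table, unsigned int index,
                            uint8_t def, unsigned int maxindex)
    {
        if (index >= maxindex)
            return def;  /* VTBX keeps the destination byte via def */
        /* Each 64-bit table element holds 8 bytes; shift is in bits. */
        return (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
    }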
#if !defined(CONFIG_USER_ONLY)
@@ -180,7 +70,6 @@ void do_vfp_get_fpscr(void)
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
#define MMUSUFFIX _mmu
-#define GETPC() (__builtin_return_address(0))
#define ALIGNED_ONLY 1
#define SHIFT 0
@@ -195,10 +84,10 @@ static void do_unaligned_access (target_ulong addr, int is_write, int is_user, v
#define SHIFT 3
#include "softmmu_template.h"
-static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
+static void do_unaligned_access (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
//printf("::UNALIGNED:: addr=%lx is_write=%d is_user=%d retaddr=%p\n", addr, is_write, is_user, retaddr);
- if (is_user)
+ if (mmu_idx)
{
env = cpu_single_env;
env->cp15.c5_data = 0x00000001; /* corresponds to an alignment fault */
@@ -212,22 +101,22 @@ static void do_unaligned_access (target_ulong addr, int is_write, int is_user, v
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
-void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
+void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
CPUState *saved_env;
- target_phys_addr_t pc;
+ unsigned long pc;
int ret;
/* XXX: hack to restore env in all cases, even if not called from
generated code */
saved_env = env;
env = cpu_single_env;
- ret = cpu_arm_handle_mmu_fault(env, addr, is_write, is_user, 1);
- if (__builtin_expect(ret, 0)) {
+ ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
+ if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- pc = (target_phys_addr_t)retaddr;
+ pc = (unsigned long)retaddr;
tb = tb_find_pc(pc);
if (tb) {
/* the PC is inside the translated code. It means that we have
@@ -246,7 +135,7 @@ void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
* The following functions are address translation helper functions
* for fast memory access in QEMU.
*/
-static unsigned long v2p_mmu(target_ulong addr, int is_user)
+static target_phys_addr_t v2p_mmu(target_ulong addr, int mmu_idx)
{
int index;
target_ulong tlb_addr;
@@ -255,13 +144,13 @@ static unsigned long v2p_mmu(target_ulong addr, int is_user)
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
- tlb_addr = env->tlb_table[is_user][index].addr_read;
+ tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[is_user][index].addend;
+ physaddr = addr + env->tlb_table[mmu_idx][index].addend;
} else {
/* the page is not in the TLB : fill it */
retaddr = GETPC();
- tlb_fill(addr, 0, is_user, retaddr);
+ tlb_fill(addr, 0, mmu_idx, retaddr);
goto redo;
}
return physaddr;
@@ -272,22 +161,23 @@ redo:
* to the address of simulation host (not the physical
* address of simulated OS.
*/
-unsigned long v2p(target_ulong ptr, int is_user)
+target_phys_addr_t v2p(target_ulong ptr, int mmu_idx)
{
CPUState *saved_env;
int index;
target_ulong addr;
- unsigned long physaddr;
+ target_phys_addr_t physaddr;
saved_env = env;
env = cpu_single_env;
addr = ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- if (__builtin_expect(env->tlb_table[is_user][index].addr_read !=
- (addr & TARGET_PAGE_MASK), 0)) {
- return v2p_mmu(addr, is_user);
+ if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_read !=
+ (addr & TARGET_PAGE_MASK), 0))
+ {
+ physaddr = v2p_mmu(addr, mmu_idx);
} else {
- physaddr = addr + env->tlb_table[is_user][index].addend;
+ physaddr = (target_phys_addr_t)addr + env->tlb_table[mmu_idx][index].addend;
}
env = saved_env;
return physaddr;
@@ -311,7 +201,7 @@ void vmemcpy(target_ulong ptr, char *buf, int size)
}
/* copy memory from the QEMU buffer to simulated virtual space */
-void pmemcpy(target_ulong ptr, char *buf, int size)
+void pmemcpy(target_ulong ptr, const char *buf, int size)
{
if (buf == NULL) return;
while (size) {
@@ -349,5 +239,450 @@ void vstrcpy(target_ulong ptr, char *buf, int max)
}
}
#endif
-
#endif
+
+/* FIXME: Pass an explicit pointer to QF in CPUState, and move the saturating
+   instructions into helper.c.  */
+uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
+ env->QF = 1;
+ return res;
+}
+
+uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+ env->QF = 1;
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+ env->QF = 1;
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint32_t HELPER(double_saturate)(int32_t val)
+{
+ uint32_t res;
+ if (val >= 0x40000000) {
+ res = ~SIGNBIT;
+ env->QF = 1;
+ } else if (val <= (int32_t)0xc0000000) {
+ res = SIGNBIT;
+ env->QF = 1;
+ } else {
+ res = val << 1;
+ }
+ return res;
+}
+
+uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (res < a) {
+ env->QF = 1;
+ res = ~0;
+ }
+ return res;
+}
+
+uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (res > a) {
+ env->QF = 1;
+ res = 0;
+ }
+ return res;
+}
+
+/* Signed saturation. */
+static inline uint32_t do_ssat(int32_t val, int shift)
+{
+ int32_t top;
+ uint32_t mask;
+
+ top = val >> shift;
+ mask = (1u << shift) - 1;
+ if (top > 0) {
+ env->QF = 1;
+ return mask;
+ } else if (top < -1) {
+ env->QF = 1;
+ return ~mask;
+ }
+ return val;
+}
+
+/* Unsigned saturation. */
+static inline uint32_t do_usat(int32_t val, int shift)
+{
+ uint32_t max;
+
+ max = (1u << shift) - 1;
+ if (val < 0) {
+ env->QF = 1;
+ return 0;
+ } else if (val > max) {
+ env->QF = 1;
+ return max;
+ }
+ return val;
+}
+
+/* Signed saturate. */
+uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
+{
+ return do_ssat(x, shift);
+}
+
+/* Dual halfword signed saturate. */
+uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
+{
+ uint32_t res;
+
+ res = (uint16_t)do_ssat((int16_t)x, shift);
+ res |= do_ssat(((int32_t)x) >> 16, shift) << 16;
+ return res;
+}
+
+/* Unsigned saturate. */
+uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
+{
+ return do_usat(x, shift);
+}
+
+/* Dual halfword unsigned saturate. */
+uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
+{
+ uint32_t res;
+
+ res = (uint16_t)do_usat((int16_t)x, shift);
+ res |= do_usat(((int32_t)x) >> 16, shift) << 16;
+ return res;
+}
+
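Note the width convention: the shift argument is the saturation width
minus one, so do_ssat(x, shift) clamps to [-2^shift, 2^shift - 1] and
do_usat(x, shift) to [0, 2^shift - 1].  Some illustrative values (these
helpers also set env->QF, so this is not a standalone test):

    do_ssat(300, 7)   /* == 127        : clamp to int8_t max  */
    do_ssat(-300, 7)  /* == 0xffffff80 : clamp to int8_t min  */
    do_usat(300, 8)   /* == 255        : clamp to uint8_t max */
    do_usat(-1, 8)    /* == 0 */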
+void HELPER(wfi)(void)
+{
+ env->exception_index = EXCP_HLT;
+ env->halted = 1;
+ cpu_loop_exit();
+}
+
+void HELPER(exception)(uint32_t excp)
+{
+ env->exception_index = excp;
+ cpu_loop_exit();
+}
+
+uint32_t HELPER(cpsr_read)(void)
+{
+ return cpsr_read(env) & ~CPSR_EXEC;
+}
+
+void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
+{
+ cpsr_write(env, val, mask);
+}
+
+/* Access to user mode registers from privileged modes. */
+uint32_t HELPER(get_user_reg)(uint32_t regno)
+{
+ uint32_t val;
+
+ if (regno == 13) {
+ val = env->banked_r13[0];
+ } else if (regno == 14) {
+ val = env->banked_r14[0];
+ } else if (regno >= 8
+ && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
+ val = env->usr_regs[regno - 8];
+ } else {
+ val = env->regs[regno];
+ }
+ return val;
+}
+
+void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
+{
+ if (regno == 13) {
+ env->banked_r13[0] = val;
+ } else if (regno == 14) {
+ env->banked_r14[0] = val;
+ } else if (regno >= 8
+ && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
+ env->usr_regs[regno - 8] = val;
+ } else {
+ env->regs[regno] = val;
+ }
+}
+
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+ The only way to do that in TCG is a conditional branch, which clobbers
+ all our temporaries. For now implement these as helper functions. */
+
+uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
+{
+    uint32_t result;
+    result = a + b;
+ env->NF = env->ZF = result;
+ env->CF = result < a;
+ env->VF = (a ^ b ^ -1) & (a ^ result);
+ return result;
+}
+
+uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ if (!env->CF) {
+ result = a + b;
+ env->CF = result < a;
+ } else {
+ result = a + b + 1;
+ env->CF = result <= a;
+ }
+ env->VF = (a ^ b ^ -1) & (a ^ result);
+ env->NF = env->ZF = result;
+ return result;
+}
+
+uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ result = a - b;
+ env->NF = env->ZF = result;
+ env->CF = a >= b;
+ env->VF = (a ^ b) & (a ^ result);
+ return result;
+}
+
+uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ if (!env->CF) {
+ result = a - b - 1;
+ env->CF = a > b;
+ } else {
+ result = a - b;
+ env->CF = a >= b;
+ }
+ env->VF = (a ^ b) & (a ^ result);
+ env->NF = env->ZF = result;
+ return result;
+}
+
+/* Similarly for variable shift instructions. */
+
+uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32)
+ return 0;
+ return x << shift;
+}
+
+uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32)
+ return 0;
+ return (uint32_t)x >> shift;
+}
+
+uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32)
+ shift = 31;
+ return (int32_t)x >> shift;
+}
+
+uint32_t HELPER(ror)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift == 0)
+ return x;
+ return (x >> shift) | (x << (32 - shift));
+}
+
+uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32)
+ env->CF = x & 1;
+ else
+ env->CF = 0;
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (32 - shift)) & 1;
+ return x << shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32)
+ env->CF = (x >> 31) & 1;
+ else
+ env->CF = 0;
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ env->CF = (x >> 31) & 1;
+ return (int32_t)x >> 31;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return (int32_t)x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+{
+ int shift1, shift;
+ shift1 = i & 0xff;
+ shift = shift1 & 0x1f;
+ if (shift == 0) {
+ if (shift1 != 0)
+ env->CF = (x >> 31) & 1;
+ return x;
+ } else {
+ env->CF = (x >> (shift - 1)) & 1;
+ return ((uint32_t)x >> shift) | (x << (32 - shift));
+ }
+}
+
+uint64_t HELPER(neon_add_saturate_s64)(uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 + src2;
+ if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
+ env->QF = 1;
+ res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_add_saturate_u64)(uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 + src2;
+ if (res < src1) {
+ env->QF = 1;
+ res = ~(uint64_t)0;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 - src2;
+ if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
+ env->QF = 1;
+ res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ if (src1 < src2) {
+ env->QF = 1;
+ res = 0;
+ } else {
+ res = src1 - src2;
+ }
+ return res;
+}
+
+/* These need to return a pair of values, so still use T0/T1.  */
+/* Transpose.  The argument order is rather strange, to avoid special-casing
+   the translation code.
+   On input T0 = rm, T1 = rd.  On output T0 = rd, T1 = rm.  */
+void HELPER(neon_trn_u8)(void)
+{
+ uint32_t rd;
+ uint32_t rm;
+ rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
+ rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
+ T0 = rd;
+ T1 = rm;
+ FORCE_RET();
+}
+
+void HELPER(neon_trn_u16)(void)
+{
+ uint32_t rd;
+ uint32_t rm;
+ rd = (T0 << 16) | (T1 & 0xffff);
+ rm = (T1 >> 16) | (T0 & 0xffff0000);
+ T0 = rd;
+ T1 = rm;
+ FORCE_RET();
+}
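A concrete VTRN.16 example makes the register swap easier to follow
(values hypothetical):

    /* Before: T0 = rm = 0xBBBBAAAA (lanes {AAAA, BBBB}),
               T1 = rd = 0xDDDDCCCC (lanes {CCCC, DDDD}).
       After neon_trn_u16():
               T0 = rd' = 0xAAAACCCC (lanes {CCCC, AAAA}),
               T1 = rm' = 0xBBBBDDDD (lanes {DDDD, BBBB}),
       i.e. lane 1 of rd has been exchanged with lane 0 of rm. */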
+
+/* Worker routines for zip and unzip. */
+void HELPER(neon_unzip_u8)(void)
+{
+ uint32_t rd;
+ uint32_t rm;
+ rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
+ | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
+ rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
+ | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
+ T0 = rd;
+ T1 = rm;
+ FORCE_RET();
+}
+
+void HELPER(neon_zip_u8)(void)
+{
+ uint32_t rd;
+ uint32_t rm;
+ rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
+ | ((T0 << 16) & 0xff0000) | ((T1 << 24) & 0xff000000);
+ rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
+ | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
+ T0 = rd;
+ T1 = rm;
+ FORCE_RET();
+}
+
+void HELPER(neon_zip_u16)(void)
+{
+ uint32_t tmp;
+
+ tmp = (T0 & 0xffff) | (T1 << 16);
+ T1 = (T1 & 0xffff0000) | (T0 >> 16);
+ T0 = tmp;
+ FORCE_RET();
+}
diff --git a/target-arm/op_mem.h b/target-arm/op_mem.h
deleted file mode 100644
index 9d4d5c0..0000000
--- a/target-arm/op_mem.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* ARM memory operations. */
-
-#ifdef GEN_TRACE
-/* Load from address T1 into T0. */
-#define MEM_LD_OP(name) \
-void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \
-{ \
- extern int tracing; \
- extern void dcache_load(uint32_t addr); \
- if (tracing) \
- dcache_load(T1); \
- T0 = glue(ld##name,MEMSUFFIX)(T1); \
- FORCE_RET(); \
-}
-#else
-/* Load from address T1 into T0. */
-#define MEM_LD_OP(name) \
-void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \
-{ \
- T0 = glue(ld##name,MEMSUFFIX)(T1); \
- FORCE_RET(); \
-}
-#endif
-
-MEM_LD_OP(ub)
-MEM_LD_OP(sb)
-MEM_LD_OP(uw)
-MEM_LD_OP(sw)
-MEM_LD_OP(l)
-
-#undef MEM_LD_OP
-
-#ifdef GEN_TRACE
-/* Store T0 to address T1. */
-#define MEM_ST_OP(name) \
-void OPPROTO glue(op_st##name,MEMSUFFIX)(void) \
-{ \
- extern int tracing; \
- extern void dcache_store(uint32_t addr, uint32_t val); \
- if (tracing) \
- dcache_store(T1, T0); \
- glue(st##name,MEMSUFFIX)(T1, T0); \
- FORCE_RET(); \
-}
-#else
-/* Store T0 to address T1. */
-#define MEM_ST_OP(name) \
-void OPPROTO glue(op_st##name,MEMSUFFIX)(void) \
-{ \
- glue(st##name,MEMSUFFIX)(T1, T0); \
- FORCE_RET(); \
-}
-#endif
-
-MEM_ST_OP(b)
-MEM_ST_OP(w)
-MEM_ST_OP(l)
-
-#undef MEM_ST_OP
-
-#ifdef GEN_TRACE
-/* Swap T0 with memory at address T1. */
-/* ??? Is this exception safe? */
-#define MEM_SWP_OP(name, lname) \
-void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \
-{ \
- extern int tracing; \
- extern void dcache_swp(uint32_t addr); \
- uint32_t tmp; \
- cpu_lock(); \
- if (tracing) \
- dcache_swp(T1); \
- tmp = glue(ld##lname,MEMSUFFIX)(T1); \
- glue(st##name,MEMSUFFIX)(T1, T0); \
- T0 = tmp; \
- cpu_unlock(); \
- FORCE_RET(); \
-}
-#else
-/* Swap T0 with memory at address T1. */
-/* ??? Is this exception safe? */
-#define MEM_SWP_OP(name, lname) \
-void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \
-{ \
- uint32_t tmp; \
- cpu_lock(); \
- tmp = glue(ld##lname,MEMSUFFIX)(T1); \
- glue(st##name,MEMSUFFIX)(T1, T0); \
- T0 = tmp; \
- cpu_unlock(); \
- FORCE_RET(); \
-}
-#endif
-
-MEM_SWP_OP(b, ub)
-MEM_SWP_OP(l, l)
-
-#undef MEM_SWP_OP
-
-/* Floating point load/store. Address is in T1 */
-#define VFP_MEM_OP(p, w) \
-void OPPROTO glue(op_vfp_ld##p,MEMSUFFIX)(void) \
-{ \
- FT0##p = glue(ldf##w,MEMSUFFIX)(T1); \
- FORCE_RET(); \
-} \
-void OPPROTO glue(op_vfp_st##p,MEMSUFFIX)(void) \
-{ \
- glue(stf##w,MEMSUFFIX)(T1, FT0##p); \
- FORCE_RET(); \
-}
-
-VFP_MEM_OP(s,l)
-VFP_MEM_OP(d,q)
-
-#undef VFP_MEM_OP
-
-#undef MEMSUFFIX
diff --git a/target-arm/op_template.h b/target-arm/op_template.h
deleted file mode 100644
index fb2add1..0000000
--- a/target-arm/op_template.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * ARM micro operations (templates for various register related
- * operations)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef SET_REG
-#define SET_REG(x) REG = x
-#endif
-
-void OPPROTO glue(op_movl_T0_, REGNAME)(void)
-{
- T0 = REG;
-}
-
-void OPPROTO glue(op_movl_T1_, REGNAME)(void)
-{
- T1 = REG;
-}
-
-void OPPROTO glue(op_movl_T2_, REGNAME)(void)
-{
- T2 = REG;
-}
-
-void OPPROTO glue(glue(op_movl_, REGNAME), _T0)(void)
-{
- SET_REG (T0);
-}
-
-void OPPROTO glue(glue(op_movl_, REGNAME), _T1)(void)
-{
- SET_REG (T1);
-}
-
-#undef REG
-#undef REGNAME
-#undef SET_REG
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 663a730..ff27d28 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -2,7 +2,8 @@
* ARM translation
*
* Copyright (c) 2003 Fabrice Bellard
- * Copyright (c) 2005 CodeSourcery, LLC
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -27,18 +28,21 @@
#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
-#ifdef GEN_TRACE
+#include "tcg-op.h"
+#include "qemu-log.h"
+
+#ifdef CONFIG_TRACE
#include "trace.h"
#endif
-typedef int (*gen_intermediate_code_func)(CPUState *env, struct TranslationBlock *tb);
-
-extern gen_intermediate_code_func _gen_intermediate_code;
-extern gen_intermediate_code_func _gen_intermediate_code_pc;
+#define GEN_HELPER 1
+#include "helpers.h"
-#define ENABLE_ARCH_5J 0
-#define ENABLE_ARCH_6 1
-#define ENABLE_ARCH_6T2 1
+#define ENABLE_ARCH_5J 0
+#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
+#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
+#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
+#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
@@ -50,9 +54,13 @@ typedef struct DisasContext {
int condjmp;
/* The label that will be jumped to when the instruction is skipped. */
int condlabel;
+ /* Thumb-2 conditional execution bits. */
+ int condexec_mask;
+ int condexec_cond;
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
+ int is_mem;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
@@ -64,55 +72,731 @@ typedef struct DisasContext {
#define IS_USER(s) (s->user)
#endif
-#define DISAS_JUMP_NEXT 4
-
-#ifdef USE_DIRECT_JUMP
-#define TBPARAM(x)
-#else
-#define TBPARAM(x) (long)(x)
+#ifdef CONFIG_TRACE
+#include "helpers.h"
#endif
-/* XXX: move that elsewhere */
-static uint16_t *gen_opc_ptr;
-static uint32_t *gen_opparam_ptr;
-extern FILE *logfile;
-extern int loglevel;
+/* These instructions trap after executing, so defer them until after the
+ conditional execution state has been updated. */
+#define DISAS_WFI 4
+#define DISAS_SWI 5
-enum {
-#define DEF(s, n, copy_size) INDEX_op_ ## s,
-#ifdef GEN_TRACE
-#include "opc-trace.h"
-#else
-#include "opc.h"
-#endif
-#undef DEF
- NB_OPS,
-};
+static TCGv cpu_env;
+/* We reuse the same 64-bit temporaries for efficiency. */
+static TCGv cpu_V0, cpu_V1, cpu_M0;
+
+/* FIXME: These should be removed. */
+static TCGv cpu_T[2];
+static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
+
+#define ICOUNT_TEMP cpu_T[0]
+#include "gen-icount.h"
+
+/* initialize TCG globals. */
+void arm_translate_init(void)
+{
+ cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
+
+ cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
+ cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
+}
+
+/* The code generator doesn't like lots of temporaries, so maintain our own
+ cache for reuse within a function. */
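+/* new_tmp() hands the cached temps out in stack order; dead_tmp() releases
+ one, shuffling an out-of-order release to the free slot so the cache stays
+ compact. */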
+#define MAX_TEMPS 8
+static int num_temps;
+static TCGv temps[MAX_TEMPS];
+
+/* Allocate a temporary variable. */
+static TCGv new_tmp(void)
+{
+ TCGv tmp;
+ if (num_temps == MAX_TEMPS)
+ abort();
+
+ if (GET_TCGV(temps[num_temps]))
+ return temps[num_temps++];
+
+ tmp = tcg_temp_new(TCG_TYPE_I32);
+ temps[num_temps++] = tmp;
+ return tmp;
+}
+
+/* Release a temporary variable. */
+static void dead_tmp(TCGv tmp)
+{
+ int i;
+ num_temps--;
+ i = num_temps;
+ if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
+ return;
+
+ /* Shuffle this temp to the last slot. */
+ while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
+ i--;
+ while (i < num_temps) {
+ temps[i] = temps[i + 1];
+ i++;
+ }
+ temps[i] = tmp;
+}
+
+static inline TCGv load_cpu_offset(int offset)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_ld_i32(tmp, cpu_env, offset);
+ return tmp;
+}
-#ifdef GEN_TRACE
-#include "gen-op-trace.h"
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
+
+static inline void store_cpu_offset(TCGv var, int offset)
+{
+ tcg_gen_st_i32(var, cpu_env, offset);
+ dead_tmp(var);
+}
+
+#define store_cpu_field(var, name) \
+ store_cpu_offset(var, offsetof(CPUState, name))
+
+/* Set a variable to the value of a CPU register. */
+static void load_reg_var(DisasContext *s, TCGv var, int reg)
+{
+ if (reg == 15) {
+ uint32_t addr;
+ /* normally, since we have already updated the PC, we only need to add one insn */
+ if (s->thumb)
+ addr = (long)s->pc + 2;
+ else
+ addr = (long)s->pc + 4;
+ tcg_gen_movi_i32(var, addr);
+ } else {
+ tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
+ }
+}
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv load_reg(DisasContext *s, int reg)
+{
+ TCGv tmp = new_tmp();
+ load_reg_var(s, tmp, reg);
+ return tmp;
+}
+
+/* Set a CPU register. The source must be a temporary and will be
+ marked as dead. */
+static void store_reg(DisasContext *s, int reg, TCGv var)
+{
+ if (reg == 15) {
+ tcg_gen_andi_i32(var, var, ~1);
+ s->is_jmp = DISAS_JUMP;
+ }
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
+ dead_tmp(var);
+}
+
+
+/* Basic operations. */
+#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
+#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
+#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
+#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
+
+#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
+#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
+
+#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
+
+#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
+#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
+#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0])
+#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1])
+
+#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
+#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
+#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
+#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
+#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
+
+/* Value extensions. */
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
+#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
+#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
+
+#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
+#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
+
+#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
+
+#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
+
+#ifdef CONFIG_TRACE
+static void gen_traceTicks(int count)
+{
+ TCGv t0 = new_tmp();
+ tcg_gen_movi_i32(t0, count);
+ gen_helper_traceTicks(t0);
+ dead_tmp(t0);
+}
+
+static void gen_traceBB(uint64_t bb_num, target_phys_addr_t tb)
+{
+#if HOST_LONG_BITS == 64
+ TCGv t0 = tcg_const_i64(bb_num);
+ TCGv t1 = tcg_const_i64(tb);
+ gen_helper_traceBB64(t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
#else
-#include "gen-op.h"
+ TCGv t0 = new_tmp();
+ TCGv t1 = new_tmp();
+ TCGv t2 = new_tmp();
+ tcg_gen_movi_i32(t0, (int32_t)(bb_num >> 32));
+ tcg_gen_movi_i32(t1, (int32_t)(bb_num));
+ tcg_gen_movi_i32(t2, (int32_t)tb);
+ gen_helper_traceBB32(t0, t1, t2);
+ dead_tmp(t2);
+ dead_tmp(t1);
+ dead_tmp(t0);
#endif
+}
+#endif /* CONFIG_TRACE */
+
+static void gen_exception(int excp)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, excp);
+ gen_helper_exception(tmp);
+ dead_tmp(tmp);
+}
+
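+/* Dual signed 16x16->32 multiply: leaves the low-halfword product in a and
+ the high-halfword product in b, which suits the SMUAD/SMUSD-style dual
+ multiplies. */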
+static void gen_smul_dual(TCGv a, TCGv b)
+{
+ TCGv tmp1 = new_tmp();
+ TCGv tmp2 = new_tmp();
+ tcg_gen_ext16s_i32(tmp1, a);
+ tcg_gen_ext16s_i32(tmp2, b);
+ tcg_gen_mul_i32(tmp1, tmp1, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_sari_i32(a, a, 16);
+ tcg_gen_sari_i32(b, b, 16);
+ tcg_gen_mul_i32(b, b, a);
+ tcg_gen_mov_i32(a, tmp1);
+ dead_tmp(tmp1);
+}
+
+/* Byteswap each halfword. */
+static void gen_rev16(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, var, 8);
+ tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
+ tcg_gen_shli_i32(var, var, 8);
+ tcg_gen_andi_i32(var, var, 0xff00ff00);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+}
+
+/* Byteswap low halfword and sign extend. */
+static void gen_revsh(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, var, 8);
+ tcg_gen_andi_i32(tmp, tmp, 0x00ff);
+ tcg_gen_ext8s_i32(var, var);
+ tcg_gen_shli_i32(var, var, 8);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+}
+
+/* Unsigned bitfield extract. */
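+/* Callers pass the field mask ((1 << width) - 1) rather than the width, so
+ e.g. shift = 4, mask = 0xff extracts bits [11:4]: 0xABCD -> 0xBC. */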
+static void gen_ubfx(TCGv var, int shift, uint32_t mask)
+{
+ if (shift)
+ tcg_gen_shri_i32(var, var, shift);
+ tcg_gen_andi_i32(var, var, mask);
+}
-static GenOpFunc1 *gen_test_cc[14] = {
- gen_op_test_eq,
- gen_op_test_ne,
- gen_op_test_cs,
- gen_op_test_cc,
- gen_op_test_mi,
- gen_op_test_pl,
- gen_op_test_vs,
- gen_op_test_vc,
- gen_op_test_hi,
- gen_op_test_ls,
- gen_op_test_ge,
- gen_op_test_lt,
- gen_op_test_gt,
- gen_op_test_le,
+/* Signed bitfield extract. */
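+/* The xor/sub pair below sign-extends the width-bit field: for width 4, a
+ field value of 0xA becomes (0xA ^ 0x8) - 0x8 = -6. */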
+static void gen_sbfx(TCGv var, int shift, int width)
+{
+ uint32_t signbit;
+
+ if (shift)
+ tcg_gen_sari_i32(var, var, shift);
+ if (shift + width < 32) {
+ signbit = 1u << (width - 1);
+ tcg_gen_andi_i32(var, var, (1u << width) - 1);
+ tcg_gen_xori_i32(var, var, signbit);
+ tcg_gen_subi_i32(var, var, signbit);
+ }
+}
+
+/* Bitfield insertion. Insert val into base. Clobbers base and val. */
+static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
+{
+ tcg_gen_andi_i32(val, val, mask);
+ tcg_gen_shli_i32(val, val, shift);
+ tcg_gen_andi_i32(base, base, ~(mask << shift));
+ tcg_gen_or_i32(dest, base, val);
+}
+
+/* Round the top 32 bits of a 64-bit value. */
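+/* i.e. a = b + (a >> 31): bit 31 of the low word a is the rounding bit that
+ gets added into the high word b. */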
+static void gen_roundqd(TCGv a, TCGv b)
+{
+ tcg_gen_shri_i32(a, a, 31);
+ tcg_gen_add_i32(a, a, b);
+}
+
+/* FIXME: Most targets have native widening multiplication.
+ It would be good to use that instead of a full wide multiply. */
+/* 32x32->64 multiply. Marks inputs as dead. */
+static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
+{
+ TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_extu_i32_i64(tmp1, a);
+ dead_tmp(a);
+ tcg_gen_extu_i32_i64(tmp2, b);
+ dead_tmp(b);
+ tcg_gen_mul_i64(tmp1, tmp1, tmp2);
+ return tmp1;
+}
+
+static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
+{
+ TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext_i32_i64(tmp1, a);
+ dead_tmp(a);
+ tcg_gen_ext_i32_i64(tmp2, b);
+ dead_tmp(b);
+ tcg_gen_mul_i64(tmp1, tmp1, tmp2);
+ return tmp1;
+}
+
+/* Unsigned 32x32->64 multiply. */
+static void gen_op_mull_T0_T1(void)
+{
+ TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
+ tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
+ tcg_gen_mul_i64(tmp1, tmp1, tmp2);
+ tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
+ tcg_gen_shri_i64(tmp1, tmp1, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
+}
+
+/* Signed 32x32->64 multiply. */
+static void gen_imull(TCGv a, TCGv b)
+{
+ TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext_i32_i64(tmp1, a);
+ tcg_gen_ext_i32_i64(tmp2, b);
+ tcg_gen_mul_i64(tmp1, tmp1, tmp2);
+ tcg_gen_trunc_i64_i32(a, tmp1);
+ tcg_gen_shri_i64(tmp1, tmp1, 32);
+ tcg_gen_trunc_i64_i32(b, tmp1);
+}
+#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
+
+/* Swap low and high halfwords. */
+static void gen_swap_half(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, var, 16);
+ tcg_gen_shli_i32(var, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+}
+
+/* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
+ tmp = (t0 ^ t1) & 0x8000;
+ t0 &= ~0x8000;
+ t1 &= ~0x8000;
+ t0 = (t0 + t1) ^ tmp;
+ */
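+/* Clearing bit 15 first keeps a carry out of the low halfword from rippling
+ into the high halfword; the final xor restores the true bit-15 sum. */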
+
+static void gen_add16(TCGv t0, TCGv t1)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andi_i32(tmp, tmp, 0x8000);
+ tcg_gen_andi_i32(t0, t0, ~0x8000);
+ tcg_gen_andi_i32(t1, t1, ~0x8000);
+ tcg_gen_add_i32(t0, t0, t1);
+ tcg_gen_xor_i32(t0, t0, tmp);
+ dead_tmp(tmp);
+ dead_tmp(t1);
+}
+
+#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
+
+/* Set CF to the top bit of var. */
+static void gen_set_CF_bit31(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, var, 31);
+ gen_set_CF(tmp);
+ dead_tmp(tmp);
+}
+
+/* Set N and Z flags from var. */
+static inline void gen_logic_CC(TCGv var)
+{
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
+}
+
+/* T0 += T1 + CF. */
+static void gen_adc_T0_T1(void)
+{
+ TCGv tmp;
+ gen_op_addl_T0_T1();
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
+ dead_tmp(tmp);
+}
+
+/* dest = T0 - T1 + CF - 1. */
+static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_sub_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ tcg_gen_subi_i32(dest, dest, 1);
+ dead_tmp(tmp);
+}
+
+#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
+#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
+
+/* dest = t0 & ~t1. */
+/* FIXME: Implement bic natively. */
+static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_not_i32(tmp, t1);
+ tcg_gen_and_i32(dest, t0, tmp);
+ dead_tmp(tmp);
+}
+/* T0 &= ~T1. Clobbers T1. */
+static inline void gen_op_bicl_T0_T1(void)
+{
+ gen_op_notl_T1();
+ gen_op_andl_T0_T1();
+}
+
+/* FIXME: Implement this natively. */
+#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
+
+/* FIXME: Implement this natively. */
+static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
+{
+ TCGv tmp;
+
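+ /* A rotate by zero is a no-op here; the callers in this file only
+ reach that case with t0 == t1. */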
+ if (i == 0)
+ return;
+
+ tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, t1, i);
+ tcg_gen_shli_i32(t1, t1, 32 - i);
+ tcg_gen_or_i32(t0, t1, tmp);
+ dead_tmp(tmp);
+}
+
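+/* Set CF to bit <shift> of var, i.e. the last bit shifted out; callers pass
+ shift - 1 for right shifts and 32 - shift for left shifts. */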
+static void shifter_out_im(TCGv var, int shift)
+{
+ TCGv tmp = new_tmp();
+ if (shift == 0) {
+ tcg_gen_andi_i32(tmp, var, 1);
+ } else {
+ tcg_gen_shri_i32(tmp, var, shift);
+ if (shift != 31)
+ tcg_gen_andi_i32(tmp, tmp, 1);
+ }
+ gen_set_CF(tmp);
+ dead_tmp(tmp);
+}
+
+/* Shift by immediate. Includes special handling for shift == 0. */
+static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
+{
+ switch (shiftop) {
+ case 0: /* LSL */
+ if (shift != 0) {
+ if (flags)
+ shifter_out_im(var, 32 - shift);
+ tcg_gen_shli_i32(var, var, shift);
+ }
+ break;
+ case 1: /* LSR */
+ if (shift == 0) {
+ if (flags) {
+ tcg_gen_shri_i32(var, var, 31);
+ gen_set_CF(var);
+ }
+ tcg_gen_movi_i32(var, 0);
+ } else {
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ tcg_gen_shri_i32(var, var, shift);
+ }
+ break;
+ case 2: /* ASR */
+ if (shift == 0)
+ shift = 32;
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ if (shift == 32)
+ shift = 31;
+ tcg_gen_sari_i32(var, var, shift);
+ break;
+ case 3: /* ROR/RRX */
+ if (shift != 0) {
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ tcg_gen_rori_i32(var, var, shift);
+ break;
+ } else {
+ TCGv tmp = load_cpu_field(CF);
+ if (flags)
+ shifter_out_im(var, 0);
+ tcg_gen_shri_i32(var, var, 1);
+ tcg_gen_shli_i32(tmp, tmp, 31);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+ }
+ }
};
-static const uint8_t table_logic_cc[16] = {
+static inline void gen_arm_shift_reg(TCGv var, int shiftop,
+ TCGv shift, int flags)
+{
+ if (flags) {
+ switch (shiftop) {
+ case 0: gen_helper_shl_cc(var, var, shift); break;
+ case 1: gen_helper_shr_cc(var, var, shift); break;
+ case 2: gen_helper_sar_cc(var, var, shift); break;
+ case 3: gen_helper_ror_cc(var, var, shift); break;
+ }
+ } else {
+ switch (shiftop) {
+ case 0: gen_helper_shl(var, var, shift); break;
+ case 1: gen_helper_shr(var, var, shift); break;
+ case 2: gen_helper_sar(var, var, shift); break;
+ case 3: gen_helper_ror(var, var, shift); break;
+ }
+ }
+ dead_tmp(shift);
+}
+
+#define PAS_OP(pfx) \
+ switch (op2) { \
+ case 0: gen_pas_helper(glue(pfx,add16)); break; \
+ case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
+ case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
+ case 3: gen_pas_helper(glue(pfx,sub16)); break; \
+ case 4: gen_pas_helper(glue(pfx,add8)); break; \
+ case 7: gen_pas_helper(glue(pfx,sub8)); break; \
+ }
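+/* op1 selects the variant prefix (1 signed, 2 saturating, 3 signed halving,
+ 5 unsigned, 6 unsigned saturating, 7 unsigned halving) and op2 selects the
+ operation; the s/u forms also set the GE flags, hence the env->GE pointer
+ argument. */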
+static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
+{
+ TCGv tmp;
+
+ switch (op1) {
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
+ case 1:
+ tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ PAS_OP(s)
+ break;
+ case 5:
+ tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ PAS_OP(u)
+ break;
+#undef gen_pas_helper
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
+ case 2:
+ PAS_OP(q);
+ break;
+ case 3:
+ PAS_OP(sh);
+ break;
+ case 6:
+ PAS_OP(uq);
+ break;
+ case 7:
+ PAS_OP(uh);
+ break;
+#undef gen_pas_helper
+ }
+}
+#undef PAS_OP
+
+/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
+#define PAS_OP(pfx) \
+ switch (op2) { \
+ case 0: gen_pas_helper(glue(pfx,add8)); break; \
+ case 1: gen_pas_helper(glue(pfx,add16)); break; \
+ case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
+ case 4: gen_pas_helper(glue(pfx,sub8)); break; \
+ case 5: gen_pas_helper(glue(pfx,sub16)); break; \
+ case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
+ }
+static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
+{
+ TCGv tmp;
+
+ switch (op1) {
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
+ case 0:
+ tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ PAS_OP(s)
+ break;
+ case 4:
+ tmp = tcg_temp_new(TCG_TYPE_PTR);
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ PAS_OP(u)
+ break;
+#undef gen_pas_helper
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
+ case 1:
+ PAS_OP(q);
+ break;
+ case 2:
+ PAS_OP(sh);
+ break;
+ case 5:
+ PAS_OP(uq);
+ break;
+ case 6:
+ PAS_OP(uh);
+ break;
+#undef gen_pas_helper
+ }
+}
+#undef PAS_OP
+
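+/* Emit a branch to <label> that is taken when condition <cc> (0..13) holds;
+ AL and NV are expected to be handled by the caller and trip the abort()
+ here. */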
+static void gen_test_cc(int cc, int label)
+{
+ TCGv tmp;
+ TCGv tmp2;
+ int inv;
+
+ switch (cc) {
+ case 0: /* eq: Z */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 1: /* ne: !Z */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ break;
+ case 2: /* cs: C */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ break;
+ case 3: /* cc: !C */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 4: /* mi: N */
+ tmp = load_cpu_field(NF);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 5: /* pl: !N */
+ tmp = load_cpu_field(NF);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 6: /* vs: V */
+ tmp = load_cpu_field(VF);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 7: /* vc: !V */
+ tmp = load_cpu_field(VF);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 8: /* hi: C && !Z */
+ inv = gen_new_label();
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ gen_set_label(inv);
+ break;
+ case 9: /* ls: !C || Z */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 10: /* ge: N == V -> N ^ V == 0 */
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 11: /* lt: N != V -> N ^ V != 0 */
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 12: /* gt: !Z && N == V */
+ inv = gen_new_label();
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ gen_set_label(inv);
+ break;
+ case 13: /* le: Z || N != V */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ default:
+ fprintf(stderr, "Bad condition code 0x%x\n", cc);
+ abort();
+ }
+ dead_tmp(tmp);
+}
+
+const uint8_t table_logic_cc[16] = {
1, /* and */
1, /* xor */
0, /* sub */
@@ -131,223 +815,134 @@ static const uint8_t table_logic_cc[16] = {
1, /* mvn */
};
-static GenOpFunc1 *gen_shift_T1_im[4] = {
- gen_op_shll_T1_im,
- gen_op_shrl_T1_im,
- gen_op_sarl_T1_im,
- gen_op_rorl_T1_im,
-};
-
-static GenOpFunc *gen_shift_T1_0[4] = {
- NULL,
- gen_op_shrl_T1_0,
- gen_op_sarl_T1_0,
- gen_op_rrxl_T1,
-};
-
-static GenOpFunc1 *gen_shift_T2_im[4] = {
- gen_op_shll_T2_im,
- gen_op_shrl_T2_im,
- gen_op_sarl_T2_im,
- gen_op_rorl_T2_im,
-};
-
-static GenOpFunc *gen_shift_T2_0[4] = {
- NULL,
- gen_op_shrl_T2_0,
- gen_op_sarl_T2_0,
- gen_op_rrxl_T2,
-};
-
-static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
- gen_op_shll_T1_im_cc,
- gen_op_shrl_T1_im_cc,
- gen_op_sarl_T1_im_cc,
- gen_op_rorl_T1_im_cc,
-};
-
-static GenOpFunc *gen_shift_T1_0_cc[4] = {
- NULL,
- gen_op_shrl_T1_0_cc,
- gen_op_sarl_T1_0_cc,
- gen_op_rrxl_T1_cc,
-};
-
-static GenOpFunc *gen_shift_T1_T0[4] = {
- gen_op_shll_T1_T0,
- gen_op_shrl_T1_T0,
- gen_op_sarl_T1_T0,
- gen_op_rorl_T1_T0,
-};
-
-static GenOpFunc *gen_shift_T1_T0_cc[4] = {
- gen_op_shll_T1_T0_cc,
- gen_op_shrl_T1_T0_cc,
- gen_op_sarl_T1_T0_cc,
- gen_op_rorl_T1_T0_cc,
-};
-
-static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
- {
- gen_op_movl_T0_r0,
- gen_op_movl_T0_r1,
- gen_op_movl_T0_r2,
- gen_op_movl_T0_r3,
- gen_op_movl_T0_r4,
- gen_op_movl_T0_r5,
- gen_op_movl_T0_r6,
- gen_op_movl_T0_r7,
- gen_op_movl_T0_r8,
- gen_op_movl_T0_r9,
- gen_op_movl_T0_r10,
- gen_op_movl_T0_r11,
- gen_op_movl_T0_r12,
- gen_op_movl_T0_r13,
- gen_op_movl_T0_r14,
- gen_op_movl_T0_r15,
- },
- {
- gen_op_movl_T1_r0,
- gen_op_movl_T1_r1,
- gen_op_movl_T1_r2,
- gen_op_movl_T1_r3,
- gen_op_movl_T1_r4,
- gen_op_movl_T1_r5,
- gen_op_movl_T1_r6,
- gen_op_movl_T1_r7,
- gen_op_movl_T1_r8,
- gen_op_movl_T1_r9,
- gen_op_movl_T1_r10,
- gen_op_movl_T1_r11,
- gen_op_movl_T1_r12,
- gen_op_movl_T1_r13,
- gen_op_movl_T1_r14,
- gen_op_movl_T1_r15,
- },
- {
- gen_op_movl_T2_r0,
- gen_op_movl_T2_r1,
- gen_op_movl_T2_r2,
- gen_op_movl_T2_r3,
- gen_op_movl_T2_r4,
- gen_op_movl_T2_r5,
- gen_op_movl_T2_r6,
- gen_op_movl_T2_r7,
- gen_op_movl_T2_r8,
- gen_op_movl_T2_r9,
- gen_op_movl_T2_r10,
- gen_op_movl_T2_r11,
- gen_op_movl_T2_r12,
- gen_op_movl_T2_r13,
- gen_op_movl_T2_r14,
- gen_op_movl_T2_r15,
- },
-};
+/* Set PC and Thumb state from an immediate address. */
+static inline void gen_bx_im(DisasContext *s, uint32_t addr)
+{
+ TCGv tmp;
-static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
- {
- gen_op_movl_r0_T0,
- gen_op_movl_r1_T0,
- gen_op_movl_r2_T0,
- gen_op_movl_r3_T0,
- gen_op_movl_r4_T0,
- gen_op_movl_r5_T0,
- gen_op_movl_r6_T0,
- gen_op_movl_r7_T0,
- gen_op_movl_r8_T0,
- gen_op_movl_r9_T0,
- gen_op_movl_r10_T0,
- gen_op_movl_r11_T0,
- gen_op_movl_r12_T0,
- gen_op_movl_r13_T0,
- gen_op_movl_r14_T0,
- gen_op_movl_r15_T0,
- },
- {
- gen_op_movl_r0_T1,
- gen_op_movl_r1_T1,
- gen_op_movl_r2_T1,
- gen_op_movl_r3_T1,
- gen_op_movl_r4_T1,
- gen_op_movl_r5_T1,
- gen_op_movl_r6_T1,
- gen_op_movl_r7_T1,
- gen_op_movl_r8_T1,
- gen_op_movl_r9_T1,
- gen_op_movl_r10_T1,
- gen_op_movl_r11_T1,
- gen_op_movl_r12_T1,
- gen_op_movl_r13_T1,
- gen_op_movl_r14_T1,
- gen_op_movl_r15_T1,
- },
-};
+ s->is_jmp = DISAS_UPDATE;
+ tmp = new_tmp();
+ if (s->thumb != (addr & 1)) {
+ tcg_gen_movi_i32(tmp, addr & 1);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
+ }
+ tcg_gen_movi_i32(tmp, addr & ~1);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
+ dead_tmp(tmp);
+}
-static GenOpFunc1 *gen_op_movl_TN_im[3] = {
- gen_op_movl_T0_im,
- gen_op_movl_T1_im,
- gen_op_movl_T2_im,
-};
+/* Set PC and Thumb state from var. var is marked as dead. */
+static inline void gen_bx(DisasContext *s, TCGv var)
+{
+ TCGv tmp;
-static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
- gen_op_shll_T0_im_thumb,
- gen_op_shrl_T0_im_thumb,
- gen_op_sarl_T0_im_thumb,
-};
+ s->is_jmp = DISAS_UPDATE;
+ tmp = new_tmp();
+ tcg_gen_andi_i32(tmp, var, 1);
+ store_cpu_field(tmp, thumb);
+ tcg_gen_andi_i32(var, var, ~1);
+ store_cpu_field(var, regs[15]);
+}
-static inline void gen_bx(DisasContext *s)
+/* TODO: This should be removed. Use gen_bx instead. */
+static inline void gen_bx_T0(DisasContext *s)
{
- s->is_jmp = DISAS_UPDATE;
- gen_op_bx_T0();
+ TCGv tmp = new_tmp();
+ tcg_gen_mov_i32(tmp, cpu_T[0]);
+ gen_bx(s, tmp);
}
-
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
+ s->is_mem = 1; \
if (IS_USER(s)) \
gen_op_##name##_user(); \
else \
gen_op_##name##_kernel(); \
} while (0)
#endif
-
-static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
+static inline TCGv gen_ld8s(TCGv addr, int index)
{
- int val;
-
- if (reg == 15) {
- /* normaly, since we updated PC, we need only to add one insn */
- if (s->thumb)
- val = (long)s->pc + 2;
- else
- val = (long)s->pc + 4;
- gen_op_movl_TN_im[t](val);
- } else {
- gen_op_movl_TN_reg[t][reg]();
- }
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld8s(tmp, addr, index);
+ return tmp;
+}
+static inline TCGv gen_ld8u(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld8u(tmp, addr, index);
+ return tmp;
+}
+static inline TCGv gen_ld16s(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld16s(tmp, addr, index);
+ return tmp;
+}
+static inline TCGv gen_ld16u(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld16u(tmp, addr, index);
+ return tmp;
+}
+static inline TCGv gen_ld32(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ return tmp;
+}
+static inline void gen_st8(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st8(val, addr, index);
+ dead_tmp(val);
+}
+static inline void gen_st16(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st16(val, addr, index);
+ dead_tmp(val);
+}
+static inline void gen_st32(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st32(val, addr, index);
+ dead_tmp(val);
}
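+/* Each gen_ld* helper above returns the loaded value in a fresh temp and
+ each gen_st* helper kills the value temp; <index> is the MMU index,
+ IS_USER(s) at the call sites in this file. */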
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
- gen_movl_TN_reg(s, reg, 0);
+ load_reg_var(s, cpu_T[0], reg);
}
static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
- gen_movl_TN_reg(s, reg, 1);
+ load_reg_var(s, cpu_T[1], reg);
}
static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
- gen_movl_TN_reg(s, reg, 2);
+ load_reg_var(s, cpu_T[2], reg);
+}
+
+static inline void gen_set_pc_im(uint32_t val)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ store_cpu_field(tmp, regs[15]);
}
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
- gen_op_movl_reg_TN[t][reg]();
+ TCGv tmp;
if (reg == 15) {
+ tmp = new_tmp();
+ tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
+ } else {
+ tmp = cpu_T[t];
+ }
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
+ if (reg == 15) {
+ dead_tmp(tmp);
s->is_jmp = DISAS_JUMP;
}
}
@@ -370,9 +965,11 @@ static inline void gen_lookup_tb(DisasContext *s)
s->is_jmp = DISAS_UPDATE;
}
-static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
+static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
+ TCGv var)
{
int val, rm, shift, shiftop;
+ TCGv offset;
if (!(insn & (1 << 25))) {
/* immediate */
@@ -380,29 +977,27 @@ static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
if (!(insn & (1 << 23)))
val = -val;
if (val != 0)
- gen_op_addl_T1_im(val);
+ tcg_gen_addi_i32(var, var, val);
} else {
/* shift/register */
rm = (insn) & 0xf;
shift = (insn >> 7) & 0x1f;
- gen_movl_T2_reg(s, rm);
shiftop = (insn >> 5) & 3;
- if (shift != 0) {
- gen_shift_T2_im[shiftop](shift);
- } else if (shiftop != 0) {
- gen_shift_T2_0[shiftop]();
- }
+ offset = load_reg(s, rm);
+ gen_arm_shift_im(offset, shiftop, shift, 0);
if (!(insn & (1 << 23)))
- gen_op_subl_T1_T2();
+ tcg_gen_sub_i32(var, var, offset);
else
- gen_op_addl_T1_T2();
+ tcg_gen_add_i32(var, var, offset);
+ dead_tmp(offset);
}
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
- int extra)
+ int extra, TCGv var)
{
int val, rm;
+ TCGv offset;
if (insn & (1 << 22)) {
/* immediate */
@@ -411,62 +1006,174 @@ static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
val = -val;
val += extra;
if (val != 0)
- gen_op_addl_T1_im(val);
+ tcg_gen_addi_i32(var, var, val);
} else {
/* register */
if (extra)
- gen_op_addl_T1_im(extra);
+ tcg_gen_addi_i32(var, var, extra);
rm = (insn) & 0xf;
- gen_movl_T2_reg(s, rm);
+ offset = load_reg(s, rm);
if (!(insn & (1 << 23)))
- gen_op_subl_T1_T2();
+ tcg_gen_sub_i32(var, var, offset);
else
- gen_op_addl_T1_T2();
- }
-}
-
-#define VFP_OP(name) \
-static inline void gen_vfp_##name(int dp) \
-{ \
- if (dp) \
- gen_op_vfp_##name##d(); \
- else \
- gen_op_vfp_##name##s(); \
-}
-
-VFP_OP(add)
-VFP_OP(sub)
-VFP_OP(mul)
-VFP_OP(div)
-VFP_OP(neg)
-VFP_OP(abs)
-VFP_OP(sqrt)
-VFP_OP(cmp)
-VFP_OP(cmpe)
-VFP_OP(F1_ld0)
-VFP_OP(uito)
-VFP_OP(sito)
-VFP_OP(toui)
-VFP_OP(touiz)
-VFP_OP(tosi)
-VFP_OP(tosiz)
-
-#undef VFP_OP
+ tcg_gen_add_i32(var, var, offset);
+ dead_tmp(offset);
+ }
+}
+
+#define VFP_OP2(name) \
+static inline void gen_vfp_##name(int dp) \
+{ \
+ if (dp) \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
+ else \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
+}
+
+#define VFP_OP1(name) \
+static inline void gen_vfp_##name(int dp, int arg) \
+{ \
+ if (dp) \
+ gen_op_vfp_##name##d(arg); \
+ else \
+ gen_op_vfp_##name##s(arg); \
+}
+
+VFP_OP2(add)
+VFP_OP2(sub)
+VFP_OP2(mul)
+VFP_OP2(div)
+
+#undef VFP_OP2
+
+static inline void gen_vfp_abs(int dp)
+{
+ if (dp)
+ gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
+ else
+ gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
+}
+
+static inline void gen_vfp_neg(int dp)
+{
+ if (dp)
+ gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
+ else
+ gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
+}
+
+static inline void gen_vfp_sqrt(int dp)
+{
+ if (dp)
+ gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_cmp(int dp)
+{
+ if (dp)
+ gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
+ else
+ gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
+}
+
+static inline void gen_vfp_cmpe(int dp)
+{
+ if (dp)
+ gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
+ else
+ gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
+}
+
+static inline void gen_vfp_F1_ld0(int dp)
+{
+ if (dp)
+ tcg_gen_movi_i64(cpu_F1d, 0);
+ else
+ tcg_gen_movi_i32(cpu_F1s, 0);
+}
+
+static inline void gen_vfp_uito(int dp)
+{
+ if (dp)
+ gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
+ else
+ gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_sito(int dp)
+{
+ if (dp)
+ gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
+ else
+ gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_toui(int dp)
+{
+ if (dp)
+ gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_touiz(int dp)
+{
+ if (dp)
+ gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_tosi(int dp)
+{
+ if (dp)
+ gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_tosiz(int dp)
+{
+ if (dp)
+ gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+#define VFP_GEN_FIX(name) \
+static inline void gen_vfp_##name(int dp, int shift) \
+{ \
+ if (dp) \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
+ else \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
+}
+VFP_GEN_FIX(tosh)
+VFP_GEN_FIX(tosl)
+VFP_GEN_FIX(touh)
+VFP_GEN_FIX(toul)
+VFP_GEN_FIX(shto)
+VFP_GEN_FIX(slto)
+VFP_GEN_FIX(uhto)
+VFP_GEN_FIX(ulto)
+#undef VFP_GEN_FIX
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
if (dp)
- gen_ldst(vfp_ldd, s);
+ tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
else
- gen_ldst(vfp_lds, s);
+ tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}
static inline void gen_vfp_st(DisasContext *s, int dp)
{
if (dp)
- gen_ldst(vfp_std, s);
+ tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
else
- gen_ldst(vfp_sts, s);
+ tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
static inline long
@@ -482,79 +1189,1593 @@ vfp_reg_offset (int dp, int reg)
+ offsetof(CPU_DoubleU, l.lower);
}
}
+
+/* Return the offset of a 32-bit piece of a NEON register.
+ zero is the least significant end of the register. */
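+/* e.g. neon_reg_offset(5, 1) is the high word of d5, which aliases the VFP
+ single s11. */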
+static inline long
+neon_reg_offset (int reg, int n)
+{
+ int sreg;
+ sreg = reg * 2 + n;
+ return vfp_reg_offset(0, sreg);
+}
+
+/* FIXME: Remove these. */
+#define neon_T0 cpu_T[0]
+#define neon_T1 cpu_T[1]
+#define NEON_GET_REG(T, reg, n) \
+ tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
+#define NEON_SET_REG(T, reg, n) \
+ tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
+
+static TCGv neon_load_reg(int reg, int pass)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
+ return tmp;
+}
+
+static void neon_store_reg(int reg, int pass, TCGv var)
+{
+ tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
+ dead_tmp(var);
+}
+
+static inline void neon_load_reg64(TCGv var, int reg)
+{
+ tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
+}
+
+static inline void neon_store_reg64(TCGv var, int reg)
+{
+ tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
+}
+
+#define tcg_gen_ld_f32 tcg_gen_ld_i32
+#define tcg_gen_ld_f64 tcg_gen_ld_i64
+#define tcg_gen_st_f32 tcg_gen_st_i32
+#define tcg_gen_st_f64 tcg_gen_st_i64
+
static inline void gen_mov_F0_vreg(int dp, int reg)
{
if (dp)
- gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
+ tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
else
- gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
static inline void gen_mov_F1_vreg(int dp, int reg)
{
if (dp)
- gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
+ tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
else
- gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
+ tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}
static inline void gen_mov_vreg_F0(int dp, int reg)
{
if (dp)
- gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
+ tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
else
- gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
+ tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
+}
+
+#define ARM_CP_RW_BIT (1 << 20)
+
+static inline void iwmmxt_load_reg(TCGv var, int reg)
+{
+ tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+}
+
+static inline void iwmmxt_store_reg(TCGv var, int reg)
+{
+ tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+}
+
+static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
+{
+ tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+}
+
+static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
+{
+ tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+}
+
+static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
+{
+ tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+}
+
+static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
+{
+ iwmmxt_store_reg(cpu_M0, rn);
+}
+
+static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_M0, rn);
+}
+
+static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+#define IWMMXT_OP(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV_SIZE(name) \
+IWMMXT_OP_ENV(name##b) \
+IWMMXT_OP_ENV(name##w) \
+IWMMXT_OP_ENV(name##l)
+
+#define IWMMXT_OP_ENV1(name) \
+static inline void gen_op_iwmmxt_##name##_M0(void) \
+{ \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
+}
+
+IWMMXT_OP(maddsq)
+IWMMXT_OP(madduq)
+IWMMXT_OP(sadb)
+IWMMXT_OP(sadw)
+IWMMXT_OP(mulslw)
+IWMMXT_OP(mulshw)
+IWMMXT_OP(mululw)
+IWMMXT_OP(muluhw)
+IWMMXT_OP(macsw)
+IWMMXT_OP(macuw)
+
+IWMMXT_OP_ENV_SIZE(unpackl)
+IWMMXT_OP_ENV_SIZE(unpackh)
+
+IWMMXT_OP_ENV1(unpacklub)
+IWMMXT_OP_ENV1(unpackluw)
+IWMMXT_OP_ENV1(unpacklul)
+IWMMXT_OP_ENV1(unpackhub)
+IWMMXT_OP_ENV1(unpackhuw)
+IWMMXT_OP_ENV1(unpackhul)
+IWMMXT_OP_ENV1(unpacklsb)
+IWMMXT_OP_ENV1(unpacklsw)
+IWMMXT_OP_ENV1(unpacklsl)
+IWMMXT_OP_ENV1(unpackhsb)
+IWMMXT_OP_ENV1(unpackhsw)
+IWMMXT_OP_ENV1(unpackhsl)
+
+IWMMXT_OP_ENV_SIZE(cmpeq)
+IWMMXT_OP_ENV_SIZE(cmpgtu)
+IWMMXT_OP_ENV_SIZE(cmpgts)
+
+IWMMXT_OP_ENV_SIZE(mins)
+IWMMXT_OP_ENV_SIZE(minu)
+IWMMXT_OP_ENV_SIZE(maxs)
+IWMMXT_OP_ENV_SIZE(maxu)
+
+IWMMXT_OP_ENV_SIZE(subn)
+IWMMXT_OP_ENV_SIZE(addn)
+IWMMXT_OP_ENV_SIZE(subu)
+IWMMXT_OP_ENV_SIZE(addu)
+IWMMXT_OP_ENV_SIZE(subs)
+IWMMXT_OP_ENV_SIZE(adds)
+
+IWMMXT_OP_ENV(avgb0)
+IWMMXT_OP_ENV(avgb1)
+IWMMXT_OP_ENV(avgw0)
+IWMMXT_OP_ENV(avgw1)
+
+IWMMXT_OP(msadb)
+
+IWMMXT_OP_ENV(packuw)
+IWMMXT_OP_ENV(packul)
+IWMMXT_OP_ENV(packuq)
+IWMMXT_OP_ENV(packsw)
+IWMMXT_OP_ENV(packsl)
+IWMMXT_OP_ENV(packsq)
+
+static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
+{
+ gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
+}
+
+static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
+{
+ gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
+}
+
+static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
+{
+ gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
+}
+
+static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
+}
+
+static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
+{
+ TCGv tmp = tcg_const_i32(shift);
+ gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
+}
+
+static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
+{
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
+ tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
+ tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
+}
+
+static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
+{
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
+ tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
+ tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
+}
+
+static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
+{
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
+ tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
+ if (mask != ~0u)
+ tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
+}
+
+static void gen_op_iwmmxt_set_mup(void)
+{
+ TCGv tmp;
+ tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
+ tcg_gen_ori_i32(tmp, tmp, 2);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
+}
+
+static void gen_op_iwmmxt_set_cup(void)
+{
+ TCGv tmp;
+ tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
+ tcg_gen_ori_i32(tmp, tmp, 1);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
+}
+
+static void gen_op_iwmmxt_setpsr_nz(void)
+{
+ TCGv tmp = new_tmp();
+ gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
+}
+
+static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
+ tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V0, rn);
+ tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
+}
+
+static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
+{
+ tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
+ tcg_gen_extu_i32_i64(cpu_V1, cpu_T[1]);
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ iwmmxt_store_reg(cpu_V0, rn);
+}
+
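+/* Compute the iwMMXt transfer address into T1: an 8-bit immediate offset,
+ scaled by 4 when bit 8 is set, added (bit 23 set) or subtracted, with
+ pre-indexing (bit 24) or post-indexing and optional base writeback
+ (bit 21); returns 1 for an unhandled addressing form. */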
+static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
+{
+ int rd;
+ uint32_t offset;
+
+ rd = (insn >> 16) & 0xf;
+ gen_movl_T1_reg(s, rd);
+
+ offset = (insn & 0xff) << ((insn >> 7) & 2);
+ if (insn & (1 << 24)) {
+ /* Pre indexed */
+ if (insn & (1 << 23))
+ gen_op_addl_T1_im(offset);
+ else
+ gen_op_addl_T1_im(-offset);
+
+ if (insn & (1 << 21))
+ gen_movl_reg_T1(s, rd);
+ } else if (insn & (1 << 21)) {
+ /* Post indexed */
+ if (insn & (1 << 23))
+ gen_op_movl_T0_im(offset);
+ else
+ gen_op_movl_T0_im(-offset);
+ gen_op_addl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ } else if (!(insn & (1 << 23)))
+ return 1;
+ return 0;
+}
+
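+/* Load a shift amount into T0: from wCGR0..wCGR3 when bit 8 is set (anything
+ else returns 1), otherwise from the low word of wRd; the result is masked
+ to <mask>. */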
+static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
+{
+ int rd = (insn >> 0) & 0xf;
+
+ if (insn & (1 << 8)) {
+ if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
+ return 1;
+ else
+ gen_op_iwmmxt_movl_T0_wCx(rd);
+ } else {
+ gen_iwmmxt_movl_T0_T1_wRn(rd);
+ }
+
+ gen_op_movl_T1_im(mask);
+ gen_op_andl_T0_T1();
+ return 0;
+}
+
+/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
+ (i.e. an undefined instruction). */
+static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ int rd, wrd;
+ int rdhi, rdlo, rd0, rd1, i;
+ TCGv tmp;
+
+ if ((insn & 0x0e000e00) == 0x0c000000) {
+ if ((insn & 0x0fe00ff0) == 0x0c400000) {
+ wrd = insn & 0xf;
+ rdlo = (insn >> 12) & 0xf;
+ rdhi = (insn >> 16) & 0xf;
+ if (insn & ARM_CP_RW_BIT) { /* TMRRC */
+ gen_iwmmxt_movl_T0_T1_wRn(wrd);
+ gen_movl_reg_T0(s, rdlo);
+ gen_movl_reg_T1(s, rdhi);
+ } else { /* TMCRR */
+ gen_movl_T0_reg(s, rdlo);
+ gen_movl_T1_reg(s, rdhi);
+ gen_iwmmxt_movl_wRn_T0_T1(wrd);
+ gen_op_iwmmxt_set_mup();
+ }
+ return 0;
+ }
+
+ wrd = (insn >> 12) & 0xf;
+ if (gen_iwmmxt_address(s, insn))
+ return 1;
+ if (insn & ARM_CP_RW_BIT) {
+ if ((insn >> 28) == 0xf) { /* WLDRW wCx */
+ tmp = gen_ld32(cpu_T[1], IS_USER(s));
+ tcg_gen_mov_i32(cpu_T[0], tmp);
+ dead_tmp(tmp);
+ gen_op_iwmmxt_movl_wCx_T0(wrd);
+ } else {
+ i = 1;
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 22)) { /* WLDRD */
+ tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
+ i = 0;
+ } else { /* WLDRW wRd */
+ tmp = gen_ld32(cpu_T[1], IS_USER(s));
+ }
+ } else {
+ if (insn & (1 << 22)) { /* WLDRH */
+ tmp = gen_ld16u(cpu_T[1], IS_USER(s));
+ } else { /* WLDRB */
+ tmp = gen_ld8u(cpu_T[1], IS_USER(s));
+ }
+ }
+ if (i) {
+ tcg_gen_extu_i32_i64(cpu_M0, tmp);
+ dead_tmp(tmp);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ }
+ } else {
+ if ((insn >> 28) == 0xf) { /* WSTRW wCx */
+ gen_op_iwmmxt_movl_T0_wCx(wrd);
+ tmp = new_tmp();
+ tcg_gen_mov_i32(tmp, cpu_T[0]);
+ gen_st32(tmp, cpu_T[1], IS_USER(s));
+ } else {
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = new_tmp();
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 22)) { /* WSTRD */
+ dead_tmp(tmp);
+ tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
+ } else { /* WSTRW wRd */
+ tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ gen_st32(tmp, cpu_T[1], IS_USER(s));
+ }
+ } else {
+ if (insn & (1 << 22)) { /* WSTRH */
+ tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ gen_st16(tmp, cpu_T[1], IS_USER(s));
+ } else { /* WSTRB */
+ tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ gen_st8(tmp, cpu_T[1], IS_USER(s));
+ }
+ }
+ }
+ }
+ return 0;
+ }
+
+ if ((insn & 0x0f000000) != 0x0e000000)
+ return 1;
+
+ switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
+ case 0x000: /* WOR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_orq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x011: /* TMCR */
+ if (insn & 0xf)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ switch (wrd) {
+ case ARM_IWMMXT_wCID:
+ case ARM_IWMMXT_wCASF:
+ break;
+ case ARM_IWMMXT_wCon:
+ gen_op_iwmmxt_set_cup();
+ /* Fall through. */
+ case ARM_IWMMXT_wCSSF:
+ gen_op_iwmmxt_movl_T0_wCx(wrd);
+ gen_movl_T1_reg(s, rd);
+ gen_op_bicl_T0_T1();
+ gen_op_iwmmxt_movl_wCx_T0(wrd);
+ break;
+ case ARM_IWMMXT_wCGR0:
+ case ARM_IWMMXT_wCGR1:
+ case ARM_IWMMXT_wCGR2:
+ case ARM_IWMMXT_wCGR3:
+ gen_op_iwmmxt_set_cup();
+ gen_movl_reg_T0(s, rd);
+ gen_op_iwmmxt_movl_wCx_T0(wrd);
+ break;
+ default:
+ return 1;
+ }
+ break;
+ case 0x100: /* WXOR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_xorq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x111: /* TMRC */
+ if (insn & 0xf)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movl_T0_wCx(wrd);
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x300: /* WANDN */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tcg_gen_not_i64(cpu_M0, cpu_M0);
+ gen_op_iwmmxt_andq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x200: /* WAND */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_andq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x810: case 0xa10: /* WMADD */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maddsq_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_madduq_M0_wRn(rd1);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_unpackll_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 22))
+ gen_op_iwmmxt_sadw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_sadb_M0_wRn(rd1);
+ if (!(insn & (1 << 20)))
+ gen_op_iwmmxt_addl_M0_wRn(wrd);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21)) {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_mulshw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_mulslw_M0_wRn(rd1);
+ } else {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_muluhw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_mululw_M0_wRn(rd1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_macsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_macuw_M0_wRn(rd1);
+ if (!(insn & (1 << 20))) {
+ iwmmxt_load_reg(cpu_V1, wrd);
+ tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 22)) {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_avgw1_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_avgw0_M0_wRn(rd1);
+ } else {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_avgb1_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_avgb0_M0_wRn(rd1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
+ gen_op_movl_T1_im(7);
+ gen_op_andl_T0_T1();
+ gen_op_iwmmxt_align_M0_T0_wRn(rd1);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ gen_movl_T0_reg(s, rd);
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ switch ((insn >> 6) & 3) {
+ case 0:
+ gen_op_movl_T1_im(0xff);
+ gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
+ break;
+ case 1:
+ gen_op_movl_T1_im(0xffff);
+ gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
+ break;
+ case 2:
+ gen_op_movl_T1_im(0xffffffff);
+ gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ if (rd == 15)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & 8)
+ gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
+ else {
+ gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
+ }
+ break;
+ case 1:
+ if (insn & 8)
+ gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
+ else {
+ gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
+ }
+ break;
+ case 2:
+ gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
+ if ((insn & 0x000ff008) != 0x0003f000)
+ return 1;
+ gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
+ break;
+ case 1:
+ gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
+ break;
+ case 2:
+ gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_shll_T1_im(28);
+ gen_set_nzcv(cpu_T[1]);
+ break;
+ case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ gen_movl_T0_reg(s, rd);
+ switch ((insn >> 6) & 3) {
+ case 0:
+ gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
+ break;
+ case 1:
+ gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
+ break;
+ case 2:
+ gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
+ if ((insn & 0x000ff00f) != 0x0003f000)
+ return 1;
+ gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ for (i = 0; i < 7; i ++) {
+ gen_op_shll_T1_im(4);
+ gen_op_andl_T0_T1();
+ }
+ break;
+ case 1:
+ for (i = 0; i < 3; i ++) {
+ gen_op_shll_T1_im(8);
+ gen_op_andl_T0_T1();
+ }
+ break;
+ case 2:
+ gen_op_shll_T1_im(16);
+ gen_op_andl_T0_T1();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_set_nzcv(cpu_T[0]);
+ break;
+ case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
+ break;
+ case 1:
+ gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
+ break;
+ case 2:
+ gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
+ if ((insn & 0x000ff00f) != 0x0003f000)
+ return 1;
+ gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ for (i = 0; i < 7; i ++) {
+ gen_op_shll_T1_im(4);
+ gen_op_orl_T0_T1();
+ }
+ break;
+ case 1:
+ for (i = 0; i < 3; i ++) {
+ gen_op_shll_T1_im(8);
+ gen_op_orl_T0_T1();
+ }
+ break;
+ case 2:
+ gen_op_shll_T1_im(16);
+ gen_op_orl_T0_T1();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_set_nzcv(cpu_T[0]);
+ break;
+ case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
+ rd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ if ((insn & 0xf) != 0)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
+ break;
+ case 1:
+ gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
+ break;
+ case 2:
+ gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
+ case 0x906: case 0xb06: case 0xd06: case 0xf06:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
+ case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsb_M0();
+ else
+ gen_op_iwmmxt_unpacklub_M0();
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsw_M0();
+ else
+ gen_op_iwmmxt_unpackluw_M0();
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsl_M0();
+ else
+ gen_op_iwmmxt_unpacklul_M0();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
+ case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsb_M0();
+ else
+ gen_op_iwmmxt_unpackhub_M0();
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsw_M0();
+ else
+ gen_op_iwmmxt_unpackhuw_M0();
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsl_M0();
+ else
+ gen_op_iwmmxt_unpackhul_M0();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
+ case 0x214: case 0x614: case 0xa14: case 0xe14:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (gen_iwmmxt_shift(insn, 0xff))
+ return 1;
+ switch ((insn >> 22) & 3) {
+ case 0:
+ return 1;
+ case 1:
+ gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 2:
+ gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 3:
+ gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
+ case 0x014: case 0x414: case 0x814: case 0xc14:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (gen_iwmmxt_shift(insn, 0xff))
+ return 1;
+ switch ((insn >> 22) & 3) {
+ case 0:
+ return 1;
+ case 1:
+ gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 2:
+ gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 3:
+ gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
+ case 0x114: case 0x514: case 0x914: case 0xd14:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (gen_iwmmxt_shift(insn, 0xff))
+ return 1;
+ switch ((insn >> 22) & 3) {
+ case 0:
+ return 1;
+ case 1:
+ gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 2:
+ gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 3:
+ gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
+ case 0x314: case 0x714: case 0xb14: case 0xf14:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ return 1;
+ case 1:
+ if (gen_iwmmxt_shift(insn, 0xf))
+ return 1;
+ gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 2:
+ if (gen_iwmmxt_shift(insn, 0x1f))
+ return 1;
+ gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ case 3:
+ if (gen_iwmmxt_shift(insn, 0x3f))
+ return 1;
+ gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
+ case 0x916: case 0xb16: case 0xd16: case 0xf16:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
+ case 0x816: case 0xa16: case 0xc16: case 0xe16:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
+ case 0x402: case 0x502: case 0x602: case 0x702:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_movl_T0_im((insn >> 20) & 3);
+ gen_op_iwmmxt_align_M0_T0_wRn(rd1);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
+ case 0x41a: case 0x51a: case 0x61a: case 0x71a:
+ case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
+ case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 20) & 0xf) {
+ case 0x0:
+ gen_op_iwmmxt_subnb_M0_wRn(rd1);
+ break;
+ case 0x1:
+ gen_op_iwmmxt_subub_M0_wRn(rd1);
+ break;
+ case 0x3:
+ gen_op_iwmmxt_subsb_M0_wRn(rd1);
+ break;
+ case 0x4:
+ gen_op_iwmmxt_subnw_M0_wRn(rd1);
+ break;
+ case 0x5:
+ gen_op_iwmmxt_subuw_M0_wRn(rd1);
+ break;
+ case 0x7:
+ gen_op_iwmmxt_subsw_M0_wRn(rd1);
+ break;
+ case 0x8:
+ gen_op_iwmmxt_subnl_M0_wRn(rd1);
+ break;
+ case 0x9:
+ gen_op_iwmmxt_subul_M0_wRn(rd1);
+ break;
+ case 0xb:
+ gen_op_iwmmxt_subsl_M0_wRn(rd1);
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
+ case 0x41e: case 0x51e: case 0x61e: case 0x71e:
+ case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
+ case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
+ gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
+ case 0x418: case 0x518: case 0x618: case 0x718:
+ case 0x818: case 0x918: case 0xa18: case 0xb18:
+ case 0xc18: case 0xd18: case 0xe18: case 0xf18:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 20) & 0xf) {
+ case 0x0:
+ gen_op_iwmmxt_addnb_M0_wRn(rd1);
+ break;
+ case 0x1:
+ gen_op_iwmmxt_addub_M0_wRn(rd1);
+ break;
+ case 0x3:
+ gen_op_iwmmxt_addsb_M0_wRn(rd1);
+ break;
+ case 0x4:
+ gen_op_iwmmxt_addnw_M0_wRn(rd1);
+ break;
+ case 0x5:
+ gen_op_iwmmxt_adduw_M0_wRn(rd1);
+ break;
+ case 0x7:
+ gen_op_iwmmxt_addsw_M0_wRn(rd1);
+ break;
+ case 0x8:
+ gen_op_iwmmxt_addnl_M0_wRn(rd1);
+ break;
+ case 0x9:
+ gen_op_iwmmxt_addul_M0_wRn(rd1);
+ break;
+ case 0xb:
+ gen_op_iwmmxt_addsl_M0_wRn(rd1);
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
+ case 0x408: case 0x508: case 0x608: case 0x708:
+ case 0x808: case 0x908: case 0xa08: case 0xb08:
+ case 0xc08: case 0xd08: case 0xe08: case 0xf08:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (!(insn & (1 << 20)))
+ return 1;
+ switch ((insn >> 22) & 3) {
+ case 0:
+ return 1;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packul_M0_wRn(rd1);
+ break;
+ case 3:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsq_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packuq_M0_wRn(rd1);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x201: case 0x203: case 0x205: case 0x207:
+ case 0x209: case 0x20b: case 0x20d: case 0x20f:
+ case 0x211: case 0x213: case 0x215: case 0x217:
+ case 0x219: case 0x21b: case 0x21d: case 0x21f:
+ wrd = (insn >> 5) & 0xf;
+ rd0 = (insn >> 12) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ if (rd0 == 0xf || rd1 == 0xf)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ switch ((insn >> 16) & 0xf) {
+ case 0x0: /* TMIA */
+ gen_movl_T0_reg(s, rd0);
+ gen_movl_T1_reg(s, rd1);
+ gen_op_iwmmxt_muladdsl_M0_T0_T1();
+ break;
+ case 0x8: /* TMIAPH */
+ gen_movl_T0_reg(s, rd0);
+ gen_movl_T1_reg(s, rd1);
+ gen_op_iwmmxt_muladdsw_M0_T0_T1();
+ break;
+ case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
+ gen_movl_T1_reg(s, rd0);
+ if (insn & (1 << 16))
+ gen_op_shrl_T1_im(16);
+ gen_op_movl_T0_T1();
+ gen_movl_T1_reg(s, rd1);
+ if (insn & (1 << 17))
+ gen_op_shrl_T1_im(16);
+ gen_op_iwmmxt_muladdswl_M0_T0_T1();
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
+ (i.e. an undefined instruction). */
+static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ int acc, rd0, rd1, rdhi, rdlo;
+
+ if ((insn & 0x0ff00f10) == 0x0e200010) {
+ /* Multiply with Internal Accumulate Format */
+ rd0 = (insn >> 12) & 0xf;
+ rd1 = insn & 0xf;
+ acc = (insn >> 5) & 7;
+
+ if (acc != 0)
+ return 1;
+
+ switch ((insn >> 16) & 0xf) {
+ case 0x0: /* MIA */
+ gen_movl_T0_reg(s, rd0);
+ gen_movl_T1_reg(s, rd1);
+ gen_op_iwmmxt_muladdsl_M0_T0_T1();
+ break;
+ case 0x8: /* MIAPH */
+ gen_movl_T0_reg(s, rd0);
+ gen_movl_T1_reg(s, rd1);
+ gen_op_iwmmxt_muladdsw_M0_T0_T1();
+ break;
+ case 0xc: /* MIABB */
+ case 0xd: /* MIABT */
+ case 0xe: /* MIATB */
+ case 0xf: /* MIATT */
+ gen_movl_T1_reg(s, rd0);
+ if (insn & (1 << 16))
+ gen_op_shrl_T1_im(16);
+ gen_op_movl_T0_T1();
+ gen_movl_T1_reg(s, rd1);
+ if (insn & (1 << 17))
+ gen_op_shrl_T1_im(16);
+ gen_op_iwmmxt_muladdswl_M0_T0_T1();
+ break;
+ default:
+ return 1;
+ }
+
+ gen_op_iwmmxt_movq_wRn_M0(acc);
+ return 0;
+ }
+
+ if ((insn & 0x0fe00ff8) == 0x0c400000) {
+ /* Internal Accumulator Access Format */
+ rdhi = (insn >> 16) & 0xf;
+ rdlo = (insn >> 12) & 0xf;
+ acc = insn & 7;
+
+ if (acc != 0)
+ return 1;
+
+ if (insn & ARM_CP_RW_BIT) { /* MRA */
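+ /* The accumulator holds a 40-bit value: rdlo receives bits 31:0
+ and rdhi the top byte, masked with (1 << (40 - 32)) - 1 == 0xff. */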
+ gen_iwmmxt_movl_T0_T1_wRn(acc);
+ gen_movl_reg_T0(s, rdlo);
+ gen_op_movl_T0_im((1 << (40 - 32)) - 1);
+ gen_op_andl_T0_T1();
+ gen_movl_reg_T0(s, rdhi);
+ } else { /* MAR */
+ gen_movl_T0_reg(s, rdlo);
+ gen_movl_T1_reg(s, rdhi);
+ gen_iwmmxt_movl_wRn_T0_T1(acc);
+ }
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Disassemble system coprocessor instruction. Return nonzero if
+ instruction is not defined. */
+static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
+{
+ TCGv tmp;
+ uint32_t rd = (insn >> 12) & 0xf;
+ uint32_t cp = (insn >> 8) & 0xf;
+ if (IS_USER(s)) {
+ return 1;
+ }
+
+ if (insn & ARM_CP_RW_BIT) {
+ if (!env->cp[cp].cp_read)
+ return 1;
+ gen_set_pc_im(s->pc);
+ tmp = new_tmp();
+ gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
+ store_reg(s, rd, tmp);
+ } else {
+ if (!env->cp[cp].cp_write)
+ return 1;
+ gen_set_pc_im(s->pc);
+ tmp = load_reg(s, rd);
+ gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
+ dead_tmp(tmp);
+ }
+ return 0;
+}
+
+static int cp15_user_ok(uint32_t insn)
+{
+ int cpn = (insn >> 16) & 0xf;
+ int cpm = insn & 0xf;
+ int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
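+ /* op packs opc2 (insn[7:5]) into bits 2:0 and opc1 (insn[23:21])
+ into bits 5:3, so both fields can be tested with one value. */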
+
+ if (cpn == 13 && cpm == 0) {
+ /* TLS register. */
+ if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
+ return 1;
+ }
+ if (cpn == 7) {
+ /* ISB, DSB, DMB. */
+ if ((cpm == 5 && op == 4)
+ || (cpm == 10 && (op == 4 || op == 5)))
+ return 1;
+ }
+ return 0;
}
/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
instruction is not defined. */
-static int disas_cp15_insn(DisasContext *s, uint32_t insn)
+static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
uint32_t rd;
+ TCGv tmp;
- /* ??? Some cp15 registers are accessible from userspace. */
- if (IS_USER(s)) {
+ /* M profile cores use memory mapped registers instead of cp15. */
+ if (arm_feature(env, ARM_FEATURE_M))
+ return 1;
+
+ if ((insn & (1 << 25)) == 0) {
+ if (insn & (1 << 20)) {
+ /* mrrc */
+ return 1;
+ }
+ /* mcrr. Used for block cache operations, so implement as no-op. */
+ return 0;
+ }
+ if ((insn & (1 << 4)) == 0) {
+ /* cdp */
+ return 1;
+ }
+ if (IS_USER(s) && !cp15_user_ok(insn)) {
return 1;
}
if ((insn & 0x0fff0fff) == 0x0e070f90
|| (insn & 0x0fff0fff) == 0x0e070f58) {
/* Wait for interrupt. */
- gen_op_movl_T0_im((long)s->pc);
- gen_op_movl_reg_TN[0][15]();
- gen_op_wfi();
- s->is_jmp = DISAS_JUMP;
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_WFI;
return 0;
}
rd = (insn >> 12) & 0xf;
- if (insn & (1 << 20)) {
- gen_op_movl_T0_cp15(insn);
+ if (insn & ARM_CP_RW_BIT) {
+ tmp = new_tmp();
+ gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
/* If the destination register is r15 then sets condition codes. */
if (rd != 15)
- gen_movl_reg_T0(s, rd);
+ store_reg(s, rd, tmp);
+ else
+ dead_tmp(tmp);
} else {
- gen_movl_T0_reg(s, rd);
- gen_op_movl_cp15_T0(insn);
+ tmp = load_reg(s, rd);
+ gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
+ dead_tmp(tmp);
+ /* Normally we would always end the TB here, but Linux
+ * arch/arm/mach-pxa/sleep.S expects two instructions following
+ * an MMU enable to execute from cache. Imitate this behaviour. */
+ if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
+ (insn & 0x0fff0fff) != 0x0e010f10)
+ gen_lookup_tb(s);
}
- gen_lookup_tb(s);
return 0;
}
+#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
+#define VFP_SREG(insn, bigbit, smallbit) \
+ ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
+#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
+ if (arm_feature(env, ARM_FEATURE_VFP3)) { \
+ reg = (((insn) >> (bigbit)) & 0x0f) \
+ | (((insn) >> ((smallbit) - 4)) & 0x10); \
+ } else { \
+ if (insn & (1 << (smallbit))) \
+ return 1; \
+ reg = ((insn) >> (bigbit)) & 0x0f; \
+ }} while (0)
+
+#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
+#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
+#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
+#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
+#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
+#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
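+
+/* Illustrative example: VFP_DREG_D() builds a double-precision register
+ number from insn[15:12] with insn[22] as bit 4, so insn[22] == 1 and
+ insn[15:12] == 0x3 select d19 on VFP3; earlier VFP versions only have
+ d0-d15, so a set top bit is rejected as UNDEF. */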
+
+/* Move between integer and VFP cores. */
+static TCGv gen_vfp_mrs(void)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_mov_i32(tmp, cpu_F0s);
+ return tmp;
+}
+
+static void gen_vfp_msr(TCGv tmp)
+{
+ tcg_gen_mov_i32(cpu_F0s, tmp);
+ dead_tmp(tmp);
+}
+
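+/* FPEXC bit 30 is the EN (enable) bit; while it is clear, all VFP and
+ NEON instructions other than a few FMXR/FMRX forms are undefined. */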
+static inline int
+vfp_enabled(CPUState * env)
+{
+ return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
+}
+
+static void gen_neon_dup_u8(TCGv var, int shift)
+{
+ TCGv tmp = new_tmp();
+ if (shift)
+ tcg_gen_shri_i32(var, var, shift);
+ tcg_gen_ext8u_i32(var, var);
+ tcg_gen_shli_i32(tmp, var, 8);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_gen_shli_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+}
+
+static void gen_neon_dup_low16(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_ext16u_i32(var, var);
+ tcg_gen_shli_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+}
+
+static void gen_neon_dup_high16(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_andi_i32(var, var, 0xffff0000);
+ tcg_gen_shri_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+}
+
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
 (i.e. an undefined instruction). */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
int dp, veclen;
+ TCGv tmp;
+ TCGv tmp2;
if (!arm_feature(env, ARM_FEATURE_VFP))
return 1;
- if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
- /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
+ if (!vfp_enabled(env)) {
+ /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
if ((insn & 0x0fe00fff) != 0x0ee00a10)
return 1;
rn = (insn >> 16) & 0xf;
- if (rn != 0 && rn != 8)
+ if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
+ && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
return 1;
}
dp = ((insn & 0xf00) == 0xb00);
@@ -562,91 +2783,188 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 0xe:
if (insn & (1 << 4)) {
/* single register transfer */
- if ((insn & 0x6f) != 0x00)
- return 1;
rd = (insn >> 12) & 0xf;
if (dp) {
- if (insn & 0x80)
+ int size;
+ int pass;
+
+ VFP_DREG_N(rn, insn);
+ if (insn & 0xf)
return 1;
- rn = (insn >> 16) & 0xf;
- /* Get the existing value even for arm->vfp moves because
- we only set half the register. */
- gen_mov_F0_vreg(1, rn);
- gen_op_vfp_mrrd();
- if (insn & (1 << 20)) {
+ if (insn & 0x00c00060
+ && !arm_feature(env, ARM_FEATURE_NEON))
+ return 1;
+
+ pass = (insn >> 21) & 1;
+ if (insn & (1 << 22)) {
+ size = 0;
+ offset = ((insn >> 5) & 3) * 8;
+ } else if (insn & (1 << 5)) {
+ size = 1;
+ offset = (insn & (1 << 6)) ? 16 : 0;
+ } else {
+ size = 2;
+ offset = 0;
+ }
+ if (insn & ARM_CP_RW_BIT) {
/* vfp->arm */
- if (insn & (1 << 21))
- gen_movl_reg_T1(s, rd);
- else
- gen_movl_reg_T0(s, rd);
+ tmp = neon_load_reg(rn, pass);
+ switch (size) {
+ case 0:
+ if (offset)
+ tcg_gen_shri_i32(tmp, tmp, offset);
+ if (insn & (1 << 23))
+ gen_uxtb(tmp);
+ else
+ gen_sxtb(tmp);
+ break;
+ case 1:
+ if (insn & (1 << 23)) {
+ if (offset) {
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ } else {
+ gen_uxth(tmp);
+ }
+ } else {
+ if (offset) {
+ tcg_gen_sari_i32(tmp, tmp, 16);
+ } else {
+ gen_sxth(tmp);
+ }
+ }
+ break;
+ case 2:
+ break;
+ }
+ store_reg(s, rd, tmp);
} else {
/* arm->vfp */
- if (insn & (1 << 21))
- gen_movl_T1_reg(s, rd);
- else
- gen_movl_T0_reg(s, rd);
- gen_op_vfp_mdrr();
- gen_mov_vreg_F0(dp, rn);
+ tmp = load_reg(s, rd);
+ if (insn & (1 << 23)) {
+ /* VDUP */
+ if (size == 0) {
+ gen_neon_dup_u8(tmp, 0);
+ } else if (size == 1) {
+ gen_neon_dup_low16(tmp);
+ }
+ tmp2 = new_tmp();
+ tcg_gen_mov_i32(tmp2, tmp);
+ neon_store_reg(rn, 0, tmp2);
+ neon_store_reg(rn, 1, tmp);
+ } else {
+ /* VMOV */
+ switch (size) {
+ case 0:
+ tmp2 = neon_load_reg(rn, pass);
+ gen_bfi(tmp, tmp2, tmp, offset, 0xff);
+ dead_tmp(tmp2);
+ break;
+ case 1:
+ tmp2 = neon_load_reg(rn, pass);
+ gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
+ dead_tmp(tmp2);
+ break;
+ case 2:
+ break;
+ }
+ neon_store_reg(rn, pass, tmp);
+ }
}
- } else {
- rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
- if (insn & (1 << 20)) {
+ } else { /* !dp */
+ if ((insn & 0x6f) != 0x00)
+ return 1;
+ rn = VFP_SREG_N(insn);
+ if (insn & ARM_CP_RW_BIT) {
/* vfp->arm */
if (insn & (1 << 21)) {
/* system register */
rn >>= 1;
+
switch (rn) {
case ARM_VFP_FPSID:
+ /* VFP2 allows access to FPSID from userspace.
+ VFP3 restricts all id registers to privileged
+ accesses. */
+ if (IS_USER(s)
+ && arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ tmp = load_cpu_field(vfp.xregs[rn]);
+ break;
case ARM_VFP_FPEXC:
+ if (IS_USER(s))
+ return 1;
+ tmp = load_cpu_field(vfp.xregs[rn]);
+ break;
case ARM_VFP_FPINST:
case ARM_VFP_FPINST2:
- gen_op_vfp_movl_T0_xreg(rn);
+ /* Not present in VFP3. */
+ if (IS_USER(s)
+ || arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ tmp = load_cpu_field(vfp.xregs[rn]);
break;
case ARM_VFP_FPSCR:
- if (rd == 15)
- gen_op_vfp_movl_T0_fpscr_flags();
- else
- gen_op_vfp_movl_T0_fpscr();
+ if (rd == 15) {
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+ } else {
+ tmp = new_tmp();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ }
+ break;
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ if (IS_USER(s)
+ || !arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ tmp = load_cpu_field(vfp.xregs[rn]);
break;
default:
return 1;
}
} else {
gen_mov_F0_vreg(0, rn);
- gen_op_vfp_mrs();
+ tmp = gen_vfp_mrs();
}
if (rd == 15) {
/* Set the 4 flag bits in the CPSR. */
- gen_op_movl_cpsr_T0(0xf0000000);
- } else
- gen_movl_reg_T0(s, rd);
+ gen_set_nzcv(tmp);
+ dead_tmp(tmp);
+ } else {
+ store_reg(s, rd, tmp);
+ }
} else {
/* arm->vfp */
- gen_movl_T0_reg(s, rd);
+ tmp = load_reg(s, rd);
if (insn & (1 << 21)) {
rn >>= 1;
/* system register */
switch (rn) {
case ARM_VFP_FPSID:
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
/* Writes are ignored. */
break;
case ARM_VFP_FPSCR:
- gen_op_vfp_movl_fpscr_T0();
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ dead_tmp(tmp);
gen_lookup_tb(s);
break;
case ARM_VFP_FPEXC:
- gen_op_vfp_movl_xreg_T0(rn);
+ if (IS_USER(s))
+ return 1;
+ store_cpu_field(tmp, vfp.xregs[rn]);
gen_lookup_tb(s);
break;
case ARM_VFP_FPINST:
case ARM_VFP_FPINST2:
- gen_op_vfp_movl_xreg_T0(rn);
+ store_cpu_field(tmp, vfp.xregs[rn]);
break;
default:
return 1;
}
} else {
- gen_op_vfp_msr();
+ gen_vfp_msr(tmp);
gen_mov_vreg_F0(0, rn);
}
}
@@ -661,38 +2979,31 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
} else {
/* rn is register number */
- if (insn & (1 << 7))
- return 1;
- rn = (insn >> 16) & 0xf;
+ VFP_DREG_N(rn, insn);
}
if (op == 15 && (rn == 15 || rn > 17)) {
/* Integer or single precision destination. */
- rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
+ rd = VFP_SREG_D(insn);
} else {
- if (insn & (1 << 22))
- return 1;
- rd = (insn >> 12) & 0xf;
+ VFP_DREG_D(rd, insn);
}
if (op == 15 && (rn == 16 || rn == 17)) {
/* Integer source. */
rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
} else {
- if (insn & (1 << 5))
- return 1;
- rm = insn & 0xf;
+ VFP_DREG_M(rm, insn);
}
} else {
- rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
+ rn = VFP_SREG_N(insn);
if (op == 15 && rn == 15) {
/* Double precision destination. */
- if (insn & (1 << 22))
- return 1;
- rd = (insn >> 12) & 0xf;
- } else
- rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
- rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
+ VFP_DREG_D(rd, insn);
+ } else {
+ rd = VFP_SREG_D(insn);
+ }
+ rm = VFP_SREG_M(insn);
}
veclen = env->vfp.vec_len;
@@ -750,9 +3061,17 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
gen_mov_F0_vreg(dp, rd);
gen_vfp_F1_ld0(dp);
break;
+ case 20:
+ case 21:
+ case 22:
+ case 23:
+ /* Source and destination the same. */
+ gen_mov_F0_vreg(dp, rd);
+ break;
default:
/* One source operand. */
gen_mov_F0_vreg(dp, rm);
+ break;
}
} else {
/* Two source operands. */
@@ -801,6 +3120,28 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 8: /* div: fn / fm */
gen_vfp_div(dp);
break;
+ case 14: /* fconst */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+
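+ /* VFP3 VMOV (immediate): expand the 8-bit pattern abcdefgh into a
+ full float/double constant (sign from a, a short exponent derived
+ from b, and a 4-bit fraction), as reconstructed below. */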
+ n = (insn << 12) & 0x80000000;
+ i = ((insn >> 12) & 0x70) | (insn & 0xf);
+ if (dp) {
+ if (i & 0x40)
+ i |= 0x3f80;
+ else
+ i |= 0x4000;
+ n |= i << 16;
+ tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
+ } else {
+ if (i & 0x40)
+ i |= 0x780;
+ else
+ i |= 0x800;
+ n |= i << 19;
+ tcg_gen_movi_i32(cpu_F0s, n);
+ }
+ break;
case 15: /* extension space */
switch (rn) {
case 0: /* cpy */
@@ -830,9 +3171,9 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
case 15: /* single<->double conversion */
if (dp)
- gen_op_vfp_fcvtsd();
+ gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
else
- gen_op_vfp_fcvtds();
+ gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
break;
case 16: /* fuito */
gen_vfp_uito(dp);
@@ -840,6 +3181,26 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 17: /* fsito */
gen_vfp_sito(dp);
break;
+ case 20: /* fshto */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_shto(dp, rm);
+ break;
+ case 21: /* fslto */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_slto(dp, rm);
+ break;
+ case 22: /* fuhto */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_uhto(dp, rm);
+ break;
+ case 23: /* fulto */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_ulto(dp, rm);
+ break;
case 24: /* ftoui */
gen_vfp_toui(dp);
break;
@@ -852,6 +3213,26 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
case 27: /* ftosiz */
gen_vfp_tosiz(dp);
break;
+ case 28: /* ftosh */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_tosh(dp, rm);
+ break;
+ case 29: /* ftosl */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_tosl(dp, rm);
+ break;
+ case 30: /* ftouh */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_touh(dp, rm);
+ break;
+ case 31: /* ftoul */
+ if (!arm_feature(env, ARM_FEATURE_VFP3))
+ return 1;
+ gen_vfp_toul(dp, rm);
+ break;
default: /* undefined */
printf ("rn:%d\n", rn);
return 1;
@@ -913,45 +3294,48 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
break;
case 0xc:
case 0xd:
- if (dp && (insn & (1 << 22))) {
+ if (dp && (insn & 0x03e00000) == 0x00400000) {
/* two-register transfer */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
if (dp) {
- if (insn & (1 << 5))
- return 1;
- rm = insn & 0xf;
- } else
- rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
+ VFP_DREG_M(rm, insn);
+ } else {
+ rm = VFP_SREG_M(insn);
+ }
- if (insn & (1 << 20)) {
+ if (insn & ARM_CP_RW_BIT) {
/* vfp->arm */
if (dp) {
- gen_mov_F0_vreg(1, rm);
- gen_op_vfp_mrrd();
- gen_movl_reg_T0(s, rd);
- gen_movl_reg_T1(s, rn);
+ gen_mov_F0_vreg(0, rm * 2);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rd, tmp);
+ gen_mov_F0_vreg(0, rm * 2 + 1);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rn, tmp);
} else {
gen_mov_F0_vreg(0, rm);
- gen_op_vfp_mrs();
- gen_movl_reg_T0(s, rn);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rn, tmp);
gen_mov_F0_vreg(0, rm + 1);
- gen_op_vfp_mrs();
- gen_movl_reg_T0(s, rd);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rd, tmp);
}
} else {
/* arm->vfp */
if (dp) {
- gen_movl_T0_reg(s, rd);
- gen_movl_T1_reg(s, rn);
- gen_op_vfp_mdrr();
- gen_mov_vreg_F0(1, rm);
+ tmp = load_reg(s, rd);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rm * 2);
+ tmp = load_reg(s, rn);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rm * 2 + 1);
} else {
- gen_movl_T0_reg(s, rn);
- gen_op_vfp_msr();
+ tmp = load_reg(s, rn);
+ gen_vfp_msr(tmp);
gen_mov_vreg_F0(0, rm);
- gen_movl_T0_reg(s, rd);
- gen_op_vfp_msr();
+ tmp = load_reg(s, rd);
+ gen_vfp_msr(tmp);
gen_mov_vreg_F0(0, rm + 1);
}
}
@@ -959,10 +3343,14 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
/* Load/store */
rn = (insn >> 16) & 0xf;
if (dp)
- rd = (insn >> 12) & 0xf;
+ VFP_DREG_D(rd, insn);
else
- rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
- gen_movl_T1_reg(s, rn);
+ rd = VFP_SREG_D(insn);
+ if (s->thumb && rn == 15) {
+ gen_op_movl_T1_im(s->pc & ~2);
+ } else {
+ gen_movl_T1_reg(s, rn);
+ }
if ((insn & 0x01200000) == 0x01000000) {
/* Single load/store */
offset = (insn & 0xff) << 2;
@@ -991,7 +3379,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
else
offset = 4;
for (i = 0; i < n; i++) {
- if (insn & (1 << 20)) {
+ if (insn & ARM_CP_RW_BIT) {
/* load */
gen_vfp_ld(s, dp);
gen_mov_vreg_F0(dp, rd + i);
@@ -1031,51 +3419,43 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
tb = s->tb;
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
- if (n == 0)
- gen_op_goto_tb0(TBPARAM(tb));
- else
- gen_op_goto_tb1(TBPARAM(tb));
- gen_op_movl_T0_im(dest);
- gen_op_movl_r15_T0();
- gen_op_movl_T0_im((long)tb + n);
- gen_op_exit_tb();
+ tcg_gen_goto_tb(n);
+ gen_set_pc_im(dest);
+ tcg_gen_exit_tb((long)tb + n);
} else {
- gen_op_movl_T0_im(dest);
- gen_op_movl_r15_T0();
- gen_op_movl_T0_0();
- gen_op_exit_tb();
+ gen_set_pc_im(dest);
+ tcg_gen_exit_tb(0);
}
}
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
- if (__builtin_expect(s->singlestep_enabled, 0)) {
+ if (unlikely(s->singlestep_enabled)) {
/* An indirect jump so that we still trigger the debug exception. */
if (s->thumb)
- dest |= 1;
- gen_op_movl_T0_im(dest);
- gen_bx(s);
+ dest |= 1;
+ gen_bx_im(s, dest);
} else {
gen_goto_tb(s, 0, dest);
s->is_jmp = DISAS_TB_JUMP;
}
}
-static inline void gen_mulxy(int x, int y)
+static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
if (x)
- gen_op_sarl_T0_im(16);
+ tcg_gen_sari_i32(t0, t0, 16);
else
- gen_op_sxth_T0();
+ gen_sxth(t0);
if (y)
- gen_op_sarl_T1_im(16);
+ tcg_gen_sari_i32(t1, t1, 16);
else
- gen_op_sxth_T1();
- gen_op_mul_T0_T1();
+ gen_sxth(t1);
+ tcg_gen_mul_i32(t0, t0, t1);
}
/* Return the mask of PSR bits set by a MSR instruction. */
-static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
+static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
uint32_t mask;
mask = 0;
@@ -1087,130 +3467,2486 @@ static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
mask |= 0xff0000;
if (flags & (1 << 3))
mask |= 0xff000000;
+
/* Mask out undefined bits. */
- mask &= 0xf90f03ff;
- /* Mask out state bits. */
+ mask &= ~CPSR_RESERVED;
+ if (!arm_feature(env, ARM_FEATURE_V6))
+ mask &= ~(CPSR_E | CPSR_GE);
+ if (!arm_feature(env, ARM_FEATURE_THUMB2))
+ mask &= ~CPSR_IT;
+ /* Mask out execution state bits. */
if (!spsr)
- mask &= ~0x01000020;
+ mask &= ~CPSR_EXEC;
/* Mask out privileged bits. */
if (IS_USER(s))
- mask &= 0xf80f0200;
+ mask &= CPSR_USER;
return mask;
}
/* Returns nonzero if access to the PSR is not permitted. */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
+ TCGv tmp;
if (spsr) {
/* ??? This is also undefined in system mode. */
if (IS_USER(s))
return 1;
- gen_op_movl_spsr_T0(mask);
+
+ tmp = load_cpu_field(spsr);
+ tcg_gen_andi_i32(tmp, tmp, ~mask);
+ tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
+ tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
+ store_cpu_field(tmp, spsr);
} else {
- gen_op_movl_cpsr_T0(mask);
+ gen_set_cpsr(cpu_T[0], mask);
}
gen_lookup_tb(s);
return 0;
}
+/* Generate an old-style exception return. */
static void gen_exception_return(DisasContext *s)
{
- gen_op_movl_reg_TN[0][15]();
- gen_op_movl_T0_spsr();
- gen_op_movl_cpsr_T0(0xffffffff);
+ TCGv tmp;
+ gen_movl_reg_T0(s, 15);
+ tmp = load_cpu_field(spsr);
+ gen_set_cpsr(tmp, 0xffffffff);
+ dead_tmp(tmp);
s->is_jmp = DISAS_UPDATE;
}
+/* Generate a v6 exception return. Marks both values as dead. */
+static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
+{
+ gen_set_cpsr(cpsr, 0xffffffff);
+ dead_tmp(cpsr);
+ store_reg(s, 15, pc);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+static inline void
+gen_set_condexec (DisasContext *s)
+{
+ if (s->condexec_mask) {
+ uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+ TCGv tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ store_cpu_field(tmp, condexec_bits);
+ }
+}
+
+static void gen_nop_hint(DisasContext *s, int val)
+{
+ switch (val) {
+ case 3: /* wfi */
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_WFI;
+ break;
+ case 2: /* wfe */
+ case 4: /* sev */
+ /* TODO: Implement SEV and WFE. May help SMP performance. */
+ default: /* nop */
+ break;
+ }
+}
+
+/* These macros help make the code more readable when migrating from the
+ old dyngen helpers. They should probably be removed when
+ T0/T1 are removed. */
+#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
+#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
+
+#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
+
+static inline int gen_neon_add(int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_add_u8(CPU_T001); break;
+ case 1: gen_helper_neon_add_u16(CPU_T001); break;
+ case 2: gen_op_addl_T0_T1(); break;
+ default: return 1;
+ }
+ return 0;
+}
+
+static inline void gen_neon_rsb(int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
+ case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
+ case 2: gen_op_rsbl_T0_T1(); break;
+ default: return;
+ }
+}
+
+/* 32-bit pairwise ops end up the same as the elementwise versions. */
+#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
+#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
+#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
+#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
+
+/* FIXME: This is wrong. They set the wrong overflow bit. */
+#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
+#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
+#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
+#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
+
+#define GEN_NEON_INTEGER_OP_ENV(name) do { \
+ switch ((size << 1) | u) { \
+ case 0: \
+ gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+ break; \
+ case 1: \
+ gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+ break; \
+ case 2: \
+ gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+ break; \
+ case 3: \
+ gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+ break; \
+ case 4: \
+ gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+ break; \
+ case 5: \
+ gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+ break; \
+ default: return 1; \
+ }} while (0)
+
+#define GEN_NEON_INTEGER_OP(name) do { \
+ switch ((size << 1) | u) { \
+ case 0: \
+ gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
+ break; \
+ case 1: \
+ gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
+ break; \
+ case 2: \
+ gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
+ break; \
+ case 3: \
+ gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
+ break; \
+ case 4: \
+ gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
+ break; \
+ case 5: \
+ gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
+ break; \
+ default: return 1; \
+ }} while (0)
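+
+/* Both macros dispatch on ((size << 1) | u): size picks the element width
+ (0 = 8-bit, 1 = 16-bit, 2 = 32-bit) and u picks the unsigned variant,
+ so e.g. size == 1, u == 0 expands to the _s16 helper. */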
+
+static inline void
+gen_neon_movl_scratch_T0(int scratch)
+{
+ uint32_t offset;
+
+ offset = offsetof(CPUARMState, vfp.scratch[scratch]);
+ tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
+}
+
+static inline void
+gen_neon_movl_scratch_T1(int scratch)
+{
+ uint32_t offset;
+
+ offset = offsetof(CPUARMState, vfp.scratch[scratch]);
+ tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
+}
+
+static inline void
+gen_neon_movl_T0_scratch(int scratch)
+{
+ uint32_t offset;
+
+ offset = offsetof(CPUARMState, vfp.scratch[scratch]);
+ tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
+}
+
+static inline void
+gen_neon_movl_T1_scratch(int scratch)
+{
+ uint32_t offset;
+
+ offset = offsetof(CPUARMState, vfp.scratch[scratch]);
+ tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
+}
+
+static inline void gen_neon_get_scalar(int size, int reg)
+{
+ if (size == 1) {
+ NEON_GET_REG(T0, reg >> 1, reg & 1);
+ } else {
+ NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
+ if (reg & 1)
+ gen_neon_dup_low16(cpu_T[0]);
+ else
+ gen_neon_dup_high16(cpu_T[0]);
+ }
+}
+
+static void gen_neon_unzip(int reg, int q, int tmp, int size)
+{
+ int n;
+
+ for (n = 0; n < q + 1; n += 2) {
+ NEON_GET_REG(T0, reg, n);
+ NEON_GET_REG(T1, reg, n + 1);
+ switch (size) {
+ case 0: gen_helper_neon_unzip_u8(); break;
+ case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
+ case 2: /* no-op */; break;
+ default: abort();
+ }
+ gen_neon_movl_scratch_T0(tmp + n);
+ gen_neon_movl_scratch_T1(tmp + n + 1);
+ }
+}
+
+static struct {
+ int nregs;
+ int interleave;
+ int spacing;
+} neon_ls_element_type[11] = {
+ {4, 4, 1},
+ {4, 4, 2},
+ {4, 1, 1},
+ {4, 2, 1},
+ {3, 3, 1},
+ {3, 3, 2},
+ {3, 1, 1},
+ {1, 1, 1},
+ {2, 2, 1},
+ {2, 2, 2},
+ {2, 1, 1}
+};
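+
+/* Indexed by the op field (insn[11:8]) of a VLDn/VSTn "multiple
+ structures" instruction: e.g. entry 7 ({1, 1, 1}) is a single-register
+ VLD1/VST1 and entry 0 ({4, 4, 1}) a fully interleaved VLD4/VST4. */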
+
+/* Translate a NEON load/store element instruction. Return nonzero if the
+ instruction is invalid. */
+static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int rd, rn, rm;
+ int op;
+ int nregs;
+ int interleave;
+ int stride;
+ int size;
+ int reg;
+ int pass;
+ int load;
+ int shift;
+ int n;
+ TCGv tmp;
+ TCGv tmp2;
+
+ if (!vfp_enabled(env))
+ return 1;
+ VFP_DREG_D(rd, insn);
+ rn = (insn >> 16) & 0xf;
+ rm = insn & 0xf;
+ load = (insn & (1 << 21)) != 0;
+ if ((insn & (1 << 23)) == 0) {
+ /* Load store all elements. */
+ op = (insn >> 8) & 0xf;
+ size = (insn >> 6) & 3;
+ if (op > 10 || size == 3)
+ return 1;
+ nregs = neon_ls_element_type[op].nregs;
+ interleave = neon_ls_element_type[op].interleave;
+ gen_movl_T1_reg(s, rn);
+ stride = (1 << size) * interleave;
+ for (reg = 0; reg < nregs; reg++) {
+ if (interleave > 2 || (interleave == 2 && nregs == 2)) {
+ gen_movl_T1_reg(s, rn);
+ gen_op_addl_T1_im((1 << size) * reg);
+ } else if (interleave == 2 && nregs == 4 && reg == 2) {
+ gen_movl_T1_reg(s, rn);
+ gen_op_addl_T1_im(1 << size);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ if (size == 2) {
+ if (load) {
+ tmp = gen_ld32(cpu_T[1], IS_USER(s));
+ neon_store_reg(rd, pass, tmp);
+ } else {
+ tmp = neon_load_reg(rd, pass);
+ gen_st32(tmp, cpu_T[1], IS_USER(s));
+ }
+ gen_op_addl_T1_im(stride);
+ } else if (size == 1) {
+ if (load) {
+ tmp = gen_ld16u(cpu_T[1], IS_USER(s));
+ gen_op_addl_T1_im(stride);
+ tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
+ gen_op_addl_T1_im(stride);
+ gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
+ dead_tmp(tmp2);
+ neon_store_reg(rd, pass, tmp);
+ } else {
+ tmp = neon_load_reg(rd, pass);
+ tmp2 = new_tmp();
+ tcg_gen_shri_i32(tmp2, tmp, 16);
+ gen_st16(tmp, cpu_T[1], IS_USER(s));
+ gen_op_addl_T1_im(stride);
+ gen_st16(tmp2, cpu_T[1], IS_USER(s));
+ gen_op_addl_T1_im(stride);
+ }
+ } else /* size == 0 */ {
+ if (load) {
+ TCGV_UNUSED(tmp2);
+ for (n = 0; n < 4; n++) {
+ tmp = gen_ld8u(cpu_T[1], IS_USER(s));
+ gen_op_addl_T1_im(stride);
+ if (n == 0) {
+ tmp2 = tmp;
+ } else {
+ gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
+ dead_tmp(tmp);
+ }
+ }
+ neon_store_reg(rd, pass, tmp2);
+ } else {
+ tmp2 = neon_load_reg(rd, pass);
+ for (n = 0; n < 4; n++) {
+ tmp = new_tmp();
+ if (n == 0) {
+ tcg_gen_mov_i32(tmp, tmp2);
+ } else {
+ tcg_gen_shri_i32(tmp, tmp2, n * 8);
+ }
+ gen_st8(tmp, cpu_T[1], IS_USER(s));
+ gen_op_addl_T1_im(stride);
+ }
+ dead_tmp(tmp2);
+ }
+ }
+ }
+ rd += neon_ls_element_type[op].spacing;
+ }
+ stride = nregs * 8;
+ } else {
+ size = (insn >> 10) & 3;
+ if (size == 3) {
+ /* Load single element to all lanes. */
+ if (!load)
+ return 1;
+ size = (insn >> 6) & 3;
+ nregs = ((insn >> 8) & 3) + 1;
+ stride = (insn & (1 << 5)) ? 2 : 1;
+ gen_movl_T1_reg(s, rn);
+ for (reg = 0; reg < nregs; reg++) {
+ switch (size) {
+ case 0:
+ tmp = gen_ld8u(cpu_T[1], IS_USER(s));
+ gen_neon_dup_u8(tmp, 0);
+ break;
+ case 1:
+ tmp = gen_ld16u(cpu_T[1], IS_USER(s));
+ gen_neon_dup_low16(tmp);
+ break;
+ case 2:
+ tmp = gen_ld32(cpu_T[1], IS_USER(s));
+ break;
+ case 3:
+ return 1;
+ default: /* Avoid compiler warnings. */
+ abort();
+ }
+ gen_op_addl_T1_im(1 << size);
+ tmp2 = new_tmp();
+ tcg_gen_mov_i32(tmp2, tmp);
+ neon_store_reg(rd, 0, tmp2);
+ neon_store_reg(rd, 1, tmp);
+ rd += stride;
+ }
+ stride = (1 << size) * nregs;
+ } else {
+ /* Single element. */
+ pass = (insn >> 7) & 1;
+ switch (size) {
+ case 0:
+ shift = ((insn >> 5) & 3) * 8;
+ stride = 1;
+ break;
+ case 1:
+ shift = ((insn >> 6) & 1) * 16;
+ stride = (insn & (1 << 5)) ? 2 : 1;
+ break;
+ case 2:
+ shift = 0;
+ stride = (insn & (1 << 6)) ? 2 : 1;
+ break;
+ default:
+ abort();
+ }
+ nregs = ((insn >> 8) & 3) + 1;
+ gen_movl_T1_reg(s, rn);
+ for (reg = 0; reg < nregs; reg++) {
+ if (load) {
+ switch (size) {
+ case 0:
+ tmp = gen_ld8u(cpu_T[1], IS_USER(s));
+ break;
+ case 1:
+ tmp = gen_ld16u(cpu_T[1], IS_USER(s));
+ break;
+ case 2:
+ tmp = gen_ld32(cpu_T[1], IS_USER(s));
+ break;
+ default: /* Avoid compiler warnings. */
+ abort();
+ }
+ if (size != 2) {
+ tmp2 = neon_load_reg(rd, pass);
+ gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
+ dead_tmp(tmp2);
+ }
+ neon_store_reg(rd, pass, tmp);
+ } else { /* Store */
+ tmp = neon_load_reg(rd, pass);
+ if (shift)
+ tcg_gen_shri_i32(tmp, tmp, shift);
+ switch (size) {
+ case 0:
+ gen_st8(tmp, cpu_T[1], IS_USER(s));
+ break;
+ case 1:
+ gen_st16(tmp, cpu_T[1], IS_USER(s));
+ break;
+ case 2:
+ gen_st32(tmp, cpu_T[1], IS_USER(s));
+ break;
+ }
+ }
+ rd += stride;
+ gen_op_addl_T1_im(1 << size);
+ }
+ stride = nregs * (1 << size);
+ }
+ }
+ if (rm != 15) {
+ TCGv base;
+
+ base = load_reg(s, rn);
+ if (rm == 13) {
+ tcg_gen_addi_i32(base, base, stride);
+ } else {
+ TCGv index;
+ index = load_reg(s, rm);
+ tcg_gen_add_i32(base, base, index);
+ dead_tmp(index);
+ }
+ store_reg(s, rn, base);
+ }
+ return 0;
+}
+
+/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
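+/* Implemented as dest = (t & c) | (f & ~c): each result bit comes from
+ t where the control bit is set and from f where it is clear. */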
+static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
+{
+ tcg_gen_and_i32(t, t, c);
+ tcg_gen_bic_i32(f, f, c);
+ tcg_gen_or_i32(dest, t, f);
+}
+
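+/* Narrowing helpers: plain truncation plus signed and unsigned saturating
+ variants, each converting the 2n-bit elements of a 64-bit vector into
+ n-bit elements. */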
+static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_narrow_u8(dest, src); break;
+ case 1: gen_helper_neon_narrow_u16(dest, src); break;
+ case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
+ int q, int u)
+{
+ if (q) {
+ if (u) {
+ switch (size) {
+ case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
+ case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
+ case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
+ default: abort();
+ }
+ }
+ } else {
+ if (u) {
+ switch (size) {
+ case 1: gen_helper_neon_shl_u16(var, var, shift); break;
+ case 2: gen_helper_neon_shl_u32(var, var, shift); break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 1: gen_helper_neon_shl_s16(var, var, shift); break;
+ case 2: gen_helper_neon_shl_s32(var, var, shift); break;
+ default: abort();
+ }
+ }
+ }
+}
+
+static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
+{
+ if (u) {
+ switch (size) {
+ case 0: gen_helper_neon_widen_u8(dest, src); break;
+ case 1: gen_helper_neon_widen_u16(dest, src); break;
+ case 2: tcg_gen_extu_i32_i64(dest, src); break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 0: gen_helper_neon_widen_s8(dest, src); break;
+ case 1: gen_helper_neon_widen_s16(dest, src); break;
+ case 2: tcg_gen_ext_i32_i64(dest, src); break;
+ default: abort();
+ }
+ }
+ dead_tmp(src);
+}
+
+static inline void gen_neon_addl(int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_addl_u16(CPU_V001); break;
+ case 1: gen_helper_neon_addl_u32(CPU_V001); break;
+ case 2: tcg_gen_add_i64(CPU_V001); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_subl(int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_subl_u16(CPU_V001); break;
+ case 1: gen_helper_neon_subl_u32(CPU_V001); break;
+ case 2: tcg_gen_sub_i64(CPU_V001); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_negl(TCGv var, int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_negl_u16(var, var); break;
+ case 1: gen_helper_neon_negl_u32(var, var); break;
+ case 2: gen_helper_neon_negl_u64(var, var); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
+{
+ switch (size) {
+ case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
+ case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
+{
+ TCGv tmp;
+
+ switch ((size << 1) | u) {
+ case 0: gen_helper_neon_mull_s8(dest, a, b); break;
+ case 1: gen_helper_neon_mull_u8(dest, a, b); break;
+ case 2: gen_helper_neon_mull_s16(dest, a, b); break;
+ case 3: gen_helper_neon_mull_u16(dest, a, b); break;
+ case 4:
+ tmp = gen_muls_i64_i32(a, b);
+ tcg_gen_mov_i64(dest, tmp);
+ break;
+ case 5:
+ tmp = gen_mulu_i64_i32(a, b);
+ tcg_gen_mov_i64(dest, tmp);
+ break;
+ default: abort();
+ }
+ if (size < 2) {
+ dead_tmp(b);
+ dead_tmp(a);
+ }
+}
+
+/* Translate a NEON data processing instruction. Return nonzero if the
+ instruction is invalid.
+ We process data in a mixture of 32-bit and 64-bit chunks.
+ Mostly we use 32-bit chunks so we can use normal scalar instructions. */
+
+static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int op;
+ int q;
+ int rd, rn, rm;
+ int size;
+ int shift;
+ int pass;
+ int count;
+ int pairwise;
+ int u;
+ int n;
+ uint32_t imm;
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv tmp3;
+
+ if (!vfp_enabled(env))
+ return 1;
+ q = (insn & (1 << 6)) != 0;
+ u = (insn >> 24) & 1;
+ VFP_DREG_D(rd, insn);
+ VFP_DREG_N(rn, insn);
+ VFP_DREG_M(rm, insn);
+ size = (insn >> 20) & 3;
+ if ((insn & (1 << 23)) == 0) {
+ /* Three register same length. */
+ op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
+ if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
+ || op == 10 || op == 11 || op == 16)) {
+ /* 64-bit element instructions. */
+ for (pass = 0; pass < (q ? 2 : 1); pass++) {
+ neon_load_reg64(cpu_V0, rn + pass);
+ neon_load_reg64(cpu_V1, rm + pass);
+ switch (op) {
+ case 1: /* VQADD */
+ if (u) {
+ gen_helper_neon_add_saturate_u64(CPU_V001);
+ } else {
+ gen_helper_neon_add_saturate_s64(CPU_V001);
+ }
+ break;
+ case 5: /* VQSUB */
+ if (u) {
+ gen_helper_neon_sub_saturate_u64(CPU_V001);
+ } else {
+ gen_helper_neon_sub_saturate_s64(CPU_V001);
+ }
+ break;
+ case 8: /* VSHL */
+ if (u) {
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
+ }
+ break;
+ case 9: /* VQSHL */
+ if (u) {
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ }
+ break;
+ case 10: /* VRSHL */
+ if (u) {
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
+ }
+ break;
+ case 11: /* VQRSHL */
+ if (u) {
+ gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ }
+ break;
+ case 16:
+ if (u) {
+ tcg_gen_sub_i64(CPU_V001);
+ } else {
+ tcg_gen_add_i64(CPU_V001);
+ }
+ break;
+ default:
+ abort();
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ return 0;
+ }
+ switch (op) {
+ case 8: /* VSHL */
+ case 9: /* VQSHL */
+ case 10: /* VRSHL */
+ case 11: /* VQRSHL */
+ {
+ int rtmp;
+ /* Shift instruction operands are reversed. */
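+ /* VSHL and friends take the value from Vm and the shift count
+ from Vn, the reverse of the other three-register ops, hence
+ the swap. */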
+ rtmp = rn;
+ rn = rm;
+ rm = rtmp;
+ pairwise = 0;
+ }
+ break;
+ case 20: /* VPMAX */
+ case 21: /* VPMIN */
+ case 23: /* VPADD */
+ pairwise = 1;
+ break;
+ case 26: /* VPADD (float) */
+ pairwise = (u && size < 2);
+ break;
+ case 30: /* VPMIN/VPMAX (float) */
+ pairwise = u;
+ break;
+ default:
+ pairwise = 0;
+ break;
+ }
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+
+ if (pairwise) {
+ /* Pairwise. */
+ if (q)
+ n = (pass & 1) * 2;
+ else
+ n = 0;
+ if (pass < q + 1) {
+ NEON_GET_REG(T0, rn, n);
+ NEON_GET_REG(T1, rn, n + 1);
+ } else {
+ NEON_GET_REG(T0, rm, n);
+ NEON_GET_REG(T1, rm, n + 1);
+ }
+ } else {
+ /* Elementwise. */
+ NEON_GET_REG(T0, rn, pass);
+ NEON_GET_REG(T1, rm, pass);
+ }
+ switch (op) {
+ case 0: /* VHADD */
+ GEN_NEON_INTEGER_OP(hadd);
+ break;
+ case 1: /* VQADD */
+ GEN_NEON_INTEGER_OP_ENV(qadd);
+ break;
+ case 2: /* VRHADD */
+ GEN_NEON_INTEGER_OP(rhadd);
+ break;
+ case 3: /* Logic ops. */
+ switch ((u << 2) | size) {
+ case 0: /* VAND */
+ gen_op_andl_T0_T1();
+ break;
+ case 1: /* BIC */
+ gen_op_bicl_T0_T1();
+ break;
+ case 2: /* VORR */
+ gen_op_orl_T0_T1();
+ break;
+ case 3: /* VORN */
+ gen_op_notl_T1();
+ gen_op_orl_T0_T1();
+ break;
+ case 4: /* VEOR */
+ gen_op_xorl_T0_T1();
+ break;
+ case 5: /* VBSL */
+ tmp = neon_load_reg(rd, pass);
+ gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
+ dead_tmp(tmp);
+ break;
+ case 6: /* VBIT */
+ tmp = neon_load_reg(rd, pass);
+ gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
+ dead_tmp(tmp);
+ break;
+ case 7: /* VBIF */
+ tmp = neon_load_reg(rd, pass);
+ gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
+ dead_tmp(tmp);
+ break;
+ }
+ break;
+ case 4: /* VHSUB */
+ GEN_NEON_INTEGER_OP(hsub);
+ break;
+ case 5: /* VQSUB */
+ GEN_NEON_INTEGER_OP_ENV(qsub);
+ break;
+ case 6: /* VCGT */
+ GEN_NEON_INTEGER_OP(cgt);
+ break;
+ case 7: /* VCGE */
+ GEN_NEON_INTEGER_OP(cge);
+ break;
+ case 8: /* VSHL */
+ GEN_NEON_INTEGER_OP(shl);
+ break;
+ case 9: /* VQSHL */
+ GEN_NEON_INTEGER_OP_ENV(qshl);
+ break;
+ case 10: /* VRSHL */
+ GEN_NEON_INTEGER_OP(rshl);
+ break;
+ case 11: /* VQRSHL */
+ GEN_NEON_INTEGER_OP_ENV(qrshl);
+ break;
+ case 12: /* VMAX */
+ GEN_NEON_INTEGER_OP(max);
+ break;
+ case 13: /* VMIN */
+ GEN_NEON_INTEGER_OP(min);
+ break;
+ case 14: /* VABD */
+ GEN_NEON_INTEGER_OP(abd);
+ break;
+ case 15: /* VABA */
+ GEN_NEON_INTEGER_OP(abd);
+ NEON_GET_REG(T1, rd, pass);
+ gen_neon_add(size);
+ break;
+ case 16:
+ if (!u) { /* VADD */
+ if (gen_neon_add(size))
+ return 1;
+ } else { /* VSUB */
+ switch (size) {
+ case 0: gen_helper_neon_sub_u8(CPU_T001); break;
+ case 1: gen_helper_neon_sub_u16(CPU_T001); break;
+ case 2: gen_op_subl_T0_T1(); break;
+ default: return 1;
+ }
+ }
+ break;
+ case 17:
+ if (!u) { /* VTST */
+ switch (size) {
+ case 0: gen_helper_neon_tst_u8(CPU_T001); break;
+ case 1: gen_helper_neon_tst_u16(CPU_T001); break;
+ case 2: gen_helper_neon_tst_u32(CPU_T001); break;
+ default: return 1;
+ }
+ } else { /* VCEQ */
+ switch (size) {
+ case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
+ case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
+ case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
+ default: return 1;
+ }
+ }
+ break;
+ case 18: /* Multiply. */
+ switch (size) {
+ case 0: gen_helper_neon_mul_u8(CPU_T001); break;
+ case 1: gen_helper_neon_mul_u16(CPU_T001); break;
+ case 2: gen_op_mul_T0_T1(); break;
+ default: return 1;
+ }
+ NEON_GET_REG(T1, rd, pass);
+ if (u) { /* VMLS */
+ gen_neon_rsb(size);
+ } else { /* VMLA */
+ gen_neon_add(size);
+ }
+ break;
+ case 19: /* VMUL */
+ if (u) { /* polynomial */
+ gen_helper_neon_mul_p8(CPU_T001);
+ } else { /* Integer */
+ switch (size) {
+ case 0: gen_helper_neon_mul_u8(CPU_T001); break;
+ case 1: gen_helper_neon_mul_u16(CPU_T001); break;
+ case 2: gen_op_mul_T0_T1(); break;
+ default: return 1;
+ }
+ }
+ break;
+ case 20: /* VPMAX */
+ GEN_NEON_INTEGER_OP(pmax);
+ break;
+ case 21: /* VPMIN */
+ GEN_NEON_INTEGER_OP(pmin);
+ break;
+ case 22: /* Multiply high. */
+ if (!u) { /* VQDMULH */
+ switch (size) {
+ case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
+ case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
+ default: return 1;
+ }
+ } else { /* VQRDMULH */
+ switch (size) {
+ case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
+ case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
+ default: return 1;
+ }
+ }
+ break;
+ case 23: /* VPADD */
+ if (u)
+ return 1;
+ switch (size) {
+ case 0: gen_helper_neon_padd_u8(CPU_T001); break;
+ case 1: gen_helper_neon_padd_u16(CPU_T001); break;
+ case 2: gen_op_addl_T0_T1(); break;
+ default: return 1;
+ }
+ break;
+ case 26: /* Floating point arithmetic. */
+ switch ((u << 2) | size) {
+ case 0: /* VADD */
+ gen_helper_neon_add_f32(CPU_T001);
+ break;
+ case 2: /* VSUB */
+ gen_helper_neon_sub_f32(CPU_T001);
+ break;
+ case 4: /* VPADD */
+ gen_helper_neon_add_f32(CPU_T001);
+ break;
+ case 6: /* VABD */
+ gen_helper_neon_abd_f32(CPU_T001);
+ break;
+ default:
+ return 1;
+ }
+ break;
+ case 27: /* Float multiply. */
+ gen_helper_neon_mul_f32(CPU_T001);
+ if (!u) {
+ NEON_GET_REG(T1, rd, pass);
+ if (size == 0) {
+ gen_helper_neon_add_f32(CPU_T001);
+ } else {
+ gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
+ }
+ }
+ break;
+ case 28: /* Float compare. */
+ if (!u) {
+ gen_helper_neon_ceq_f32(CPU_T001);
+ } else {
+ if (size == 0)
+ gen_helper_neon_cge_f32(CPU_T001);
+ else
+ gen_helper_neon_cgt_f32(CPU_T001);
+ }
+ break;
+ case 29: /* Float compare absolute. */
+ if (!u)
+ return 1;
+ if (size == 0)
+ gen_helper_neon_acge_f32(CPU_T001);
+ else
+ gen_helper_neon_acgt_f32(CPU_T001);
+ break;
+ case 30: /* Float min/max. */
+ if (size == 0)
+ gen_helper_neon_max_f32(CPU_T001);
+ else
+ gen_helper_neon_min_f32(CPU_T001);
+ break;
+ case 31:
+ if (size == 0)
+ gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
+ else
+ gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
+ break;
+ default:
+ abort();
+ }
+ /* Save the result. For elementwise operations we can put it
+ straight into the destination register. For pairwise operations
+ we have to be careful to avoid clobbering the source operands. */
+ if (pairwise && rd == rm) {
+ gen_neon_movl_scratch_T0(pass);
+ } else {
+ NEON_SET_REG(T0, rd, pass);
+ }
+
+ } /* for pass */
+ if (pairwise && rd == rm) {
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ gen_neon_movl_T0_scratch(pass);
+ NEON_SET_REG(T0, rd, pass);
+ }
+ }
+ /* End of 3 register same size operations. */
+ } else if (insn & (1 << 4)) {
+ if ((insn & 0x00380080) != 0) {
+ /* Two registers and shift. */
+ op = (insn >> 8) & 0xf;
+ if (insn & (1 << 7)) {
+ /* 64-bit shift. */
+ size = 3;
+ } else {
+ size = 2;
+ while ((insn & (1 << (size + 19))) == 0)
+ size--;
+ }
+ shift = (insn >> 16) & ((1 << (3 + size)) - 1);
+ /* To avoid excessive duplication of ops we implement shift
+ by immediate using the variable shift operations. */
+ if (op < 8) {
+ /* Shift by immediate:
+ VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
+ /* Right shifts are encoded as N - shift, where N is the
+ element size in bits. */
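+ /* Illustrative example: with 16-bit elements (size == 1, N == 16)
+ an encoded count of 13 means a right shift by 3; the subtraction
+ below yields shift == -3, and the variable shift helpers treat
+ negative counts as right shifts. */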
+ if (op <= 4)
+ shift = shift - (1 << (size + 3));
+ if (size == 3) {
+ count = q + 1;
+ } else {
+ count = q ? 4: 2;
+ }
+ switch (size) {
+ case 0:
+ imm = (uint8_t) shift;
+ imm |= imm << 8;
+ imm |= imm << 16;
+ break;
+ case 1:
+ imm = (uint16_t) shift;
+ imm |= imm << 16;
+ break;
+ case 2:
+ case 3:
+ imm = shift;
+ break;
+ default:
+ abort();
+ }
+
+ for (pass = 0; pass < count; pass++) {
+ if (size == 3) {
+ neon_load_reg64(cpu_V0, rm + pass);
+ tcg_gen_movi_i64(cpu_V1, imm);
+ switch (op) {
+ case 0: /* VSHR */
+ case 1: /* VSRA */
+ if (u)
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+ else
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 2: /* VRSHR */
+ case 3: /* VRSRA */
+ if (u)
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
+ else
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 4: /* VSRI */
+ if (!u)
+ return 1;
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 5: /* VSHL, VSLI */
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 6: /* VQSHL */
+ if (u)
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
+ else
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
+ break;
+ case 7: /* VQSHLU */
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
+ break;
+ }
+ if (op == 1 || op == 3) {
+ /* Accumulate. */
+ neon_load_reg64(cpu_V1, rd + pass);
+ tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
+ } else if (op == 4 || (op == 5 && u)) {
+ /* Insert */
+ cpu_abort(env, "VS[LR]I.64 not implemented");
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ } else { /* size < 3 */
+ /* Operands in T0 and T1. */
+ gen_op_movl_T1_im(imm);
+ NEON_GET_REG(T0, rm, pass);
+ switch (op) {
+ case 0: /* VSHR */
+ case 1: /* VSRA */
+ GEN_NEON_INTEGER_OP(shl);
+ break;
+ case 2: /* VRSHR */
+ case 3: /* VRSRA */
+ GEN_NEON_INTEGER_OP(rshl);
+ break;
+ case 4: /* VSRI */
+ if (!u)
+ return 1;
+ GEN_NEON_INTEGER_OP(shl);
+ break;
+ case 5: /* VSHL, VSLI */
+ switch (size) {
+ case 0: gen_helper_neon_shl_u8(CPU_T001); break;
+ case 1: gen_helper_neon_shl_u16(CPU_T001); break;
+ case 2: gen_helper_neon_shl_u32(CPU_T001); break;
+ default: return 1;
+ }
+ break;
+ case 6: /* VQSHL */
+ GEN_NEON_INTEGER_OP_ENV(qshl);
+ break;
+ case 7: /* VQSHLU */
+ switch (size) {
+ case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
+ case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
+ case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
+ default: return 1;
+ }
+ break;
+ }
+
+ if (op == 1 || op == 3) {
+ /* Accumulate. */
+ NEON_GET_REG(T1, rd, pass);
+ gen_neon_add(size);
+ } else if (op == 4 || (op == 5 && u)) {
+ /* Insert */
+ switch (size) {
+ case 0:
+ if (op == 4)
+ imm = 0xff >> -shift;
+ else
+ imm = (uint8_t)(0xff << shift);
+ imm |= imm << 8;
+ imm |= imm << 16;
+ break;
+ case 1:
+ if (op == 4)
+ imm = 0xffff >> -shift;
+ else
+ imm = (uint16_t)(0xffff << shift);
+ imm |= imm << 16;
+ break;
+ case 2:
+ if (op == 4)
+ imm = 0xffffffffu >> -shift;
+ else
+ imm = 0xffffffffu << shift;
+ break;
+ default:
+ abort();
+ }
+ tmp = neon_load_reg(rd, pass);
+ tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
+ tcg_gen_andi_i32(tmp, tmp, ~imm);
+ tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
+ }
+ NEON_SET_REG(T0, rd, pass);
+ }
+ } /* for pass */
+ } else if (op < 10) {
+ /* Shift by immediate and narrow:
+ VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
+ shift = shift - (1 << (size + 3));
+ size++;
+ switch (size) {
+ case 1:
+ imm = (uint16_t)shift;
+ imm |= imm << 16;
+ tmp2 = tcg_const_i32(imm);
+ break;
+ case 2:
+ imm = (uint32_t)shift;
+ tmp2 = tcg_const_i32(imm);
+ break;
+ case 3:
+ tmp2 = tcg_const_i64(shift);
+ break;
+ default:
+ abort();
+ }
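+ /* After the increment, size names the wide source element
+ (1 = 16 bit ... 3 = 64 bit); the narrow helpers below take
+ size - 1, the destination element size. */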
+
+ for (pass = 0; pass < 2; pass++) {
+ if (size == 3) {
+ neon_load_reg64(cpu_V0, rm + pass);
+ if (q) {
+ if (u)
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
+ else
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
+ } else {
+ if (u)
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
+ else
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
+ }
+ } else {
+ tmp = neon_load_reg(rm + pass, 0);
+ gen_neon_shift_narrow(size, tmp, tmp2, q, u);
+ tcg_gen_extu_i32_i64(cpu_V0, tmp);
+ dead_tmp(tmp);
+ tmp = neon_load_reg(rm + pass, 1);
+ gen_neon_shift_narrow(size, tmp, tmp2, q, u);
+ tcg_gen_extu_i32_i64(cpu_V1, tmp);
+ dead_tmp(tmp);
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ }
+ tmp = new_tmp();
+ if (op == 8 && !u) {
+ gen_neon_narrow(size - 1, tmp, cpu_V0);
+ } else {
+ if (op == 8)
+ gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
+ else
+ gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
+ }
+ if (pass == 0) {
+ tmp2 = tmp;
+ } else {
+ neon_store_reg(rd, 0, tmp2);
+ neon_store_reg(rd, 1, tmp);
+ }
+ } /* for pass */
+ } else if (op == 10) {
+ /* VSHLL */
+ if (q || size == 3)
+ return 1;
+ tmp = neon_load_reg(rm, 0);
+ tmp2 = neon_load_reg(rm, 1);
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 1)
+ tmp = tmp2;
+
+ gen_neon_widen(cpu_V0, tmp, size, u);
+
+ if (shift != 0) {
+ /* The shift is less than the width of the source
+ type, so we can just shift the whole register. */
+ tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
+ if (size < 2) {
+ uint64_t imm64;
+ if (size == 0) {
+ imm = (0xffu >> (8 - shift));
+ imm |= imm << 16;
+ } else {
+ imm = 0xffff >> (16 - shift);
+ }
+ imm64 = imm | (((uint64_t)imm) << 32);
+ /* Clear the bits that crossed into the next lane; a
+ signed source shifts copies of the sign bit in from
+ the lane below. */
+ tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
+ }
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ } else if (op == 15 || op == 16) {
+ /* VCVT fixed-point. */
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
+ if (op & 1) {
+ if (u)
+ gen_vfp_ulto(0, shift);
+ else
+ gen_vfp_slto(0, shift);
+ } else {
+ if (u)
+ gen_vfp_toul(0, shift);
+ else
+ gen_vfp_tosl(0, shift);
+ }
+ tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
+ }
+ } else {
+ return 1;
+ }
+ } else { /* (insn & 0x00380080) == 0 */
+ int invert;
+
+ op = (insn >> 8) & 0xf;
+ /* One register and immediate. */
+ imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
+ invert = (insn & (1 << 5)) != 0;
+ switch (op) {
+ case 0: case 1:
+ /* no-op */
+ break;
+ case 2: case 3:
+ imm <<= 8;
+ break;
+ case 4: case 5:
+ imm <<= 16;
+ break;
+ case 6: case 7:
+ imm <<= 24;
+ break;
+ case 8: case 9:
+ imm |= imm << 16;
+ break;
+ case 10: case 11:
+ imm = (imm << 8) | (imm << 24);
+ break;
+ case 12:
+ imm = (imm << 8) | 0xff;
+ break;
+ case 13:
+ imm = (imm << 16) | 0xffff;
+ break;
+ case 14:
+ imm |= (imm << 8) | (imm << 16) | (imm << 24);
+ if (invert)
+ imm = ~imm;
+ break;
+ case 15:
+ imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+ | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+ break;
+ }
+ if (invert)
+ imm = ~imm;
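+ /* E.g. op 8 expands an 8-bit immediate 0xab to 0x00ab00ab,
+ so one 32-bit constant serves every pass regardless of the
+ element size. */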
+
+ if (op != 14 || !invert)
+ gen_op_movl_T1_im(imm);
+
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ if (op & 1 && op < 12) {
+ tmp = neon_load_reg(rd, pass);
+ if (invert) {
+ /* The immediate value has already been inverted, so
+ BIC becomes AND. */
+ tcg_gen_andi_i32(tmp, tmp, imm);
+ } else {
+ tcg_gen_ori_i32(tmp, tmp, imm);
+ }
+ } else {
+ /* VMOV, VMVN. */
+ tmp = new_tmp();
+ if (op == 14 && invert) {
+ uint32_t val;
+ val = 0;
+ for (n = 0; n < 4; n++) {
+ if (imm & (1 << (n + (pass & 1) * 4)))
+ val |= 0xff << (n * 8);
+ }
+ tcg_gen_movi_i32(tmp, val);
+ } else {
+ tcg_gen_movi_i32(tmp, imm);
+ }
+ }
+ neon_store_reg(rd, pass, tmp);
+ }
+ }
+ } else { /* (insn & 0x00800010 == 0x00800010) */
+ if (size != 3) {
+ op = (insn >> 8) & 0xf;
+ if ((insn & (1 << 6)) == 0) {
+ /* Three registers of different lengths. */
+ int src1_wide;
+ int src2_wide;
+ int prewiden;
+ /* prewiden, src1_wide, src2_wide */
+ static const int neon_3reg_wide[16][3] = {
+ {1, 0, 0}, /* VADDL */
+ {1, 1, 0}, /* VADDW */
+ {1, 0, 0}, /* VSUBL */
+ {1, 1, 0}, /* VSUBW */
+ {0, 1, 1}, /* VADDHN */
+ {0, 0, 0}, /* VABAL */
+ {0, 1, 1}, /* VSUBHN */
+ {0, 0, 0}, /* VABDL */
+ {0, 0, 0}, /* VMLAL */
+ {0, 0, 0}, /* VQDMLAL */
+ {0, 0, 0}, /* VMLSL */
+ {0, 0, 0}, /* VQDMLSL */
+ {0, 0, 0}, /* Integer VMULL */
+ {0, 0, 0}, /* VQDMULL */
+ {0, 0, 0} /* Polynomial VMULL */
+ };
+
+ prewiden = neon_3reg_wide[op][0];
+ src1_wide = neon_3reg_wide[op][1];
+ src2_wide = neon_3reg_wide[op][2];
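+ /* Reading the table: VADDW (op 1) sets src1_wide, so the
+ first operand is consumed as a full 64-bit lane pair and
+ only the second operand is widened before the add. */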
+
+ if (size == 0 && (op == 9 || op == 11 || op == 13))
+ return 1;
+
+ /* Avoid overlapping operands. Wide source operands are
+ always aligned so will never overlap with wide
+ destinations in problematic ways. */
+ if (rd == rm && !src2_wide) {
+ NEON_GET_REG(T0, rm, 1);
+ gen_neon_movl_scratch_T0(2);
+ } else if (rd == rn && !src1_wide) {
+ NEON_GET_REG(T0, rn, 1);
+ gen_neon_movl_scratch_T0(2);
+ }
+ TCGV_UNUSED(tmp3);
+ for (pass = 0; pass < 2; pass++) {
+ if (src1_wide) {
+ neon_load_reg64(cpu_V0, rn + pass);
+ TCGV_UNUSED(tmp);
+ } else {
+ if (pass == 1 && rd == rn) {
+ gen_neon_movl_T0_scratch(2);
+ tmp = new_tmp();
+ tcg_gen_mov_i32(tmp, cpu_T[0]);
+ } else {
+ tmp = neon_load_reg(rn, pass);
+ }
+ if (prewiden) {
+ gen_neon_widen(cpu_V0, tmp, size, u);
+ }
+ }
+ if (src2_wide) {
+ neon_load_reg64(cpu_V1, rm + pass);
+ TCGV_UNUSED(tmp2);
+ } else {
+ if (pass == 1 && rd == rm) {
+ gen_neon_movl_T0_scratch(2);
+ tmp2 = new_tmp();
+ tcg_gen_mov_i32(tmp2, cpu_T[0]);
+ } else {
+ tmp2 = neon_load_reg(rm, pass);
+ }
+ if (prewiden) {
+ gen_neon_widen(cpu_V1, tmp2, size, u);
+ }
+ }
+ switch (op) {
+ case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
+ gen_neon_addl(size);
+ break;
+ case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
+ gen_neon_subl(size);
+ break;
+ case 5: case 7: /* VABAL, VABDL */
+ switch ((size << 1) | u) {
+ case 0:
+ gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
+ break;
+ case 3:
+ gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
+ break;
+ case 4:
+ gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
+ break;
+ case 5:
+ gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
+ break;
+ default: abort();
+ }
+ dead_tmp(tmp2);
+ dead_tmp(tmp);
+ break;
+ case 8: case 9: case 10: case 11: case 12: case 13:
+ /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
+ gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+ break;
+ case 14: /* Polynomial VMULL */
+ cpu_abort(env, "Polynomial VMULL not implemented");
+
+ default: /* 15 is RESERVED. */
+ return 1;
+ }
+ if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
+ /* Accumulate. */
+ if (op == 10 || op == 11) {
+ gen_neon_negl(cpu_V0, size);
+ }
+
+ if (op != 13) {
+ neon_load_reg64(cpu_V1, rd + pass);
+ }
+
+ switch (op) {
+ case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
+ gen_neon_addl(size);
+ break;
+ case 9: case 11: /* VQDMLAL, VQDMLSL */
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
+ break;
+ case 13: /* VQDMULL */
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ break;
+ default:
+ abort();
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ } else if (op == 4 || op == 6) {
+ /* Narrowing operation. */
+ tmp = new_tmp();
+ if (u) {
+ switch (size) {
+ case 0:
+ gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
+ break;
+ case 1:
+ gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
+ break;
+ case 2:
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 0:
+ gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
+ break;
+ case 1:
+ gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
+ break;
+ case 2:
+ tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ break;
+ default: abort();
+ }
+ }
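+ /* The rounding forms add half of the discarded low part
+ first; for size 2 that is the constant 1 << 31 added before
+ the top 32 bits are extracted, i.e. round to nearest. */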
+ if (pass == 0) {
+ tmp3 = tmp;
+ } else {
+ neon_store_reg(rd, 0, tmp3);
+ neon_store_reg(rd, 1, tmp);
+ }
+ } else {
+ /* Write back the result. */
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ }
+ } else {
+ /* Two registers and a scalar. */
+ switch (op) {
+ case 0: /* Integer VMLA scalar */
+ case 1: /* Float VMLA scalar */
+ case 4: /* Integer VMLS scalar */
+ case 5: /* Floating point VMLS scalar */
+ case 8: /* Integer VMUL scalar */
+ case 9: /* Floating point VMUL scalar */
+ case 12: /* VQDMULH scalar */
+ case 13: /* VQRDMULH scalar */
+ gen_neon_get_scalar(size, rm);
+ gen_neon_movl_scratch_T0(0);
+ for (pass = 0; pass < (u ? 4 : 2); pass++) {
+ if (pass != 0)
+ gen_neon_movl_T0_scratch(0);
+ NEON_GET_REG(T1, rn, pass);
+ if (op == 12) {
+ if (size == 1) {
+ gen_helper_neon_qdmulh_s16(CPU_T0E01);
+ } else {
+ gen_helper_neon_qdmulh_s32(CPU_T0E01);
+ }
+ } else if (op == 13) {
+ if (size == 1) {
+ gen_helper_neon_qrdmulh_s16(CPU_T0E01);
+ } else {
+ gen_helper_neon_qrdmulh_s32(CPU_T0E01);
+ }
+ } else if (op & 1) {
+ gen_helper_neon_mul_f32(CPU_T001);
+ } else {
+ switch (size) {
+ case 0: gen_helper_neon_mul_u8(CPU_T001); break;
+ case 1: gen_helper_neon_mul_u16(CPU_T001); break;
+ case 2: gen_op_mul_T0_T1(); break;
+ default: return 1;
+ }
+ }
+ if (op < 8) {
+ /* Accumulate. */
+ NEON_GET_REG(T1, rd, pass);
+ switch (op) {
+ case 0:
+ gen_neon_add(size);
+ break;
+ case 1:
+ gen_helper_neon_add_f32(CPU_T001);
+ break;
+ case 4:
+ gen_neon_rsb(size);
+ break;
+ case 5:
+ gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
+ break;
+ default:
+ abort();
+ }
+ }
+ NEON_SET_REG(T0, rd, pass);
+ }
+ break;
+ case 2: /* VMLAL scalar */
+ case 3: /* VQDMLAL scalar */
+ case 6: /* VMLSL scalar */
+ case 7: /* VQDMLSL scalar */
+ case 10: /* VMULL scalar */
+ case 11: /* VQDMULL scalar */
+ if (size == 0 && (op == 3 || op == 7 || op == 11))
+ return 1;
+
+ gen_neon_get_scalar(size, rm);
+ NEON_GET_REG(T1, rn, 1);
+
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 0) {
+ tmp = neon_load_reg(rn, 0);
+ } else {
+ tmp = new_tmp();
+ tcg_gen_mov_i32(tmp, cpu_T[1]);
+ }
+ tmp2 = new_tmp();
+ tcg_gen_mov_i32(tmp2, cpu_T[0]);
+ gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+ if (op == 6 || op == 7) {
+ gen_neon_negl(cpu_V0, size);
+ }
+ if (op != 11) {
+ neon_load_reg64(cpu_V1, rd + pass);
+ }
+ switch (op) {
+ case 2: case 6:
+ gen_neon_addl(size);
+ break;
+ case 3: case 7:
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
+ break;
+ case 10:
+ /* no-op */
+ break;
+ case 11:
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ break;
+ default:
+ abort();
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ break;
+ default: /* 14 and 15 are RESERVED */
+ return 1;
+ }
+ }
+ } else { /* size == 3 */
+ if (!u) {
+ /* Extract. */
+ imm = (insn >> 8) & 0xf;
+ count = q + 1;
+
+ if (imm > 7 && !q)
+ return 1;
+
+ if (imm == 0) {
+ neon_load_reg64(cpu_V0, rn);
+ if (q) {
+ neon_load_reg64(cpu_V1, rn + 1);
+ }
+ } else if (imm == 8) {
+ neon_load_reg64(cpu_V0, rn + 1);
+ if (q) {
+ neon_load_reg64(cpu_V1, rm);
+ }
+ } else if (q) {
+ tmp = tcg_temp_new(TCG_TYPE_I64);
+ if (imm < 8) {
+ neon_load_reg64(cpu_V0, rn);
+ neon_load_reg64(tmp, rn + 1);
+ } else {
+ neon_load_reg64(cpu_V0, rn + 1);
+ neon_load_reg64(tmp, rm);
+ }
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
+ tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ if (imm < 8) {
+ neon_load_reg64(cpu_V1, rm);
+ } else {
+ neon_load_reg64(cpu_V1, rm + 1);
+ imm -= 8;
+ }
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
+ tcg_gen_shri_i64(tmp, tmp, imm * 8);
+ tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
+ } else {
+ neon_load_reg64(cpu_V0, rn);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
+ neon_load_reg64(cpu_V1, rm);
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ }
+ neon_store_reg64(cpu_V0, rd);
+ if (q) {
+ neon_store_reg64(cpu_V1, rd + 1);
+ }
+ } else if ((insn & (1 << 11)) == 0) {
+ /* Two register misc. */
+ op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
+ size = (insn >> 18) & 3;
+ switch (op) {
+ case 0: /* VREV64 */
+ if (size == 3)
+ return 1;
+ for (pass = 0; pass < (q ? 2 : 1); pass++) {
+ NEON_GET_REG(T0, rm, pass * 2);
+ NEON_GET_REG(T1, rm, pass * 2 + 1);
+ switch (size) {
+ case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 1: gen_swap_half(cpu_T[0]); break;
+ case 2: /* no-op */ break;
+ default: abort();
+ }
+ NEON_SET_REG(T0, rd, pass * 2 + 1);
+ if (size == 2) {
+ NEON_SET_REG(T1, rd, pass * 2);
+ } else {
+ gen_op_movl_T0_T1();
+ switch (size) {
+ case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 1: gen_swap_half(cpu_T[0]); break;
+ default: abort();
+ }
+ NEON_SET_REG(T0, rd, pass * 2);
+ }
+ }
+ break;
+ case 4: case 5: /* VPADDL */
+ case 12: case 13: /* VPADAL */
+ if (size == 3)
+ return 1;
+ for (pass = 0; pass < q + 1; pass++) {
+ tmp = neon_load_reg(rm, pass * 2);
+ gen_neon_widen(cpu_V0, tmp, size, op & 1);
+ tmp = neon_load_reg(rm, pass * 2 + 1);
+ gen_neon_widen(cpu_V1, tmp, size, op & 1);
+ switch (size) {
+ case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
+ case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
+ case 2: tcg_gen_add_i64(CPU_V001); break;
+ default: abort();
+ }
+ if (op >= 12) {
+ /* Accumulate. */
+ neon_load_reg64(cpu_V1, rd + pass);
+ gen_neon_addl(size);
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ break;
+ case 33: /* VTRN */
+ if (size == 2) {
+ for (n = 0; n < (q ? 4 : 2); n += 2) {
+ NEON_GET_REG(T0, rm, n);
+ NEON_GET_REG(T1, rd, n + 1);
+ NEON_SET_REG(T1, rm, n);
+ NEON_SET_REG(T0, rd, n + 1);
+ }
+ } else {
+ goto elementwise;
+ }
+ break;
+ case 34: /* VUZP */
+ /* Reg Before After
+ Rd A3 A2 A1 A0 B2 B0 A2 A0
+ Rm B3 B2 B1 B0 B3 B1 A3 A1
+ */
+ if (size == 3)
+ return 1;
+ gen_neon_unzip(rd, q, 0, size);
+ gen_neon_unzip(rm, q, 4, size);
+ if (q) {
+ static int unzip_order_q[8] =
+ {0, 2, 4, 6, 1, 3, 5, 7};
+ for (n = 0; n < 8; n++) {
+ int reg = (n < 4) ? rd : rm;
+ gen_neon_movl_T0_scratch(unzip_order_q[n]);
+ NEON_SET_REG(T0, reg, n % 4);
+ }
+ } else {
+ static int unzip_order[4] =
+ {0, 4, 1, 5};
+ for (n = 0; n < 4; n++) {
+ int reg = (n < 2) ? rd : rm;
+ gen_neon_movl_T0_scratch(unzip_order[n]);
+ NEON_SET_REG(T0, reg, n % 2);
+ }
+ }
+ break;
+ case 35: /* VZIP */
+ /* Reg Before After
+ Rd A3 A2 A1 A0 B1 A1 B0 A0
+ Rm B3 B2 B1 B0 B3 A3 B2 A2
+ */
+ if (size == 3)
+ return 1;
+ count = (q ? 4 : 2);
+ for (n = 0; n < count; n++) {
+ NEON_GET_REG(T0, rd, n);
+ NEON_GET_REG(T1, rm, n);
+ switch (size) {
+ case 0: gen_helper_neon_zip_u8(); break;
+ case 1: gen_helper_neon_zip_u16(); break;
+ case 2: /* no-op */; break;
+ default: abort();
+ }
+ gen_neon_movl_scratch_T0(n * 2);
+ gen_neon_movl_scratch_T1(n * 2 + 1);
+ }
+ for (n = 0; n < count * 2; n++) {
+ int reg = (n < count) ? rd : rm;
+ gen_neon_movl_T0_scratch(n);
+ NEON_SET_REG(T0, reg, n % count);
+ }
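+ /* The zipped halves are staged in scratch slots because rd
+ and rm may alias; for q == 0 the four scratch words are
+ written back as rd = {s0, s1}, rm = {s2, s3}. */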
+ break;
+ case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
+ if (size == 3)
+ return 1;
+ TCGV_UNUSED(tmp2);
+ for (pass = 0; pass < 2; pass++) {
+ neon_load_reg64(cpu_V0, rm + pass);
+ tmp = new_tmp();
+ if (op == 36 && q == 0) {
+ gen_neon_narrow(size, tmp, cpu_V0);
+ } else if (q) {
+ gen_neon_narrow_satu(size, tmp, cpu_V0);
+ } else {
+ gen_neon_narrow_sats(size, tmp, cpu_V0);
+ }
+ if (pass == 0) {
+ tmp2 = tmp;
+ } else {
+ neon_store_reg(rd, 0, tmp2);
+ neon_store_reg(rd, 1, tmp);
+ }
+ }
+ break;
+ case 38: /* VSHLL */
+ if (q || size == 3)
+ return 1;
+ tmp = neon_load_reg(rm, 0);
+ tmp2 = neon_load_reg(rm, 1);
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 1)
+ tmp = tmp2;
+ gen_neon_widen(cpu_V0, tmp, size, 1);
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ break;
+ default:
+ elementwise:
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ if (op == 30 || op == 31 || op >= 58) {
+ tcg_gen_ld_f32(cpu_F0s, cpu_env,
+ neon_reg_offset(rm, pass));
+ } else {
+ NEON_GET_REG(T0, rm, pass);
+ }
+ switch (op) {
+ case 1: /* VREV32 */
+ switch (size) {
+ case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
+ case 1: gen_swap_half(cpu_T[0]); break;
+ default: return 1;
+ }
+ break;
+ case 2: /* VREV16 */
+ if (size != 0)
+ return 1;
+ gen_rev16(cpu_T[0]);
+ break;
+ case 8: /* CLS */
+ switch (size) {
+ case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
+ case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
+ case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
+ default: return 1;
+ }
+ break;
+ case 9: /* CLZ */
+ switch (size) {
+ case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
+ case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
+ case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
+ default: return 1;
+ }
+ break;
+ case 10: /* CNT */
+ if (size != 0)
+ return 1;
+ gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
+ break;
+ case 11: /* VNOT */
+ if (size != 0)
+ return 1;
+ gen_op_notl_T0();
+ break;
+ case 14: /* VQABS */
+ switch (size) {
+ case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
+ case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
+ case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
+ default: return 1;
+ }
+ break;
+ case 15: /* VQNEG */
+ switch (size) {
+ case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
+ case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
+ case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
+ default: return 1;
+ }
+ break;
+ case 16: case 19: /* VCGT #0, VCLE #0 */
+ gen_op_movl_T1_im(0);
+ switch(size) {
+ case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
+ case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
+ case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
+ default: return 1;
+ }
+ if (op == 19)
+ gen_op_notl_T0();
+ break;
+ case 17: case 20: /* VCGE #0, VCLT #0 */
+ gen_op_movl_T1_im(0);
+ switch(size) {
+ case 0: gen_helper_neon_cge_s8(CPU_T001); break;
+ case 1: gen_helper_neon_cge_s16(CPU_T001); break;
+ case 2: gen_helper_neon_cge_s32(CPU_T001); break;
+ default: return 1;
+ }
+ if (op == 20)
+ gen_op_notl_T0();
+ break;
+ case 18: /* VCEQ #0 */
+ gen_op_movl_T1_im(0);
+ switch(size) {
+ case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
+ case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
+ case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
+ default: return 1;
+ }
+ break;
+ case 22: /* VABS */
+ switch(size) {
+ case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
+ case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
+ case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
+ default: return 1;
+ }
+ break;
+ case 23: /* VNEG */
+ gen_op_movl_T1_im(0);
+ if (size == 3)
+ return 1;
+ gen_neon_rsb(size);
+ break;
+ case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
+ gen_op_movl_T1_im(0);
+ gen_helper_neon_cgt_f32(CPU_T001);
+ if (op == 27)
+ gen_op_notl_T0();
+ break;
+ case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
+ gen_op_movl_T1_im(0);
+ gen_helper_neon_cge_f32(CPU_T001);
+ if (op == 28)
+ gen_op_notl_T0();
+ break;
+ case 26: /* Float VCEQ #0 */
+ gen_op_movl_T1_im(0);
+ gen_helper_neon_ceq_f32(CPU_T001);
+ break;
+ case 30: /* Float VABS */
+ gen_vfp_abs(0);
+ break;
+ case 31: /* Float VNEG */
+ gen_vfp_neg(0);
+ break;
+ case 32: /* VSWP */
+ NEON_GET_REG(T1, rd, pass);
+ NEON_SET_REG(T1, rm, pass);
+ break;
+ case 33: /* VTRN */
+ NEON_GET_REG(T1, rd, pass);
+ switch (size) {
+ case 0: gen_helper_neon_trn_u8(); break;
+ case 1: gen_helper_neon_trn_u16(); break;
+ case 2: abort();
+ default: return 1;
+ }
+ NEON_SET_REG(T1, rm, pass);
+ break;
+ case 56: /* Integer VRECPE */
+ gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
+ break;
+ case 57: /* Integer VRSQRTE */
+ gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
+ break;
+ case 58: /* Float VRECPE */
+ gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
+ break;
+ case 59: /* Float VRSQRTE */
+ gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
+ break;
+ case 60: /* VCVT.F32.S32 */
+ gen_vfp_tosiz(0);
+ break;
+ case 61: /* VCVT.F32.U32 */
+ gen_vfp_touiz(0);
+ break;
+ case 62: /* VCVT.S32.F32 */
+ gen_vfp_sito(0);
+ break;
+ case 63: /* VCVT.U32.F32 */
+ gen_vfp_uito(0);
+ break;
+ default:
+ /* Reserved: 21, 29, 39-55 */
+ return 1;
+ }
+ if (op == 30 || op == 31 || op >= 58) {
+ tcg_gen_st_f32(cpu_F0s, cpu_env,
+ neon_reg_offset(rd, pass));
+ } else {
+ NEON_SET_REG(T0, rd, pass);
+ }
+ }
+ break;
+ }
+ } else if ((insn & (1 << 10)) == 0) {
+ /* VTBL, VTBX. */
+ n = (insn >> 5) & 0x18;
+ if (insn & (1 << 6)) {
+ tmp = neon_load_reg(rd, 0);
+ } else {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, 0);
+ }
+ tmp2 = neon_load_reg(rm, 0);
+ gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
+ tcg_const_i32(n));
+ if (insn & (1 << 6)) {
+ tmp = neon_load_reg(rd, 1);
+ } else {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, 0);
+ }
+ tmp3 = neon_load_reg(rm, 1);
+ gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
+ tcg_const_i32(n));
+ neon_store_reg(rd, 0, tmp2);
+ neon_store_reg(rd, 1, tmp3);
+ } else if ((insn & 0x380) == 0) {
+ /* VDUP */
+ if (insn & (1 << 19)) {
+ NEON_GET_REG(T0, rm, 1);
+ } else {
+ NEON_GET_REG(T0, rm, 0);
+ }
+ if (insn & (1 << 16)) {
+ gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
+ } else if (insn & (1 << 17)) {
+ if ((insn >> 18) & 1)
+ gen_neon_dup_high16(cpu_T[0]);
+ else
+ gen_neon_dup_low16(cpu_T[0]);
+ }
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ NEON_SET_REG(T0, rd, pass);
+ }
+ } else {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
+{
+ int cpnum;
+
+ cpnum = (insn >> 8) & 0xf;
+ if (arm_feature(env, ARM_FEATURE_XSCALE)
+ && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
+ return 1;
+
+ switch (cpnum) {
+ case 0:
+ case 1:
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ return disas_iwmmxt_insn(env, s, insn);
+ } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ return disas_dsp_insn(env, s, insn);
+ }
+ return 1;
+ case 10:
+ case 11:
+ return disas_vfp_insn (env, s, insn);
+ case 15:
+ return disas_cp15_insn (env, s, insn);
+ default:
+ /* Unknown coprocessor. See if the board has hooked it. */
+ return disas_cp_insn (env, s, insn);
+ }
+}
+
+
+/* Store a 64-bit value to a register pair. Clobbers val. */
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
+{
+ TCGv tmp;
+ tmp = new_tmp();
+ tcg_gen_trunc_i64_i32(tmp, val);
+ store_reg(s, rlow, tmp);
+ tmp = new_tmp();
+ tcg_gen_shri_i64(val, val, 32);
+ tcg_gen_trunc_i64_i32(tmp, val);
+ store_reg(s, rhigh, tmp);
+}
+
+/* load a 32-bit value from a register and perform a 64-bit accumulate. */
+static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
+{
+ TCGv tmp;
+ TCGv tmp2;
+
+ /* Load 64-bit value rd:rn. */
+ tmp = tcg_temp_new(TCG_TYPE_I64);
+ tmp2 = load_reg(s, rlow);
+ tcg_gen_extu_i32_i64(tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_add_i64(val, val, tmp);
+}
+
+/* load and add a 64-bit value from a register pair. */
+static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
+{
+ TCGv tmp;
+ TCGv tmp2;
+
+ /* Load 64-bit value rd:rn. */
+ tmp = tcg_temp_new(TCG_TYPE_I64);
+ tmp2 = load_reg(s, rhigh);
+ tcg_gen_extu_i32_i64(tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_add_i64(val, val, tmp);
+
+ tmp2 = load_reg(s, rlow);
+ tcg_gen_extu_i32_i64(tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_add_i64(val, val, tmp);
+}
+
+/* Set N and Z flags from a 64-bit value. */
+static void gen_logicq_cc(TCGv val)
+{
+ TCGv tmp = new_tmp();
+ gen_helper_logicq_cc(tmp, val);
+ gen_logic_CC(tmp);
+ dead_tmp(tmp);
+}
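+/* The helpers above are typically used together: a 64-bit
+ multiply result in an i64 temp is passed through gen_addq()
+ for the accumulating forms, then gen_storeq_reg() writes it
+ back as an RdLo:RdHi register pair. */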
+
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
-#ifdef GEN_TRACE
- int insn_ticks = 0;
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv tmp3;
+ TCGv addr;
+#ifdef CONFIG_TRACE
+ int ticks = 0;
#endif
insn = ldl_code(s->pc);
-#ifdef GEN_TRACE
+#ifdef CONFIG_TRACE
if (tracing) {
- trace_add_insn(insn, 0 /* not thumb */);
- insn_ticks = get_insn_ticks(insn);
- gen_op_trace_insn();
+ trace_add_insn(insn);
+ ticks = get_insn_ticks_arm(insn);
+ gen_helper_traceInsn();
}
#endif
s->pc += 4;
+ /* M variants do not implement ARM mode. */
+ if (IS_M(env))
+ goto illegal_op;
cond = insn >> 28;
if (cond == 0xf){
-#ifdef GEN_TRACE
- if (tracing)
- gen_op_add_to_sim_time(insn_ticks);
+#ifdef CONFIG_TRACE
+ if (tracing) {
+ gen_traceTicks(ticks);
+ }
#endif
/* Unconditional instructions. */
+ if (((insn >> 25) & 7) == 1) {
+ /* NEON Data processing. */
+ if (!arm_feature(env, ARM_FEATURE_NEON))
+ goto illegal_op;
+
+ if (disas_neon_data_insn(env, s, insn))
+ goto illegal_op;
+ return;
+ }
+ if ((insn & 0x0f100000) == 0x04000000) {
+ /* NEON load/store. */
+ if (!arm_feature(env, ARM_FEATURE_NEON))
+ goto illegal_op;
+
+ if (disas_neon_ls_insn(env, s, insn))
+ goto illegal_op;
+ return;
+ }
if ((insn & 0x0d70f000) == 0x0550f000)
return; /* PLD */
- else if ((insn & 0x0e000000) == 0x0a000000) {
+ else if ((insn & 0x0ffffdff) == 0x01010000) {
+ ARCH(6);
+ /* setend */
+ if (insn & (1 << 9)) {
+ /* BE8 mode not implemented. */
+ goto illegal_op;
+ }
+ return;
+ } else if ((insn & 0x0fffff00) == 0x057ff000) {
+ switch ((insn >> 4) & 0xf) {
+ case 1: /* clrex */
+ ARCH(6K);
+ gen_helper_clrex(cpu_env);
+ return;
+ case 4: /* dsb */
+ case 5: /* dmb */
+ case 6: /* isb */
+ ARCH(7);
+ /* We don't emulate caches so these are a no-op. */
+ return;
+ default:
+ goto illegal_op;
+ }
+ } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
+ /* srs */
+ uint32_t offset;
+ if (IS_USER(s))
+ goto illegal_op;
+ ARCH(6);
+ op1 = (insn & 0x1f);
+ if (op1 == (env->uncached_cpsr & CPSR_M)) {
+ addr = load_reg(s, 13);
+ } else {
+ addr = new_tmp();
+ gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
+ }
+ i = (insn >> 23) & 3;
+ switch (i) {
+ case 0: offset = -4; break; /* DA */
+ case 1: offset = -8; break; /* DB */
+ case 2: offset = 0; break; /* IA */
+ case 3: offset = 4; break; /* IB */
+ default: abort();
+ }
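+ /* SRS always stores two words (LR, then the saved status
+ register), so decrement-before starts at -8 to leave addr
+ at the lower of the two slots; the writeback offsets below
+ differ accordingly. */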
+ if (offset)
+ tcg_gen_addi_i32(addr, addr, offset);
+ tmp = load_reg(s, 14);
+ gen_st32(tmp, addr, 0);
+ tmp = new_tmp();
+ gen_helper_cpsr_read(tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ gen_st32(tmp, addr, 0);
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ switch (i) {
+ case 0: offset = -8; break;
+ case 1: offset = -4; break;
+ case 2: offset = 4; break;
+ case 3: offset = 0; break;
+ default: abort();
+ }
+ if (offset)
+ tcg_gen_addi_i32(addr, addr, offset);
+ if (op1 == (env->uncached_cpsr & CPSR_M)) {
+ store_reg(s, 13, addr);
+ } else {
+ gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
+ }
+ } else {
+ dead_tmp(addr);
+ }
+ } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
+ /* rfe */
+ uint32_t offset;
+ if (IS_USER(s))
+ goto illegal_op;
+ ARCH(6);
+ rn = (insn >> 16) & 0xf;
+ addr = load_reg(s, rn);
+ i = (insn >> 23) & 3;
+ switch (i) {
+ case 0: offset = -4; break; /* DA */
+ case 1: offset = -8; break; /* DB */
+ case 2: offset = 0; break; /* IA */
+ case 3: offset = 4; break; /* IB */
+ default: abort();
+ }
+ if (offset)
+ tcg_gen_addi_i32(addr, addr, offset);
+ /* Load PC into tmp and CPSR into tmp2. */
+ tmp = gen_ld32(addr, 0);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp2 = gen_ld32(addr, 0);
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ switch (i) {
+ case 0: offset = -8; break;
+ case 1: offset = -4; break;
+ case 2: offset = 4; break;
+ case 3: offset = 0; break;
+ default: abort();
+ }
+ if (offset)
+ tcg_gen_addi_i32(addr, addr, offset);
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ gen_rfe(s, tmp, tmp2);
+ } else if ((insn & 0x0e000000) == 0x0a000000) {
/* branch link and change to thumb (blx <offset>) */
int32_t offset;
val = (uint32_t)s->pc;
- gen_op_movl_T0_im(val);
- gen_movl_reg_T0(s, 14);
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ store_reg(s, 14, tmp);
/* Sign-extend the 24-bit offset */
offset = (((int32_t)insn) << 8) >> 8;
/* offset * 4 + bit24 * 2 + (thumb bit) */
val += (offset << 2) | ((insn >> 23) & 2) | 1;
/* pipeline offset */
val += 4;
- gen_op_movl_T0_im(val);
- gen_bx(s);
+ gen_bx_im(s, val);
return;
+ } else if ((insn & 0x0e000f00) == 0x0c000100) {
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ /* iWMMXt register transfer. */
+ if (env->cp15.c15_cpar & (1 << 1))
+ if (!disas_iwmmxt_insn(env, s, insn))
+ return;
+ }
} else if ((insn & 0x0fe00000) == 0x0c400000) {
/* Coprocessor double register transfer. */
} else if ((insn & 0x0f000010) == 0x0e000010) {
/* Additional coprocessor register transfer. */
- } else if ((insn & 0x0ff10010) == 0x01000000) {
+ } else if ((insn & 0x0ff10020) == 0x01000000) {
+ uint32_t mask;
+ uint32_t val;
/* cps (privileged) */
- } else if ((insn & 0x0ffffdff) == 0x01010000) {
- /* setend */
- if (insn & (1 << 9)) {
- /* BE8 mode not implemented. */
- goto illegal_op;
+ if (IS_USER(s))
+ return;
+ mask = val = 0;
+ if (insn & (1 << 19)) {
+ if (insn & (1 << 8))
+ mask |= CPSR_A;
+ if (insn & (1 << 7))
+ mask |= CPSR_I;
+ if (insn & (1 << 6))
+ mask |= CPSR_F;
+ if (insn & (1 << 18))
+ val |= mask;
+ }
+ if (insn & (1 << 17)) {
+ mask |= CPSR_M;
+ val |= (insn & 0x1f);
+ }
+ if (mask) {
+ gen_op_movl_T0_im(val);
+ gen_set_psr_T0(s, mask, 0);
}
return;
}
goto illegal_op;
}
if (cond != 0xe) {
-#ifdef GEN_TRACE
+#ifdef CONFIG_TRACE
if (tracing) {
- /* a non-executed conditional instruction takes only 1 cycle */
- gen_op_add_to_sim_time(1);
- insn_ticks -= 1;
+ /* a non-executed conditional instruction takes only 1 cycle */
+ gen_traceTicks(1);
+ ticks -= 1;
}
#endif
/* if not always execute, we generate a conditional jump to
next instruction */
s->condlabel = gen_new_label();
- gen_test_cc[cond ^ 1](s->condlabel);
+ gen_test_cc(cond ^ 1, s->condlabel);
s->condjmp = 1;
- //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
- //s->is_jmp = DISAS_JUMP_NEXT;
}
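+ /* gen_test_cc(cond ^ 1, condlabel) branches to condlabel when
+ the inverted condition holds, so the code that follows runs
+ only if the instruction's own condition passes. */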
-#ifdef GEN_TRACE
- if (tracing)
- gen_op_add_to_sim_time(insn_ticks);
+#ifdef CONFIG_TRACE
+ if (tracing && ticks > 0) {
+ gen_traceTicks(ticks);
+ }
#endif
if ((insn & 0x0f900000) == 0x03000000) {
- if ((insn & 0x0fb0f000) != 0x0320f000)
- goto illegal_op;
- /* CPSR = immediate */
- val = insn & 0xff;
- shift = ((insn >> 8) & 0xf) * 2;
- if (shift)
- val = (val >> shift) | (val << (32 - shift));
- gen_op_movl_T0_im(val);
- i = ((insn & (1 << 22)) != 0);
- if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
- goto illegal_op;
+ if ((insn & (1 << 21)) == 0) {
+ ARCH(6T2);
+ rd = (insn >> 12) & 0xf;
+ val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
+ if ((insn & (1 << 22)) == 0) {
+ /* MOVW */
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ } else {
+ /* MOVT */
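+ /* Keeps the low halfword via ext16u and ORs the new
+ immediate into bits 31:16, e.g. 0x0000abcd becomes
+ 0x1234abcd for an immediate of 0x1234. */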
+ tmp = load_reg(s, rd);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_ori_i32(tmp, tmp, val << 16);
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ if (((insn >> 12) & 0xf) != 0xf)
+ goto illegal_op;
+ if (((insn >> 16) & 0xf) == 0) {
+ gen_nop_hint(s, insn & 0xff);
+ } else {
+ /* CPSR = immediate */
+ val = insn & 0xff;
+ shift = ((insn >> 8) & 0xf) * 2;
+ if (shift)
+ val = (val >> shift) | (val << (32 - shift));
+ gen_op_movl_T0_im(val);
+ i = ((insn & (1 << 22)) != 0);
+ if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
+ goto illegal_op;
+ }
+ }
} else if ((insn & 0x0f900000) == 0x01000000
&& (insn & 0x00000090) != 0x00000090) {
/* miscellaneous instructions */
@@ -1223,7 +5959,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* PSR = reg */
gen_movl_T0_reg(s, rm);
i = ((op1 & 2) != 0);
- if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
+ if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
goto illegal_op;
} else {
/* reg = PSR */
@@ -1231,24 +5967,25 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (op1 & 2) {
if (IS_USER(s))
goto illegal_op;
- gen_op_movl_T0_spsr();
+ tmp = load_cpu_field(spsr);
} else {
- gen_op_movl_T0_cpsr();
+ tmp = new_tmp();
+ gen_helper_cpsr_read(tmp);
}
- gen_movl_reg_T0(s, rd);
+ store_reg(s, rd, tmp);
}
break;
case 0x1:
if (op1 == 1) {
/* branch/exchange thumb (bx). */
- gen_movl_T0_reg(s, rm);
- gen_bx(s);
+ tmp = load_reg(s, rm);
+ gen_bx(s, tmp);
} else if (op1 == 3) {
/* clz */
rd = (insn >> 12) & 0xf;
- gen_movl_T0_reg(s, rm);
- gen_op_clz_T0();
- gen_movl_reg_T0(s, rd);
+ tmp = load_reg(s, rm);
+ gen_helper_clz(tmp, tmp);
+ store_reg(s, rd, tmp);
} else {
goto illegal_op;
}
@@ -1257,8 +5994,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (op1 == 1) {
ARCH(5J); /* bxj */
/* Trivial implementation equivalent to bx. */
- gen_movl_T0_reg(s, rm);
- gen_bx(s);
+ tmp = load_reg(s, rm);
+ gen_bx(s, tmp);
} else {
goto illegal_op;
}
@@ -1268,29 +6005,30 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
goto illegal_op;
/* branch link/exchange thumb (blx) */
- val = (uint32_t)s->pc;
- gen_op_movl_T0_im(val);
- gen_movl_reg_T0(s, 14);
- gen_movl_T0_reg(s, rm);
- gen_bx(s);
+ tmp = load_reg(s, rm);
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, s->pc);
+ store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
break;
case 0x5: /* saturating add/subtract */
rd = (insn >> 12) & 0xf;
rn = (insn >> 16) & 0xf;
- gen_movl_T0_reg(s, rm);
- gen_movl_T1_reg(s, rn);
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_op_double_T1_saturate();
+ gen_helper_double_saturate(tmp2, tmp2);
if (op1 & 1)
- gen_op_subl_T0_T1_saturate();
+ gen_helper_sub_saturate(tmp, tmp, tmp2);
else
- gen_op_addl_T0_T1_saturate();
- gen_movl_reg_T0(s, rd);
+ gen_helper_add_saturate(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ store_reg(s, rd, tmp);
break;
case 7: /* bkpt */
- gen_op_movl_T0_im((long)s->pc - 4);
- gen_op_movl_reg_TN[0][15]();
- gen_op_bkpt();
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc - 4);
+ gen_exception(EXCP_BKPT);
s->is_jmp = DISAS_JUMP;
break;
case 0x8: /* signed multiply */
@@ -1302,34 +6040,41 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
rd = (insn >> 16) & 0xf;
if (op1 == 1) {
/* (32 * 16) >> 16 */
- gen_movl_T0_reg(s, rm);
- gen_movl_T1_reg(s, rs);
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
if (sh & 4)
- gen_op_sarl_T1_im(16);
+ tcg_gen_sari_i32(tmp2, tmp2, 16);
else
- gen_op_sxth_T1();
- gen_op_imulw_T0_T1();
+ gen_sxth(tmp2);
+ tmp2 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 16);
+ tmp = new_tmp();
+ tcg_gen_trunc_i64_i32(tmp, tmp2);
if ((sh & 2) == 0) {
- gen_movl_T1_reg(s, rn);
- gen_op_addl_T0_T1_setq();
+ tmp2 = load_reg(s, rn);
+ gen_helper_add_setq(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
}
- gen_movl_reg_T0(s, rd);
+ store_reg(s, rd, tmp);
} else {
/* 16 * 16 */
- gen_movl_T0_reg(s, rm);
- gen_movl_T1_reg(s, rs);
- gen_mulxy(sh & 2, sh & 4);
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
+ dead_tmp(tmp2);
if (op1 == 2) {
- gen_op_signbit_T1_T0();
- gen_op_addq_T0_T1(rn, rd);
- gen_movl_reg_T0(s, rn);
- gen_movl_reg_T1(s, rd);
+ tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ext_i32_i64(tmp2, tmp);
+ dead_tmp(tmp);
+ gen_addq(s, tmp2, rn, rd);
+ gen_storeq_reg(s, rn, rd, tmp2);
} else {
if (op1 == 0) {
- gen_movl_T1_reg(s, rn);
- gen_op_addl_T0_T1_setq();
+ tmp2 = load_reg(s, rn);
+ gen_helper_add_setq(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
}
- gen_movl_reg_T0(s, rd);
+ store_reg(s, rd, tmp);
}
}
break;
@@ -1354,7 +6099,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
val = (val >> shift) | (val << (32 - shift));
gen_op_movl_T1_im(val);
if (logic_cc && shift)
- gen_op_mov_CF_T1();
+ gen_set_CF_bit31(cpu_T[1]);
} else {
/* register */
rm = (insn) & 0xf;
@@ -1362,27 +6107,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
shiftop = (insn >> 5) & 3;
if (!(insn & (1 << 4))) {
shift = (insn >> 7) & 0x1f;
- if (shift != 0) {
- if (logic_cc) {
- gen_shift_T1_im_cc[shiftop](shift);
- } else {
- gen_shift_T1_im[shiftop](shift);
- }
- } else if (shiftop != 0) {
- if (logic_cc) {
- gen_shift_T1_0_cc[shiftop]();
- } else {
- gen_shift_T1_0[shiftop]();
- }
- }
+ gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
} else {
rs = (insn >> 8) & 0xf;
- gen_movl_T0_reg(s, rs);
- if (logic_cc) {
- gen_shift_T1_T0_cc[shiftop]();
- } else {
- gen_shift_T1_T0[shiftop]();
- }
+ tmp = load_reg(s, rs);
+ gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
}
}
if (op1 != 0x0f && op1 != 0x0d) {
@@ -1436,21 +6165,21 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (set_cc)
gen_op_adcl_T0_T1_cc();
else
- gen_op_adcl_T0_T1();
+ gen_adc_T0_T1();
gen_movl_reg_T0(s, rd);
break;
case 0x06:
if (set_cc)
gen_op_sbcl_T0_T1_cc();
else
- gen_op_sbcl_T0_T1();
+ gen_sbc_T0_T1();
gen_movl_reg_T0(s, rd);
break;
case 0x07:
if (set_cc)
gen_op_rscl_T0_T1_cc();
else
- gen_op_rscl_T0_T1();
+ gen_rsc_T0_T1();
gen_movl_reg_T0(s, rd);
break;
case 0x08:
@@ -1522,131 +6251,408 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
rn = (insn >> 12) & 0xf;
rs = (insn >> 8) & 0xf;
rm = (insn) & 0xf;
- if (((insn >> 22) & 3) == 0) {
+ op1 = (insn >> 20) & 0xf;
+ switch (op1) {
+ case 0: case 1: case 2: case 3: case 6:
/* 32 bit mul */
- gen_movl_T0_reg(s, rs);
- gen_movl_T1_reg(s, rm);
- gen_op_mul_T0_T1();
- if (insn & (1 << 21)) {
- gen_movl_T1_reg(s, rn);
- gen_op_addl_T0_T1();
+ tmp = load_reg(s, rs);
+ tmp2 = load_reg(s, rm);
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ if (insn & (1 << 22)) {
+ /* Subtract (mls) */
+ ARCH(6T2);
+ tmp2 = load_reg(s, rn);
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ dead_tmp(tmp2);
+ } else if (insn & (1 << 21)) {
+ /* Add */
+ tmp2 = load_reg(s, rn);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
}
if (insn & (1 << 20))
- gen_op_logic_T0_cc();
- gen_movl_reg_T0(s, rd);
- } else {
+ gen_logic_CC(tmp);
+ store_reg(s, rd, tmp);
+ break;
+ default:
/* 64 bit mul */
- gen_movl_T0_reg(s, rs);
- gen_movl_T1_reg(s, rm);
+ tmp = load_reg(s, rs);
+ tmp2 = load_reg(s, rm);
if (insn & (1 << 22))
- gen_op_imull_T0_T1();
+ tmp = gen_muls_i64_i32(tmp, tmp2);
else
- gen_op_mull_T0_T1();
+ tmp = gen_mulu_i64_i32(tmp, tmp2);
if (insn & (1 << 21)) /* mult accumulate */
- gen_op_addq_T0_T1(rn, rd);
+ gen_addq(s, tmp, rn, rd);
if (!(insn & (1 << 23))) { /* double accumulate */
ARCH(6);
- gen_op_addq_lo_T0_T1(rn);
- gen_op_addq_lo_T0_T1(rd);
+ gen_addq_lo(s, tmp, rn);
+ gen_addq_lo(s, tmp, rd);
}
if (insn & (1 << 20))
- gen_op_logicq_cc();
- gen_movl_reg_T0(s, rn);
- gen_movl_reg_T1(s, rd);
+ gen_logicq_cc(tmp);
+ gen_storeq_reg(s, rn, rd, tmp);
+ break;
}
} else {
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
if (insn & (1 << 23)) {
/* load/store exclusive */
- goto illegal_op;
+ gen_movl_T1_reg(s, rn);
+ addr = cpu_T[1];
+ if (insn & (1 << 20)) {
+ gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
+ } else {
+ int label = gen_new_label();
+ rm = insn & 0xf;
+ gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
+ 0, label);
+ tmp = load_reg(s,rm);
+ gen_st32(tmp, cpu_T[1], IS_USER(s));
+ gen_set_label(label);
+ gen_movl_reg_T0(s, rd);
+ }
} else {
/* SWP instruction */
rm = (insn) & 0xf;
- gen_movl_T0_reg(s, rm);
- gen_movl_T1_reg(s, rn);
+ /* ??? This is not really atomic. However we know
+ we never have multiple CPUs running in parallel,
+ so it is good enough. */
+ addr = load_reg(s, rn);
+ tmp = load_reg(s, rm);
if (insn & (1 << 22)) {
- gen_ldst(swpb, s);
+ tmp2 = gen_ld8u(addr, IS_USER(s));
+ gen_st8(tmp, addr, IS_USER(s));
} else {
- gen_ldst(swpl, s);
+ tmp2 = gen_ld32(addr, IS_USER(s));
+ gen_st32(tmp, addr, IS_USER(s));
}
- gen_movl_reg_T0(s, rd);
+ dead_tmp(addr);
+ store_reg(s, rd, tmp2);
}
}
} else {
int address_offset;
+ int load;
/* Misc load/store */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
- gen_movl_T1_reg(s, rn);
+ addr = load_reg(s, rn);
if (insn & (1 << 24))
- gen_add_datah_offset(s, insn, 0);
+ gen_add_datah_offset(s, insn, 0, addr);
address_offset = 0;
if (insn & (1 << 20)) {
/* load */
switch(sh) {
case 1:
- gen_ldst(lduw, s);
+ tmp = gen_ld16u(addr, IS_USER(s));
break;
case 2:
- gen_ldst(ldsb, s);
+ tmp = gen_ld8s(addr, IS_USER(s));
break;
default:
case 3:
- gen_ldst(ldsw, s);
+ tmp = gen_ld16s(addr, IS_USER(s));
break;
}
- gen_movl_reg_T0(s, rd);
+ load = 1;
} else if (sh & 2) {
/* doubleword */
if (sh & 1) {
/* store */
- gen_movl_T0_reg(s, rd);
- gen_ldst(stl, s);
- gen_op_addl_T1_im(4);
- gen_movl_T0_reg(s, rd + 1);
- gen_ldst(stl, s);
+ tmp = load_reg(s, rd);
+ gen_st32(tmp, addr, IS_USER(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rd + 1);
+ gen_st32(tmp, addr, IS_USER(s));
+ load = 0;
} else {
/* load */
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, rd);
- gen_op_addl_T1_im(4);
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, rd + 1);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = gen_ld32(addr, IS_USER(s));
+ rd++;
+ load = 1;
}
address_offset = -4;
} else {
/* store */
- gen_movl_T0_reg(s, rd);
- gen_ldst(stw, s);
+ tmp = load_reg(s, rd);
+ gen_st16(tmp, addr, IS_USER(s));
+ load = 0;
}
+ /* Perform base writeback before the loaded value to
+ ensure correct behavior with overlapping index registers.
+ ldrd with base writeback is undefined if the
+ destination and index registers overlap. */
if (!(insn & (1 << 24))) {
- gen_add_datah_offset(s, insn, address_offset);
- gen_movl_reg_T1(s, rn);
+ gen_add_datah_offset(s, insn, address_offset, addr);
+ store_reg(s, rn, addr);
} else if (insn & (1 << 21)) {
if (address_offset)
- gen_op_addl_T1_im(address_offset);
- gen_movl_reg_T1(s, rn);
+ tcg_gen_addi_i32(addr, addr, address_offset);
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ if (load) {
+ /* Complete the load. */
+ store_reg(s, rd, tmp);
}
}
break;
case 0x4:
case 0x5:
+ goto do_ldst;
case 0x6:
case 0x7:
-#ifdef GEN_TRACE
- /* Added a special undefined instruction to cause the
- * simulator to exit. This allows us to write short assembly
- * language tests that can exit the simulator.
- */
-#define EXIT_SIMULATION 0xe6c00110
- if (insn == EXIT_SIMULATION) {
- gen_op_shutdown();
+ if (insn & (1 << 4)) {
+ ARCH(6);
+ /* Armv6 Media instructions. */
+ rm = insn & 0xf;
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ rs = (insn >> 8) & 0xf;
+ switch ((insn >> 23) & 3) {
+ case 0: /* Parallel add/subtract. */
+ op1 = (insn >> 20) & 7;
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ sh = (insn >> 5) & 7;
+ if ((op1 & 3) == 0 || sh == 5 || sh == 6)
+ goto illegal_op;
+ gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
+ dead_tmp(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 1:
+ if ((insn & 0x00700020) == 0) {
+ /* Halfword pack. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ shift = (insn >> 7) & 0x1f;
+ if (insn & (1 << 6)) {
+ /* pkhtb */
+ if (shift == 0)
+ shift = 31;
+ tcg_gen_sari_i32(tmp2, tmp2, shift);
+ tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
+ tcg_gen_ext16u_i32(tmp2, tmp2);
+ } else {
+ /* pkhbt */
+ if (shift)
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ }
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x00200020) == 0x00200000) {
+ /* [us]sat */
+ tmp = load_reg(s, rm);
+ shift = (insn >> 7) & 0x1f;
+ if (insn & (1 << 6)) {
+ if (shift == 0)
+ shift = 31;
+ tcg_gen_sari_i32(tmp, tmp, shift);
+ } else {
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+ sh = (insn >> 16) & 0x1f;
+ if (sh != 0) {
+ if (insn & (1 << 22))
+ gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
+ else
+ gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
+ }
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x00300fe0) == 0x00200f20) {
+ /* [us]sat16 */
+ tmp = load_reg(s, rm);
+ sh = (insn >> 16) & 0x1f;
+ if (sh != 0) {
+ if (insn & (1 << 22))
+ gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
+ else
+ gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
+ }
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x00700fe0) == 0x00000fa0) {
+ /* Select bytes. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ tmp3 = new_tmp();
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
+ dead_tmp(tmp3);
+ dead_tmp(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x000003e0) == 0x00000060) {
+ tmp = load_reg(s, rm);
+ shift = (insn >> 10) & 3;
+ /* ??? In many cases it's not necessary to do a
+ rotate, a shift is sufficient. */
+ if (shift != 0)
+ tcg_gen_rori_i32(tmp, tmp, shift * 8);
+ op1 = (insn >> 20) & 7;
+ switch (op1) {
+ case 0: gen_sxtb16(tmp); break;
+ case 2: gen_sxtb(tmp); break;
+ case 3: gen_sxth(tmp); break;
+ case 4: gen_uxtb16(tmp); break;
+ case 6: gen_uxtb(tmp); break;
+ case 7: gen_uxth(tmp); break;
+ default: goto illegal_op;
+ }
+ if (rn != 15) {
+ tmp2 = load_reg(s, rn);
+ if ((op1 & 3) == 0) {
+ gen_add16(tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ }
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x003f0f60) == 0x003f0f20) {
+ /* rev */
+ tmp = load_reg(s, rm);
+ if (insn & (1 << 22)) {
+ if (insn & (1 << 7)) {
+ gen_revsh(tmp);
+ } else {
+ ARCH(6T2);
+ gen_helper_rbit(tmp, tmp);
+ }
+ } else {
+ if (insn & (1 << 7))
+ gen_rev16(tmp);
+ else
+ tcg_gen_bswap_i32(tmp, tmp);
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 2: /* Multiplies (Type 3). */
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ if (insn & (1 << 20)) {
+ /* Signed multiply most significant [accumulate]. */
+ tmp2 = gen_muls_i64_i32(tmp, tmp2);
+ if (insn & (1 << 5))
+ tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
+ tcg_gen_shri_i64(tmp2, tmp2, 32);
+ tmp = new_tmp();
+ tcg_gen_trunc_i64_i32(tmp, tmp2);
+ if (rn != 15) {
+ tmp2 = load_reg(s, rn);
+ if (insn & (1 << 6)) {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ dead_tmp(tmp2);
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ if (insn & (1 << 5))
+ gen_swap_half(tmp2);
+ gen_smul_dual(tmp, tmp2);
+ /* This addition cannot overflow. */
+ if (insn & (1 << 6)) {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ dead_tmp(tmp2);
+ if (insn & (1 << 22)) {
+ /* smlald, smlsld */
+ tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ext_i32_i64(tmp2, tmp);
+ dead_tmp(tmp);
+ gen_addq(s, tmp2, rd, rn);
+ gen_storeq_reg(s, rd, rn, tmp2);
+ } else {
+ /* smuad, smusd, smlad, smlsd */
+ if (rd != 15)
+ {
+ tmp2 = load_reg(s, rd);
+ gen_helper_add_setq(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ store_reg(s, rn, tmp);
+ }
+ }
+ break;
+ case 3:
+ op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
+ switch (op1) {
+ case 0: /* Unsigned sum of absolute differences. */
+ ARCH(6);
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ gen_helper_usad8(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ if (rn != 15) {
+ tmp2 = load_reg(s, rn);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x20: case 0x24: case 0x28: case 0x2c:
+ /* Bitfield insert/clear. */
+ ARCH(6T2);
+ shift = (insn >> 7) & 0x1f;
+ i = (insn >> 16) & 0x1f;
+ i = i + 1 - shift;
+ if (rm == 15) {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, 0);
+ } else {
+ tmp = load_reg(s, rm);
+ }
+ if (i != 32) {
+ tmp2 = load_reg(s, rd);
+ gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
+ dead_tmp(tmp2);
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
+ case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
+ tmp = load_reg(s, rm);
+ shift = (insn >> 7) & 0x1f;
+ i = ((insn >> 16) & 0x1f) + 1;
+ if (shift + i > 32)
+ goto illegal_op;
+ if (i < 32) {
+ if (op1 & 0x20) {
+ gen_ubfx(tmp, shift, (1u << i) - 1);
+ } else {
+ gen_sbfx(tmp, shift, i);
+ }
+ }
+ store_reg(s, rd, tmp);
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ }
break;
}
-#endif
-
+ do_ldst:
/* Check for undefined extension instructions
* per the ARM Bible IE:
* xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
@@ -1659,67 +6665,47 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* load/store byte/word */
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
- gen_movl_T1_reg(s, rn);
+ tmp2 = load_reg(s, rn);
i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
if (insn & (1 << 24))
- gen_add_data_offset(s, insn);
+ gen_add_data_offset(s, insn, tmp2);
if (insn & (1 << 20)) {
/* load */
-#if defined(CONFIG_USER_ONLY)
- if (insn & (1 << 22))
- gen_op_ldub_raw();
- else
- gen_op_ldl_raw();
-#else
+ s->is_mem = 1;
if (insn & (1 << 22)) {
- if (i)
- gen_op_ldub_user();
- else
- gen_op_ldub_kernel();
+ tmp = gen_ld8u(tmp2, i);
} else {
- if (i)
- gen_op_ldl_user();
- else
- gen_op_ldl_kernel();
+ tmp = gen_ld32(tmp2, i);
}
-#endif
- if (rd == 15)
- gen_bx(s);
- else
- gen_movl_reg_T0(s, rd);
} else {
/* store */
- gen_movl_T0_reg(s, rd);
-#if defined(CONFIG_USER_ONLY)
+ tmp = load_reg(s, rd);
if (insn & (1 << 22))
- gen_op_stb_raw();
+ gen_st8(tmp, tmp2, i);
else
- gen_op_stl_raw();
-#else
- if (insn & (1 << 22)) {
- if (i)
- gen_op_stb_user();
- else
- gen_op_stb_kernel();
- } else {
- if (i)
- gen_op_stl_user();
- else
- gen_op_stl_kernel();
- }
-#endif
+ gen_st32(tmp, tmp2, i);
}
if (!(insn & (1 << 24))) {
- gen_add_data_offset(s, insn);
- gen_movl_reg_T1(s, rn);
- } else if (insn & (1 << 21))
- gen_movl_reg_T1(s, rn); {
+ gen_add_data_offset(s, insn, tmp2);
+ store_reg(s, rn, tmp2);
+ } else if (insn & (1 << 21)) {
+ store_reg(s, rn, tmp2);
+ } else {
+ dead_tmp(tmp2);
+ }
+ if (insn & (1 << 20)) {
+ /* Complete the load. */
+ if (rd == 15)
+ gen_bx(s, tmp);
+ else
+ store_reg(s, rd, tmp);
}
break;
case 0x08:
case 0x09:
{
int j, n, user, loaded_base;
+ TCGv loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
user = 0;
@@ -1731,10 +6717,11 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
user = 1;
}
rn = (insn >> 16) & 0xf;
- gen_movl_T1_reg(s, rn);
+ addr = load_reg(s, rn);
/* compute total size */
loaded_base = 0;
+ TCGV_UNUSED(loaded_var);
n = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i))
@@ -1744,18 +6731,18 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
- gen_op_addl_T1_im(4);
+ tcg_gen_addi_i32(addr, addr, 4);
} else {
/* post increment */
}
} else {
if (insn & (1 << 24)) {
/* pre decrement */
- gen_op_addl_T1_im(-(n * 4));
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
} else {
/* post decrement */
if (n != 1)
- gen_op_addl_T1_im(-((n - 1) * 4));
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
}
}
j = 0;
@@ -1763,34 +6750,37 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
if (insn & (1 << i)) {
if (insn & (1 << 20)) {
/* load */
- gen_ldst(ldl, s);
+ tmp = gen_ld32(addr, IS_USER(s));
if (i == 15) {
- gen_bx(s);
+ gen_bx(s, tmp);
} else if (user) {
- gen_op_movl_user_T0(i);
+ gen_helper_set_user_reg(tcg_const_i32(i), tmp);
+ dead_tmp(tmp);
} else if (i == rn) {
- gen_op_movl_T2_T0();
+ loaded_var = tmp;
loaded_base = 1;
} else {
- gen_movl_reg_T0(s, i);
+ store_reg(s, i, tmp);
}
} else {
/* store */
if (i == 15) {
- /* special case: r15 = PC + 8 (was 12) */
+ /* special case: r15 = PC + 8 */
val = (long)s->pc + 4;
- gen_op_movl_TN_im[0](val);
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
} else if (user) {
- gen_op_movl_T0_user(i);
+ tmp = new_tmp();
+ gen_helper_get_user_reg(tmp, tcg_const_i32(i));
} else {
- gen_movl_T0_reg(s, i);
+ tmp = load_reg(s, i);
}
- gen_ldst(stl, s);
+ gen_st32(tmp, addr, IS_USER(s));
}
j++;
/* no need to add after the last transfer */
if (j != n)
- gen_op_addl_T1_im(4);
+ tcg_gen_addi_i32(addr, addr, 4);
}
}
if (insn & (1 << 21)) {
@@ -1800,28 +6790,30 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* pre increment */
} else {
/* post increment */
- gen_op_addl_T1_im(4);
+ tcg_gen_addi_i32(addr, addr, 4);
}
} else {
if (insn & (1 << 24)) {
/* pre decrement */
if (n != 1)
- gen_op_addl_T1_im(-((n - 1) * 4));
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
} else {
/* post decrement */
- gen_op_addl_T1_im(-(n * 4));
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
}
}
- gen_movl_reg_T1(s, rn);
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
}
if (loaded_base) {
- gen_op_movl_T0_T2();
- gen_movl_reg_T0(s, rn);
+ store_reg(s, rn, loaded_var);
}
if ((insn & (1 << 22)) && !user) {
/* Restore CPSR from SPSR. */
- gen_op_movl_T0_spsr();
- gen_op_movl_cpsr_T0(0xffffffff);
+ tmp = load_cpu_field(spsr);
+ gen_set_cpsr(tmp, 0xffffffff);
+ dead_tmp(tmp);
s->is_jmp = DISAS_UPDATE;
}
}
@@ -1834,8 +6826,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
/* branch (and link) */
val = (int32_t)s->pc;
if (insn & (1 << 24)) {
- gen_op_movl_T0_im(val);
- gen_op_movl_reg_TN[0][14]();
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ store_reg(s, 14, tmp);
}
offset = (((int32_t)insn << 8) >> 8);
val += (offset << 2) + 4;
@@ -1846,55 +6839,1156 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
case 0xd:
case 0xe:
/* Coprocessor. */
- op1 = (insn >> 8) & 0xf;
- switch (op1) {
- case 10:
- case 11:
- if (disas_vfp_insn (env, s, insn))
- goto illegal_op;
- break;
- case 15:
- if (disas_cp15_insn (s, insn))
- goto illegal_op;
- break;
- default:
- /* unknown coprocessor. */
+ if (disas_coproc_insn(env, s, insn))
goto illegal_op;
- }
break;
case 0xf:
/* swi */
- gen_op_movl_T0_im((long)s->pc);
- gen_op_movl_reg_TN[0][15]();
- gen_op_swi();
- s->is_jmp = DISAS_JUMP;
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_SWI;
break;
default:
illegal_op:
- gen_op_movl_T0_im((long)s->pc - 4);
- gen_op_movl_reg_TN[0][15]();
- gen_op_undef_insn();
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc - 4);
+ gen_exception(EXCP_UDEF);
s->is_jmp = DISAS_JUMP;
break;
}
}
}
-static void disas_thumb_insn(DisasContext *s)
+/* Return true if this is a Thumb-2 logical op. */
+static int
+thumb2_logic_op(int op)
+{
+ return (op < 8);
+}
+
+/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
+ then set condition code flags based on the result of the operation.
+ If SHIFTER_OUT is nonzero then set the carry flag for logical operations
+ to the high bit of T1.
+ Returns zero if the opcode is valid. */
+
+static int
+gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
+{
+ int logic_cc;
+
+ logic_cc = 0;
+ switch (op) {
+ case 0: /* and */
+ gen_op_andl_T0_T1();
+ logic_cc = conds;
+ break;
+ case 1: /* bic */
+ gen_op_bicl_T0_T1();
+ logic_cc = conds;
+ break;
+ case 2: /* orr */
+ gen_op_orl_T0_T1();
+ logic_cc = conds;
+ break;
+ case 3: /* orn */
+ gen_op_notl_T1();
+ gen_op_orl_T0_T1();
+ logic_cc = conds;
+ break;
+ case 4: /* eor */
+ gen_op_xorl_T0_T1();
+ logic_cc = conds;
+ break;
+ case 8: /* add */
+ if (conds)
+ gen_op_addl_T0_T1_cc();
+ else
+ gen_op_addl_T0_T1();
+ break;
+ case 10: /* adc */
+ if (conds)
+ gen_op_adcl_T0_T1_cc();
+ else
+ gen_adc_T0_T1();
+ break;
+ case 11: /* sbc */
+ if (conds)
+ gen_op_sbcl_T0_T1_cc();
+ else
+ gen_sbc_T0_T1();
+ break;
+ case 13: /* sub */
+ if (conds)
+ gen_op_subl_T0_T1_cc();
+ else
+ gen_op_subl_T0_T1();
+ break;
+ case 14: /* rsb */
+ if (conds)
+ gen_op_rsbl_T0_T1_cc();
+ else
+ gen_op_rsbl_T0_T1();
+ break;
+ default: /* 5, 6, 7, 9, 12, 15. */
+ return 1;
+ }
+ if (logic_cc) {
+ gen_op_logic_T0_cc();
+ if (shifter_out)
+ gen_set_CF_bit31(cpu_T[1]);
+ }
+ return 0;
+}
+
+/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
+ is not legal. */
+static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
+{
+ uint32_t insn, imm, shift, offset;
+ uint32_t rd, rn, rm, rs;
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv tmp3;
+ TCGv addr;
+ int op;
+ int shiftop;
+ int conds;
+ int logic_cc;
+
+ if (!(arm_feature(env, ARM_FEATURE_THUMB2)
+ || arm_feature (env, ARM_FEATURE_M))) {
+ /* Thumb-1 cores may need to treat bl and blx as a pair of
+ 16-bit instructions to get correct prefetch abort behavior. */
+ insn = insn_hw1;
+ if ((insn & (1 << 12)) == 0) {
+ /* Second half of blx. */
+ offset = ((insn & 0x7ff) << 1);
+ tmp = load_reg(s, 14);
+ tcg_gen_addi_i32(tmp, tmp, offset);
+ tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
+
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, s->pc | 1);
+ store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
+ return 0;
+ }
+ if (insn & (1 << 11)) {
+ /* Second half of bl. */
+ offset = ((insn & 0x7ff) << 1) | 1;
+ tmp = load_reg(s, 14);
+ tcg_gen_addi_i32(tmp, tmp, offset);
+
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, s->pc | 1);
+ store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
+ return 0;
+ }
+ if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
+ /* Instruction spans a page boundary. Implement it as two
+               16-bit instructions in case the second half causes a
+               prefetch abort. */
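+            /* First half of bl/blx: hw1[10:0] sign-extends into bits [22:12]
+               of the offset; only lr is written at this point. */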
+ offset = ((int32_t)insn << 21) >> 9;
+ gen_op_movl_T0_im(s->pc + 2 + offset);
+ gen_movl_reg_T0(s, 14);
+ return 0;
+ }
+ /* Fall through to 32-bit decode. */
+ }
+
+ insn = lduw_code(s->pc);
+#ifdef CONFIG_TRACE
+ if (tracing) {
+ int ticks = get_insn_ticks_thumb(insn);
+ trace_add_insn( insn_wrap_thumb(insn), 1 );
+ gen_helper_traceInsn();
+ gen_traceTicks(ticks);
+ }
+#endif
+ s->pc += 2;
+ insn |= (uint32_t)insn_hw1 << 16;
+
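+    /* Everything other than a bl/blx-style encoding requires Thumb-2. */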
+ if ((insn & 0xf800e800) != 0xf000e800) {
+ ARCH(6T2);
+ }
+
+ rn = (insn >> 16) & 0xf;
+ rs = (insn >> 12) & 0xf;
+ rd = (insn >> 8) & 0xf;
+ rm = insn & 0xf;
+ switch ((insn >> 25) & 0xf) {
+ case 0: case 1: case 2: case 3:
+ /* 16-bit instructions. Should never happen. */
+ abort();
+ case 4:
+ if (insn & (1 << 22)) {
+ /* Other load/store, table branch. */
+ if (insn & 0x01200000) {
+ /* Load/store doubleword. */
+ if (rn == 15) {
+ addr = new_tmp();
+ tcg_gen_movi_i32(addr, s->pc & ~3);
+ } else {
+ addr = load_reg(s, rn);
+ }
+ offset = (insn & 0xff) * 4;
+ if ((insn & (1 << 23)) == 0)
+ offset = -offset;
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ offset = 0;
+ }
+ if (insn & (1 << 20)) {
+ /* ldrd */
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rs, tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
+ } else {
+ /* strd */
+ tmp = load_reg(s, rs);
+ gen_st32(tmp, addr, IS_USER(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rd);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ if (rn == 15)
+ goto illegal_op;
+ tcg_gen_addi_i32(addr, addr, offset - 4);
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ } else if ((insn & (1 << 23)) == 0) {
+ /* Load/store exclusive word. */
+ gen_movl_T1_reg(s, rn);
+ addr = cpu_T[1];
+ if (insn & (1 << 20)) {
+ gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
+ } else {
+ int label = gen_new_label();
+ gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
+ 0, label);
+ tmp = load_reg(s, rs);
+ gen_st32(tmp, cpu_T[1], IS_USER(s));
+ gen_set_label(label);
+ gen_movl_reg_T0(s, rd);
+ }
+ } else if ((insn & (1 << 6)) == 0) {
+ /* Table Branch. */
+ if (rn == 15) {
+ addr = new_tmp();
+ tcg_gen_movi_i32(addr, s->pc);
+ } else {
+ addr = load_reg(s, rn);
+ }
+ tmp = load_reg(s, rm);
+ tcg_gen_add_i32(addr, addr, tmp);
+ if (insn & (1 << 4)) {
+ /* tbh */
+ tcg_gen_add_i32(addr, addr, tmp);
+ dead_tmp(tmp);
+ tmp = gen_ld16u(addr, IS_USER(s));
+ } else { /* tbb */
+ dead_tmp(tmp);
+ tmp = gen_ld8u(addr, IS_USER(s));
+ }
+ dead_tmp(addr);
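+                /* Table entries are halfword branch offsets: double the
+                   loaded value and add it to the PC. */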
+ tcg_gen_shli_i32(tmp, tmp, 1);
+ tcg_gen_addi_i32(tmp, tmp, s->pc);
+ store_reg(s, 15, tmp);
+ } else {
+ /* Load/store exclusive byte/halfword/doubleword. */
+                /* ??? These are not really atomic.  However, we know
+ we never have multiple CPUs running in parallel,
+ so it is good enough. */
+ op = (insn >> 4) & 0x3;
+ /* Must use a global reg for the address because we have
+ a conditional branch in the store instruction. */
+ gen_movl_T1_reg(s, rn);
+ addr = cpu_T[1];
+ if (insn & (1 << 20)) {
+ gen_helper_mark_exclusive(cpu_env, addr);
+ switch (op) {
+ case 0:
+ tmp = gen_ld8u(addr, IS_USER(s));
+ break;
+ case 1:
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ case 3:
+ tmp = gen_ld32(addr, IS_USER(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp2 = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp2);
+ break;
+ default:
+ goto illegal_op;
+ }
+ store_reg(s, rs, tmp);
+ } else {
+ int label = gen_new_label();
+ /* Must use a global that is not killed by the branch. */
+ gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
+ tmp = load_reg(s, rs);
+ switch (op) {
+ case 0:
+ gen_st8(tmp, addr, IS_USER(s));
+ break;
+ case 1:
+ gen_st16(tmp, addr, IS_USER(s));
+ break;
+ case 3:
+ gen_st32(tmp, addr, IS_USER(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rd);
+ gen_st32(tmp, addr, IS_USER(s));
+ break;
+ default:
+ goto illegal_op;
+ }
+ gen_set_label(label);
+ gen_movl_reg_T0(s, rm);
+ }
+ }
+ } else {
+ /* Load/store multiple, RFE, SRS. */
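+            /* Equal insn[24] and insn[23] bits encode rfe/srs; the other
+               combinations are ldm/stm. */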
+ if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
+ /* Not available in user mode. */
+ if (IS_USER(s))
+ goto illegal_op;
+ if (insn & (1 << 20)) {
+ /* rfe */
+ addr = load_reg(s, rn);
+ if ((insn & (1 << 24)) == 0)
+ tcg_gen_addi_i32(addr, addr, -8);
+ /* Load PC into tmp and CPSR into tmp2. */
+ tmp = gen_ld32(addr, 0);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp2 = gen_ld32(addr, 0);
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ tcg_gen_addi_i32(addr, addr, -4);
+ }
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ gen_rfe(s, tmp, tmp2);
+ } else {
+ /* srs */
+ op = (insn & 0x1f);
+ if (op == (env->uncached_cpsr & CPSR_M)) {
+ addr = load_reg(s, 13);
+ } else {
+ addr = new_tmp();
+ gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
+ }
+ if ((insn & (1 << 24)) == 0) {
+ tcg_gen_addi_i32(addr, addr, -8);
+ }
+ tmp = load_reg(s, 14);
+ gen_st32(tmp, addr, 0);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = new_tmp();
+ gen_helper_cpsr_read(tmp);
+ gen_st32(tmp, addr, 0);
+ if (insn & (1 << 21)) {
+ if ((insn & (1 << 24)) == 0) {
+ tcg_gen_addi_i32(addr, addr, -4);
+ } else {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ if (op == (env->uncached_cpsr & CPSR_M)) {
+ store_reg(s, 13, addr);
+ } else {
+ gen_helper_set_r13_banked(cpu_env,
+ tcg_const_i32(op), addr);
+ }
+ } else {
+ dead_tmp(addr);
+ }
+ }
+ } else {
+ int i;
+ /* Load/store multiple. */
+ addr = load_reg(s, rn);
+ offset = 0;
+ for (i = 0; i < 16; i++) {
+ if (insn & (1 << i))
+ offset += 4;
+ }
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, -offset);
+ }
+
+ for (i = 0; i < 16; i++) {
+ if ((insn & (1 << i)) == 0)
+ continue;
+ if (insn & (1 << 20)) {
+ /* Load. */
+ tmp = gen_ld32(addr, IS_USER(s));
+ if (i == 15) {
+ gen_bx(s, tmp);
+ } else {
+ store_reg(s, i, tmp);
+ }
+ } else {
+ /* Store. */
+ tmp = load_reg(s, i);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ if (insn & (1 << 21)) {
+ /* Base register writeback. */
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, -offset);
+ }
+ /* Fault if writeback register is in register list. */
+ if (insn & (1 << rn))
+ goto illegal_op;
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ }
+ }
+ break;
+ case 5: /* Data processing register constant shift. */
+ if (rn == 15)
+ gen_op_movl_T0_im(0);
+ else
+ gen_movl_T0_reg(s, rn);
+ gen_movl_T1_reg(s, rm);
+ op = (insn >> 21) & 0xf;
+ shiftop = (insn >> 4) & 3;
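+        /* The 5-bit shift amount is imm3:imm2, from insn[14:12] and insn[7:6]. */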
+ shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
+ conds = (insn & (1 << 20)) != 0;
+ logic_cc = (conds && thumb2_logic_op(op));
+ gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
+ if (gen_thumb2_data_op(s, op, conds, 0))
+ goto illegal_op;
+ if (rd != 15)
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 13: /* Misc data processing. */
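+        /* op[2:1] comes from insn[24:23], op[0] from insn[7]. */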
+ op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
+ if (op < 4 && (insn & 0xf000) != 0xf000)
+ goto illegal_op;
+ switch (op) {
+ case 0: /* Register controlled shift. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if ((insn & 0x70) != 0)
+ goto illegal_op;
+ op = (insn >> 21) & 3;
+ logic_cc = (insn & (1 << 20)) != 0;
+ gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
+ if (logic_cc)
+ gen_logic_CC(tmp);
+ store_reg(s, rd, tmp);
+ break;
+ case 1: /* Sign/zero extend. */
+ tmp = load_reg(s, rm);
+ shift = (insn >> 4) & 3;
+            /* ??? In many cases it's not necessary to do a
+               rotate; a shift is sufficient.  */
+ if (shift != 0)
+ tcg_gen_rori_i32(tmp, tmp, shift * 8);
+ op = (insn >> 20) & 7;
+ switch (op) {
+ case 0: gen_sxth(tmp); break;
+ case 1: gen_uxth(tmp); break;
+ case 2: gen_sxtb16(tmp); break;
+ case 3: gen_uxtb16(tmp); break;
+ case 4: gen_sxtb(tmp); break;
+ case 5: gen_uxtb(tmp); break;
+ default: goto illegal_op;
+ }
+ if (rn != 15) {
+ tmp2 = load_reg(s, rn);
+ if ((op >> 1) == 1) {
+ gen_add16(tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 2: /* SIMD add/subtract. */
+ op = (insn >> 20) & 7;
+ shift = (insn >> 4) & 7;
+ if ((op & 3) == 3 || (shift & 3) == 3)
+ goto illegal_op;
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
+ dead_tmp(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 3: /* Other data processing. */
+ op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
+ if (op < 4) {
+ /* Saturating add/subtract. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if (op & 2)
+ gen_helper_double_saturate(tmp, tmp);
+ if (op & 1)
+ gen_helper_sub_saturate(tmp, tmp2, tmp);
+ else
+ gen_helper_add_saturate(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ } else {
+ tmp = load_reg(s, rn);
+ switch (op) {
+ case 0x0a: /* rbit */
+ gen_helper_rbit(tmp, tmp);
+ break;
+ case 0x08: /* rev */
+ tcg_gen_bswap_i32(tmp, tmp);
+ break;
+ case 0x09: /* rev16 */
+ gen_rev16(tmp);
+ break;
+ case 0x0b: /* revsh */
+ gen_revsh(tmp);
+ break;
+ case 0x10: /* sel */
+ tmp2 = load_reg(s, rm);
+ tmp3 = new_tmp();
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
+ dead_tmp(tmp3);
+ dead_tmp(tmp2);
+ break;
+ case 0x18: /* clz */
+ gen_helper_clz(tmp, tmp);
+ break;
+ default:
+ goto illegal_op;
+ }
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
+ op = (insn >> 4) & 0xf;
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ switch ((insn >> 20) & 7) {
+ case 0: /* 32 x 32 -> 32 */
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ if (op)
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ else
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ break;
+ case 1: /* 16 x 16 -> 32 */
+ gen_mulxy(tmp, tmp2, op & 2, op & 1);
+ dead_tmp(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ gen_helper_add_setq(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ break;
+ case 2: /* Dual multiply add. */
+ case 4: /* Dual multiply subtract. */
+ if (op)
+ gen_swap_half(tmp2);
+ gen_smul_dual(tmp, tmp2);
+ /* This addition cannot overflow. */
+ if (insn & (1 << 22)) {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ dead_tmp(tmp2);
+ if (rs != 15)
+ {
+ tmp2 = load_reg(s, rs);
+ gen_helper_add_setq(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ break;
+ case 3: /* 32 * 16 -> 32msb */
+ if (op)
+ tcg_gen_sari_i32(tmp2, tmp2, 16);
+ else
+ gen_sxth(tmp2);
+ tmp2 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp2, tmp2, 16);
+ tmp = new_tmp();
+ tcg_gen_trunc_i64_i32(tmp, tmp2);
+ if (rs != 15)
+ {
+ tmp2 = load_reg(s, rs);
+ gen_helper_add_setq(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ break;
+ case 5: case 6: /* 32 * 32 -> 32msb */
+ gen_imull(tmp, tmp2);
+ if (insn & (1 << 5)) {
+ gen_roundqd(tmp, tmp2);
+ dead_tmp(tmp2);
+ } else {
+ dead_tmp(tmp);
+ tmp = tmp2;
+ }
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ if (insn & (1 << 21)) {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ }
+ dead_tmp(tmp2);
+ }
+ break;
+ case 7: /* Unsigned sum of absolute differences. */
+ gen_helper_usad8(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 6: case 7: /* 64-bit multiply, Divide. */
+ op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if ((op & 0x50) == 0x10) {
+ /* sdiv, udiv */
+ if (!arm_feature(env, ARM_FEATURE_DIV))
+ goto illegal_op;
+ if (op & 0x20)
+ gen_helper_udiv(tmp, tmp, tmp2);
+ else
+ gen_helper_sdiv(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((op & 0xe) == 0xc) {
+ /* Dual multiply accumulate long. */
+ if (op & 1)
+ gen_swap_half(tmp2);
+ gen_smul_dual(tmp, tmp2);
+ if (op & 0x10) {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ dead_tmp(tmp2);
+ tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ gen_addq(s, tmp, rs, rd);
+ gen_storeq_reg(s, rs, rd, tmp);
+ } else {
+ if (op & 0x20) {
+ /* Unsigned 64-bit multiply */
+ tmp = gen_mulu_i64_i32(tmp, tmp2);
+ } else {
+ if (op & 8) {
+ /* smlalxy */
+ gen_mulxy(tmp, tmp2, op & 2, op & 1);
+ dead_tmp(tmp2);
+ tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ext_i32_i64(tmp2, tmp);
+ dead_tmp(tmp);
+ tmp = tmp2;
+ } else {
+ /* Signed 64-bit multiply */
+ tmp = gen_muls_i64_i32(tmp, tmp2);
+ }
+ }
+ if (op & 4) {
+ /* umaal */
+ gen_addq_lo(s, tmp, rs);
+ gen_addq_lo(s, tmp, rd);
+ } else if (op & 0x40) {
+ /* 64-bit accumulate. */
+ gen_addq(s, tmp, rs, rd);
+ }
+ gen_storeq_reg(s, rs, rd, tmp);
+ }
+ break;
+ }
+ break;
+ case 6: case 7: case 14: case 15:
+ /* Coprocessor. */
+ if (((insn >> 24) & 3) == 3) {
+ /* Translate into the equivalent ARM encoding. */
+ insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
+ if (disas_neon_data_insn(env, s, insn))
+ goto illegal_op;
+ } else {
+ if (insn & (1 << 28))
+ goto illegal_op;
+ if (disas_coproc_insn (env, s, insn))
+ goto illegal_op;
+ }
+ break;
+ case 8: case 9: case 10: case 11:
+ if (insn & (1 << 15)) {
+ /* Branches, misc control. */
+ if (insn & 0x5000) {
+ /* Unconditional branch. */
+ /* signextend(hw1[10:0]) -> offset[:12]. */
+ offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
+ /* hw1[10:0] -> offset[11:1]. */
+ offset |= (insn & 0x7ff) << 1;
+ /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
+ offset[24:22] already have the same value because of the
+ sign extension above. */
+ offset ^= ((~insn) & (1 << 13)) << 10;
+ offset ^= ((~insn) & (1 << 11)) << 11;
+
+ if (insn & (1 << 14)) {
+ /* Branch and link. */
+ gen_op_movl_T1_im(s->pc | 1);
+ gen_movl_reg_T1(s, 14);
+ }
+
+ offset += s->pc;
+ if (insn & (1 << 12)) {
+ /* b/bl */
+ gen_jmp(s, offset);
+ } else {
+ /* blx */
+ offset &= ~(uint32_t)2;
+ gen_bx_im(s, offset);
+ }
+ } else if (((insn >> 23) & 7) == 7) {
+ /* Misc control */
+ if (insn & (1 << 13))
+ goto illegal_op;
+
+ if (insn & (1 << 26)) {
+ /* Secure monitor call (v6Z) */
+ goto illegal_op; /* not implemented. */
+ } else {
+ op = (insn >> 20) & 7;
+ switch (op) {
+ case 0: /* msr cpsr. */
+ if (IS_M(env)) {
+ tmp = load_reg(s, rn);
+ addr = tcg_const_i32(insn & 0xff);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ gen_lookup_tb(s);
+ break;
+ }
+ /* fall through */
+ case 1: /* msr spsr. */
+ if (IS_M(env))
+ goto illegal_op;
+ gen_movl_T0_reg(s, rn);
+ if (gen_set_psr_T0(s,
+ msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
+ op == 1))
+ goto illegal_op;
+ break;
+ case 2: /* cps, nop-hint. */
+ if (((insn >> 8) & 7) == 0) {
+ gen_nop_hint(s, insn & 0xff);
+ }
+ /* Implemented as NOP in user mode. */
+ if (IS_USER(s))
+ break;
+ offset = 0;
+ imm = 0;
+ if (insn & (1 << 10)) {
+ if (insn & (1 << 7))
+ offset |= CPSR_A;
+ if (insn & (1 << 6))
+ offset |= CPSR_I;
+ if (insn & (1 << 5))
+ offset |= CPSR_F;
+ if (insn & (1 << 9))
+ imm = CPSR_A | CPSR_I | CPSR_F;
+ }
+ if (insn & (1 << 8)) {
+ offset |= 0x1f;
+ imm |= (insn & 0x1f);
+ }
+ if (offset) {
+ gen_op_movl_T0_im(imm);
+ gen_set_psr_T0(s, offset, 0);
+ }
+ break;
+ case 3: /* Special control operations. */
+ op = (insn >> 4) & 0xf;
+ switch (op) {
+ case 2: /* clrex */
+ gen_helper_clrex(cpu_env);
+ break;
+ case 4: /* dsb */
+ case 5: /* dmb */
+ case 6: /* isb */
+ /* These execute as NOPs. */
+ ARCH(7);
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 4: /* bxj */
+ /* Trivial implementation equivalent to bx. */
+ tmp = load_reg(s, rn);
+ gen_bx(s, tmp);
+ break;
+ case 5: /* Exception return. */
+ /* Unpredictable in user mode. */
+ goto illegal_op;
+ case 6: /* mrs cpsr. */
+ tmp = new_tmp();
+ if (IS_M(env)) {
+ addr = tcg_const_i32(insn & 0xff);
+ gen_helper_v7m_mrs(tmp, cpu_env, addr);
+ } else {
+ gen_helper_cpsr_read(tmp);
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 7: /* mrs spsr. */
+ /* Not accessible in user mode. */
+ if (IS_USER(s) || IS_M(env))
+ goto illegal_op;
+ tmp = load_cpu_field(spsr);
+ store_reg(s, rd, tmp);
+ break;
+ }
+ }
+ } else {
+ /* Conditional branch. */
+ op = (insn >> 22) & 0xf;
+ /* Generate a conditional jump to next instruction. */
+ s->condlabel = gen_new_label();
+ gen_test_cc(op ^ 1, s->condlabel);
+ s->condjmp = 1;
+
+ /* offset[11:1] = insn[10:0] */
+ offset = (insn & 0x7ff) << 1;
+ /* offset[17:12] = insn[21:16]. */
+ offset |= (insn & 0x003f0000) >> 4;
+ /* offset[31:20] = insn[26]. */
+ offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
+ /* offset[18] = insn[13]. */
+ offset |= (insn & (1 << 13)) << 5;
+ /* offset[19] = insn[11]. */
+ offset |= (insn & (1 << 11)) << 8;
+
+ /* jump to the offset */
+ gen_jmp(s, s->pc + offset);
+ }
+ } else {
+ /* Data processing immediate. */
+ if (insn & (1 << 25)) {
+ if (insn & (1 << 24)) {
+ if (insn & (1 << 20))
+ goto illegal_op;
+ /* Bitfield/Saturate. */
+ op = (insn >> 21) & 7;
+ imm = insn & 0x1f;
+ shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
+ if (rn == 15) {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, 0);
+ } else {
+ tmp = load_reg(s, rn);
+ }
+ switch (op) {
+ case 2: /* Signed bitfield extract. */
+ imm++;
+ if (shift + imm > 32)
+ goto illegal_op;
+ if (imm < 32)
+ gen_sbfx(tmp, shift, imm);
+ break;
+ case 6: /* Unsigned bitfield extract. */
+ imm++;
+ if (shift + imm > 32)
+ goto illegal_op;
+ if (imm < 32)
+ gen_ubfx(tmp, shift, (1u << imm) - 1);
+ break;
+ case 3: /* Bitfield insert/clear. */
+ if (imm < shift)
+ goto illegal_op;
+ imm = imm + 1 - shift;
+ if (imm != 32) {
+ tmp2 = load_reg(s, rd);
+ gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
+ dead_tmp(tmp2);
+ }
+ break;
+ case 7:
+ goto illegal_op;
+ default: /* Saturate. */
+ if (shift) {
+ if (op & 1)
+ tcg_gen_sari_i32(tmp, tmp, shift);
+ else
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+ tmp2 = tcg_const_i32(imm);
+ if (op & 4) {
+ /* Unsigned. */
+ if ((op & 1) && shift == 0)
+ gen_helper_usat16(tmp, tmp, tmp2);
+ else
+ gen_helper_usat(tmp, tmp, tmp2);
+ } else {
+ /* Signed. */
+ if ((op & 1) && shift == 0)
+ gen_helper_ssat16(tmp, tmp, tmp2);
+ else
+ gen_helper_ssat(tmp, tmp, tmp2);
+ }
+ break;
+ }
+ store_reg(s, rd, tmp);
+ } else {
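+                /* Assemble the i:imm3:imm8 immediate from insn[26],
+                   insn[14:12] and insn[7:0]. */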
+ imm = ((insn & 0x04000000) >> 15)
+ | ((insn & 0x7000) >> 4) | (insn & 0xff);
+ if (insn & (1 << 22)) {
+ /* 16-bit immediate. */
+ imm |= (insn >> 4) & 0xf000;
+ if (insn & (1 << 23)) {
+ /* movt */
+ tmp = load_reg(s, rd);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_ori_i32(tmp, tmp, imm << 16);
+ } else {
+ /* movw */
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, imm);
+ }
+ } else {
+ /* Add/sub 12-bit immediate. */
+ if (rn == 15) {
+ offset = s->pc & ~(uint32_t)3;
+ if (insn & (1 << 23))
+ offset -= imm;
+ else
+ offset += imm;
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, offset);
+ } else {
+ tmp = load_reg(s, rn);
+ if (insn & (1 << 23))
+ tcg_gen_subi_i32(tmp, tmp, imm);
+ else
+ tcg_gen_addi_i32(tmp, tmp, imm);
+ }
+ }
+ store_reg(s, rd, tmp);
+ }
+ } else {
+ int shifter_out = 0;
+ /* modified 12-bit immediate. */
+ shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
+ imm = (insn & 0xff);
+ switch (shift) {
+ case 0: /* XY */
+ /* Nothing to do. */
+ break;
+ case 1: /* 00XY00XY */
+ imm |= imm << 16;
+ break;
+ case 2: /* XY00XY00 */
+ imm |= imm << 16;
+ imm <<= 8;
+ break;
+ case 3: /* XYXYXYXY */
+ imm |= imm << 16;
+ imm |= imm << 8;
+ break;
+ default: /* Rotated constant. */
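+                    /* An 8-bit value with bit 7 forced to one, rotated
+                       right by a 5-bit count; the count is at least 8
+                       here, so the rotation reduces to a left shift. */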
+ shift = (shift << 1) | (imm >> 7);
+ imm |= 0x80;
+ imm = imm << (32 - shift);
+ shifter_out = 1;
+ break;
+ }
+ gen_op_movl_T1_im(imm);
+ rn = (insn >> 16) & 0xf;
+ if (rn == 15)
+ gen_op_movl_T0_im(0);
+ else
+ gen_movl_T0_reg(s, rn);
+ op = (insn >> 21) & 0xf;
+ if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
+ shifter_out))
+ goto illegal_op;
+ rd = (insn >> 8) & 0xf;
+ if (rd != 15) {
+ gen_movl_reg_T0(s, rd);
+ }
+ }
+ }
+ break;
+ case 12: /* Load/store single data item. */
+ {
+ int postinc = 0;
+ int writeback = 0;
+ int user;
+ if ((insn & 0x01100000) == 0x01000000) {
+ if (disas_neon_ls_insn(env, s, insn))
+ goto illegal_op;
+ break;
+ }
+ user = IS_USER(s);
+ if (rn == 15) {
+ addr = new_tmp();
+ /* PC relative. */
+ /* s->pc has already been incremented by 4. */
+ imm = s->pc & 0xfffffffc;
+ if (insn & (1 << 23))
+ imm += insn & 0xfff;
+ else
+ imm -= insn & 0xfff;
+ tcg_gen_movi_i32(addr, imm);
+ } else {
+ addr = load_reg(s, rn);
+ if (insn & (1 << 23)) {
+ /* Positive offset. */
+ imm = insn & 0xfff;
+ tcg_gen_addi_i32(addr, addr, imm);
+ } else {
+ op = (insn >> 8) & 7;
+ imm = insn & 0xff;
+ switch (op) {
+ case 0: case 8: /* Shifted Register. */
+ shift = (insn >> 4) & 0xf;
+ if (shift > 3)
+ goto illegal_op;
+ tmp = load_reg(s, rm);
+ if (shift)
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ tcg_gen_add_i32(addr, addr, tmp);
+ dead_tmp(tmp);
+ break;
+ case 4: /* Negative offset. */
+ tcg_gen_addi_i32(addr, addr, -imm);
+ break;
+ case 6: /* User privilege. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ user = 1;
+ break;
+ case 1: /* Post-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 3: /* Post-increment. */
+ postinc = 1;
+ writeback = 1;
+ break;
+ case 5: /* Pre-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 7: /* Pre-increment. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ writeback = 1;
+ break;
+ default:
+ goto illegal_op;
+ }
+ }
+ }
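+        /* op[1:0] is the access size from insn[22:21]; op[2] (insn[24])
+           selects a signed load. */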
+ op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
+ if (insn & (1 << 20)) {
+ /* Load. */
+ if (rs == 15 && op != 2) {
+ if (op & 2)
+ goto illegal_op;
+ /* Memory hint. Implemented as NOP. */
+ } else {
+ switch (op) {
+ case 0: tmp = gen_ld8u(addr, user); break;
+ case 4: tmp = gen_ld8s(addr, user); break;
+ case 1: tmp = gen_ld16u(addr, user); break;
+ case 5: tmp = gen_ld16s(addr, user); break;
+ case 2: tmp = gen_ld32(addr, user); break;
+ default: goto illegal_op;
+ }
+ if (rs == 15) {
+ gen_bx(s, tmp);
+ } else {
+ store_reg(s, rs, tmp);
+ }
+ }
+ } else {
+ /* Store. */
+ if (rs == 15)
+ goto illegal_op;
+ tmp = load_reg(s, rs);
+ switch (op) {
+ case 0: gen_st8(tmp, addr, user); break;
+ case 1: gen_st16(tmp, addr, user); break;
+ case 2: gen_st32(tmp, addr, user); break;
+ default: goto illegal_op;
+ }
+ }
+ if (postinc)
+ tcg_gen_addi_i32(addr, addr, imm);
+ if (writeback) {
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ return 0;
+illegal_op:
+ return 1;
+}
+
+static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
uint32_t val, insn, op, rm, rn, rd, shift, cond;
int32_t offset;
int i;
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv addr;
+
+ if (s->condexec_mask) {
+ cond = s->condexec_cond;
+ s->condlabel = gen_new_label();
+ gen_test_cc(cond ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
insn = lduw_code(s->pc);
-#ifdef GEN_TRACE
+#ifdef CONFIG_TRACE
if (tracing) {
- int insn_ticks;
-
- trace_add_insn( insn_wrap_thumb(insn), 1 /* thumb insn */ );
- insn_ticks = get_insn_ticks_thumb(insn);
- gen_op_trace_insn();
- gen_op_add_to_sim_time(insn_ticks);
+ int ticks = get_insn_ticks_thumb(insn);
+ trace_add_insn( insn_wrap_thumb(insn), 1 );
+ gen_helper_traceInsn();
+ gen_traceTicks(ticks);
}
#endif
s->pc += 2;
@@ -1915,18 +8009,27 @@ static void disas_thumb_insn(DisasContext *s)
rm = (insn >> 6) & 7;
gen_movl_T1_reg(s, rm);
}
- if (insn & (1 << 9))
- gen_op_subl_T0_T1_cc();
- else
- gen_op_addl_T0_T1_cc();
+ if (insn & (1 << 9)) {
+ if (s->condexec_mask)
+ gen_op_subl_T0_T1();
+ else
+ gen_op_subl_T0_T1_cc();
+ } else {
+ if (s->condexec_mask)
+ gen_op_addl_T0_T1();
+ else
+ gen_op_addl_T0_T1_cc();
+ }
gen_movl_reg_T0(s, rd);
} else {
/* shift immediate */
rm = (insn >> 3) & 7;
shift = (insn >> 6) & 0x1f;
- gen_movl_T0_reg(s, rm);
- gen_shift_T0_im_thumb[op](shift);
- gen_movl_reg_T0(s, rd);
+ tmp = load_reg(s, rm);
+ gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ store_reg(s, rd, tmp);
}
break;
case 2: case 3:
@@ -1941,16 +8044,23 @@ static void disas_thumb_insn(DisasContext *s)
}
switch (op) {
case 0: /* mov */
- gen_op_logic_T0_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T0_cc();
break;
case 1: /* cmp */
gen_op_subl_T0_T1_cc();
break;
case 2: /* add */
- gen_op_addl_T0_T1_cc();
+ if (s->condexec_mask)
+ gen_op_addl_T0_T1();
+ else
+ gen_op_addl_T0_T1_cc();
break;
case 3: /* sub */
- gen_op_subl_T0_T1_cc();
+ if (s->condexec_mask)
+ gen_op_subl_T0_T1();
+ else
+ gen_op_subl_T0_T1_cc();
break;
}
if (op != 1)
@@ -1962,9 +8072,11 @@ static void disas_thumb_insn(DisasContext *s)
/* load pc-relative. Bit 1 of PC is ignored. */
val = s->pc + 2 + ((insn & 0xff) * 4);
val &= ~(uint32_t)2;
- gen_op_movl_T1_im(val);
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, rd);
+ addr = new_tmp();
+ tcg_gen_movi_i32(addr, val);
+ tmp = gen_ld32(addr, IS_USER(s));
+ dead_tmp(addr);
+ store_reg(s, rd, tmp);
break;
}
if (insn & (1 << 10)) {
@@ -1989,13 +8101,14 @@ static void disas_thumb_insn(DisasContext *s)
gen_movl_reg_T0(s, rd);
break;
case 3:/* branch [and link] exchange thumb register */
+ tmp = load_reg(s, rm);
if (insn & (1 << 7)) {
val = (uint32_t)s->pc | 1;
- gen_op_movl_T1_im(val);
- gen_movl_reg_T1(s, 14);
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, val);
+ store_reg(s, 14, tmp2);
}
- gen_movl_T0_reg(s, rm);
- gen_bx(s);
+ gen_bx(s, tmp);
break;
}
break;
@@ -2024,33 +8137,57 @@ static void disas_thumb_insn(DisasContext *s)
switch (op) {
case 0x0: /* and */
gen_op_andl_T0_T1();
- gen_op_logic_T0_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T0_cc();
break;
case 0x1: /* eor */
gen_op_xorl_T0_T1();
- gen_op_logic_T0_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T0_cc();
break;
case 0x2: /* lsl */
- gen_op_shll_T1_T0_cc();
- gen_op_logic_T1_cc();
+ if (s->condexec_mask) {
+ gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
+ } else {
+ gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
+ gen_op_logic_T1_cc();
+ }
break;
case 0x3: /* lsr */
- gen_op_shrl_T1_T0_cc();
- gen_op_logic_T1_cc();
+ if (s->condexec_mask) {
+ gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
+ } else {
+ gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
+ gen_op_logic_T1_cc();
+ }
break;
case 0x4: /* asr */
- gen_op_sarl_T1_T0_cc();
- gen_op_logic_T1_cc();
+ if (s->condexec_mask) {
+ gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
+ } else {
+ gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
+ gen_op_logic_T1_cc();
+ }
break;
case 0x5: /* adc */
- gen_op_adcl_T0_T1_cc();
+ if (s->condexec_mask)
+ gen_adc_T0_T1();
+ else
+ gen_op_adcl_T0_T1_cc();
break;
case 0x6: /* sbc */
- gen_op_sbcl_T0_T1_cc();
+ if (s->condexec_mask)
+ gen_sbc_T0_T1();
+ else
+ gen_op_sbcl_T0_T1_cc();
break;
case 0x7: /* ror */
- gen_op_rorl_T1_T0_cc();
- gen_op_logic_T1_cc();
+ if (s->condexec_mask) {
+ gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
+ } else {
+ gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
+ gen_op_logic_T1_cc();
+ }
break;
case 0x8: /* tst */
gen_op_andl_T0_T1();
@@ -2058,7 +8195,10 @@ static void disas_thumb_insn(DisasContext *s)
rd = 16;
break;
case 0x9: /* neg */
- gen_op_subl_T0_T1_cc();
+ if (s->condexec_mask)
+ tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
+ else
+ gen_op_subl_T0_T1_cc();
break;
case 0xa: /* cmp */
gen_op_subl_T0_T1_cc();
@@ -2070,19 +8210,23 @@ static void disas_thumb_insn(DisasContext *s)
break;
case 0xc: /* orr */
gen_op_orl_T0_T1();
- gen_op_logic_T0_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T0_cc();
break;
case 0xd: /* mul */
gen_op_mull_T0_T1();
- gen_op_logic_T0_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T0_cc();
break;
case 0xe: /* bic */
gen_op_bicl_T0_T1();
- gen_op_logic_T0_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T0_cc();
break;
case 0xf: /* mvn */
gen_op_notl_T1();
- gen_op_logic_T1_cc();
+ if (!s->condexec_mask)
+ gen_op_logic_T1_cc();
val = 1;
rm = rd;
break;
@@ -2101,120 +8245,122 @@ static void disas_thumb_insn(DisasContext *s)
rn = (insn >> 3) & 7;
rm = (insn >> 6) & 7;
op = (insn >> 9) & 7;
- gen_movl_T1_reg(s, rn);
- gen_movl_T2_reg(s, rm);
- gen_op_addl_T1_T2();
+ addr = load_reg(s, rn);
+ tmp = load_reg(s, rm);
+ tcg_gen_add_i32(addr, addr, tmp);
+ dead_tmp(tmp);
if (op < 3) /* store */
- gen_movl_T0_reg(s, rd);
+ tmp = load_reg(s, rd);
switch (op) {
case 0: /* str */
- gen_ldst(stl, s);
+ gen_st32(tmp, addr, IS_USER(s));
break;
case 1: /* strh */
- gen_ldst(stw, s);
+ gen_st16(tmp, addr, IS_USER(s));
break;
case 2: /* strb */
- gen_ldst(stb, s);
+ gen_st8(tmp, addr, IS_USER(s));
break;
case 3: /* ldrsb */
- gen_ldst(ldsb, s);
+ tmp = gen_ld8s(addr, IS_USER(s));
break;
case 4: /* ldr */
- gen_ldst(ldl, s);
+ tmp = gen_ld32(addr, IS_USER(s));
break;
case 5: /* ldrh */
- gen_ldst(lduw, s);
+ tmp = gen_ld16u(addr, IS_USER(s));
break;
case 6: /* ldrb */
- gen_ldst(ldub, s);
+ tmp = gen_ld8u(addr, IS_USER(s));
break;
case 7: /* ldrsh */
- gen_ldst(ldsw, s);
+ tmp = gen_ld16s(addr, IS_USER(s));
break;
}
if (op >= 3) /* load */
- gen_movl_reg_T0(s, rd);
+ store_reg(s, rd, tmp);
+ dead_tmp(addr);
break;
case 6:
/* load/store word immediate offset */
rd = insn & 7;
rn = (insn >> 3) & 7;
- gen_movl_T1_reg(s, rn);
+ addr = load_reg(s, rn);
val = (insn >> 4) & 0x7c;
- gen_op_movl_T2_im(val);
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, val);
if (insn & (1 << 11)) {
/* load */
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, rd);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
} else {
/* store */
- gen_movl_T0_reg(s, rd);
- gen_ldst(stl, s);
+ tmp = load_reg(s, rd);
+ gen_st32(tmp, addr, IS_USER(s));
}
+ dead_tmp(addr);
break;
case 7:
/* load/store byte immediate offset */
rd = insn & 7;
rn = (insn >> 3) & 7;
- gen_movl_T1_reg(s, rn);
+ addr = load_reg(s, rn);
val = (insn >> 6) & 0x1f;
- gen_op_movl_T2_im(val);
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, val);
if (insn & (1 << 11)) {
/* load */
- gen_ldst(ldub, s);
- gen_movl_reg_T0(s, rd);
+ tmp = gen_ld8u(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
} else {
/* store */
- gen_movl_T0_reg(s, rd);
- gen_ldst(stb, s);
+ tmp = load_reg(s, rd);
+ gen_st8(tmp, addr, IS_USER(s));
}
+ dead_tmp(addr);
break;
case 8:
/* load/store halfword immediate offset */
rd = insn & 7;
rn = (insn >> 3) & 7;
- gen_movl_T1_reg(s, rn);
+ addr = load_reg(s, rn);
val = (insn >> 5) & 0x3e;
- gen_op_movl_T2_im(val);
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, val);
if (insn & (1 << 11)) {
/* load */
- gen_ldst(lduw, s);
- gen_movl_reg_T0(s, rd);
+ tmp = gen_ld16u(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
} else {
/* store */
- gen_movl_T0_reg(s, rd);
- gen_ldst(stw, s);
+ tmp = load_reg(s, rd);
+ gen_st16(tmp, addr, IS_USER(s));
}
+ dead_tmp(addr);
break;
case 9:
/* load/store from stack */
rd = (insn >> 8) & 7;
- gen_movl_T1_reg(s, 13);
+ addr = load_reg(s, 13);
val = (insn & 0xff) * 4;
- gen_op_movl_T2_im(val);
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, val);
if (insn & (1 << 11)) {
/* load */
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, rd);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, rd, tmp);
} else {
/* store */
- gen_movl_T0_reg(s, rd);
- gen_ldst(stl, s);
+ tmp = load_reg(s, rd);
+ gen_st32(tmp, addr, IS_USER(s));
}
+ dead_tmp(addr);
break;
case 10:
@@ -2222,15 +8368,15 @@ static void disas_thumb_insn(DisasContext *s)
rd = (insn >> 8) & 7;
if (insn & (1 << 11)) {
/* SP */
- gen_movl_T0_reg(s, 13);
+ tmp = load_reg(s, 13);
} else {
/* PC. bit 1 is ignored. */
- gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
}
val = (insn & 0xff) * 4;
- gen_op_movl_T1_im(val);
- gen_op_addl_T0_T1();
- gen_movl_reg_T0(s, rd);
+ tcg_gen_addi_i32(tmp, tmp, val);
+ store_reg(s, rd, tmp);
break;
case 11:
@@ -2239,18 +8385,30 @@ static void disas_thumb_insn(DisasContext *s)
switch (op) {
case 0:
/* adjust stack pointer */
- gen_movl_T1_reg(s, 13);
+ tmp = load_reg(s, 13);
val = (insn & 0x7f) * 4;
if (insn & (1 << 7))
- val = -(int32_t)val;
- gen_op_movl_T2_im(val);
- gen_op_addl_T1_T2();
- gen_movl_reg_T1(s, 13);
+ val = -(int32_t)val;
+ tcg_gen_addi_i32(tmp, tmp, val);
+ store_reg(s, 13, tmp);
break;
+ case 2: /* sign/zero extend. */
+ ARCH(6);
+ rd = insn & 7;
+ rm = (insn >> 3) & 7;
+ tmp = load_reg(s, rm);
+ switch ((insn >> 6) & 3) {
+ case 0: gen_sxth(tmp); break;
+ case 1: gen_sxtb(tmp); break;
+ case 2: gen_uxth(tmp); break;
+ case 3: gen_uxtb(tmp); break;
+ }
+ store_reg(s, rd, tmp);
+ break;
case 4: case 5: case 0xc: case 0xd:
/* push/pop */
- gen_movl_T1_reg(s, 13);
+ addr = load_reg(s, 13);
if (insn & (1 << 8))
offset = 4;
else
@@ -2260,56 +8418,124 @@ static void disas_thumb_insn(DisasContext *s)
offset += 4;
}
if ((insn & (1 << 11)) == 0) {
- gen_op_movl_T2_im(-offset);
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, -offset);
}
- gen_op_movl_T2_im(4);
for (i = 0; i < 8; i++) {
if (insn & (1 << i)) {
if (insn & (1 << 11)) {
/* pop */
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, i);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, i, tmp);
} else {
/* push */
- gen_movl_T0_reg(s, i);
- gen_ldst(stl, s);
+ tmp = load_reg(s, i);
+ gen_st32(tmp, addr, IS_USER(s));
}
/* advance to the next address. */
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, 4);
}
}
+ TCGV_UNUSED(tmp);
if (insn & (1 << 8)) {
if (insn & (1 << 11)) {
/* pop pc */
- gen_ldst(ldl, s);
+ tmp = gen_ld32(addr, IS_USER(s));
/* don't set the pc until the rest of the instruction
has completed */
} else {
/* push lr */
- gen_movl_T0_reg(s, 14);
- gen_ldst(stl, s);
+ tmp = load_reg(s, 14);
+ gen_st32(tmp, addr, IS_USER(s));
}
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, 4);
}
if ((insn & (1 << 11)) == 0) {
- gen_op_movl_T2_im(-offset);
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, -offset);
}
/* write back the new stack pointer */
- gen_movl_reg_T1(s, 13);
+ store_reg(s, 13, addr);
/* set the new PC value */
if ((insn & 0x0900) == 0x0900)
- gen_bx(s);
+ gen_bx(s, tmp);
+ break;
+
+ case 1: case 3: case 9: case 11: /* czb */
+ rm = insn & 7;
+ tmp = load_reg(s, rm);
+ s->condlabel = gen_new_label();
+ s->condjmp = 1;
+ if (insn & (1 << 11))
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
+ else
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
+ dead_tmp(tmp);
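+            /* cbz/cbnz offset: insn[7:3] -> offset[5:1], insn[9] -> offset[6]. */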
+ offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
+ val = (uint32_t)s->pc + 2;
+ val += offset;
+ gen_jmp(s, val);
+ break;
+
+ case 15: /* IT, nop-hint. */
+ if ((insn & 0xf) == 0) {
+ gen_nop_hint(s, (insn >> 4) & 0xf);
+ break;
+ }
+ /* If Then. */
+ s->condexec_cond = (insn >> 4) & 0xe;
+ s->condexec_mask = insn & 0x1f;
+            /* No actual code generated for this insn; it just sets up state.  */
break;
case 0xe: /* bkpt */
- gen_op_movl_T0_im((long)s->pc - 2);
- gen_op_movl_reg_TN[0][15]();
- gen_op_bkpt();
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc - 2);
+ gen_exception(EXCP_BKPT);
s->is_jmp = DISAS_JUMP;
break;
+ case 0xa: /* rev */
+ ARCH(6);
+ rn = (insn >> 3) & 0x7;
+ rd = insn & 0x7;
+ tmp = load_reg(s, rn);
+ switch ((insn >> 6) & 3) {
+ case 0: tcg_gen_bswap_i32(tmp, tmp); break;
+ case 1: gen_rev16(tmp); break;
+ case 3: gen_revsh(tmp); break;
+ default: goto illegal_op;
+ }
+ store_reg(s, rd, tmp);
+ break;
+
+ case 6: /* cps */
+ ARCH(6);
+ if (IS_USER(s))
+ break;
+ if (IS_M(env)) {
+ tmp = tcg_const_i32((insn & (1 << 4)) != 0);
+ /* PRIMASK */
+ if (insn & 1) {
+ addr = tcg_const_i32(16);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ }
+ /* FAULTMASK */
+ if (insn & 2) {
+ addr = tcg_const_i32(17);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ }
+ gen_lookup_tb(s);
+ } else {
+ if (insn & (1 << 4))
+ shift = CPSR_A | CPSR_I | CPSR_F;
+ else
+ shift = 0;
+
+ val = ((insn & 7) << 6) & shift;
+ gen_op_movl_T0_im(val);
+ gen_set_psr_T0(s, shift, 0);
+ }
+ break;
+
default:
goto undef;
}
@@ -2318,26 +8544,28 @@ static void disas_thumb_insn(DisasContext *s)
case 12:
/* load/store multiple */
rn = (insn >> 8) & 0x7;
- gen_movl_T1_reg(s, rn);
- gen_op_movl_T2_im(4);
+ addr = load_reg(s, rn);
for (i = 0; i < 8; i++) {
if (insn & (1 << i)) {
if (insn & (1 << 11)) {
/* load */
- gen_ldst(ldl, s);
- gen_movl_reg_T0(s, i);
+ tmp = gen_ld32(addr, IS_USER(s));
+ store_reg(s, i, tmp);
} else {
/* store */
- gen_movl_T0_reg(s, i);
- gen_ldst(stl, s);
+ tmp = load_reg(s, i);
+ gen_st32(tmp, addr, IS_USER(s));
}
/* advance to the next address */
- gen_op_addl_T1_T2();
+ tcg_gen_addi_i32(addr, addr, 4);
}
}
/* Base register writeback. */
- if ((insn & (1 << rn)) == 0)
- gen_movl_reg_T1(s, rn);
+ if ((insn & (1 << rn)) == 0) {
+ store_reg(s, rn, addr);
+ } else {
+ dead_tmp(addr);
+ }
break;
case 13:
@@ -2348,19 +8576,15 @@ static void disas_thumb_insn(DisasContext *s)
if (cond == 0xf) {
/* swi */
- gen_op_movl_T0_im((long)s->pc | 1);
- /* Don't set r15. */
- gen_op_movl_reg_TN[0][15]();
- gen_op_swi();
- s->is_jmp = DISAS_JUMP;
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_SWI;
break;
}
/* generate a conditional jump to next instruction */
s->condlabel = gen_new_label();
- gen_test_cc[cond ^ 1](s->condlabel);
+ gen_test_cc(cond ^ 1, s->condlabel);
s->condjmp = 1;
- //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
- //s->is_jmp = DISAS_JUMP_NEXT;
gen_movl_T1_reg(s, 15);
/* jump to the offset */
@@ -2371,22 +8595,12 @@ static void disas_thumb_insn(DisasContext *s)
break;
case 14:
- /* unconditional branch */
if (insn & (1 << 11)) {
- /* Second half of blx. */
- offset = ((insn & 0x7ff) << 1);
- gen_movl_T0_reg(s, 14);
- gen_op_movl_T1_im(offset);
- gen_op_addl_T0_T1();
- gen_op_movl_T1_im(0xfffffffc);
- gen_op_andl_T0_T1();
-
- val = (uint32_t)s->pc;
- gen_op_movl_T1_im(val | 1);
- gen_movl_reg_T1(s, 14);
- gen_bx(s);
+ if (disas_thumb2_insn(env, s, insn))
+ goto undef32;
break;
}
+ /* unconditional branch */
val = (uint32_t)s->pc;
offset = ((int32_t)insn << 21) >> 21;
val += (offset << 1) + 2;
@@ -2394,114 +8608,127 @@ static void disas_thumb_insn(DisasContext *s)
break;
case 15:
- /* branch and link [and switch to arm] */
- if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
- /* Instruction spans a page boundary. Implement it as two
- 16-bit instructions in case the second half causes an
- prefetch abort. */
- offset = ((int32_t)insn << 21) >> 9;
- val = s->pc + 2 + offset;
- gen_op_movl_T0_im(val);
- gen_movl_reg_T0(s, 14);
- break;
- }
- if (insn & (1 << 11)) {
- /* Second half of bl. */
- offset = ((insn & 0x7ff) << 1) | 1;
- gen_movl_T0_reg(s, 14);
- gen_op_movl_T1_im(offset);
- gen_op_addl_T0_T1();
-
- val = (uint32_t)s->pc;
- gen_op_movl_T1_im(val | 1);
- gen_movl_reg_T1(s, 14);
- gen_bx(s);
- break;
- }
- offset = ((int32_t)insn << 21) >> 10;
- insn = lduw_code(s->pc);
-#ifdef GEN_TRACE
- if (tracing) {
- int insn_ticks;
-
- trace_add_insn( insn_wrap_thumb(insn), 1 /* thumb insn */ );
- insn_ticks = get_insn_ticks_thumb(insn);
- gen_op_trace_insn();
- gen_op_add_to_sim_time(insn_ticks);
- }
-#endif
- offset |= insn & 0x7ff;
-
- val = (uint32_t)s->pc + 2;
- gen_op_movl_T1_im(val | 1);
- gen_movl_reg_T1(s, 14);
-
- val += offset << 1;
- if (insn & (1 << 12)) {
- /* bl */
- gen_jmp(s, val);
- } else {
- /* blx */
- val &= ~(uint32_t)2;
- gen_op_movl_T0_im(val);
- gen_bx(s);
- }
+ if (disas_thumb2_insn(env, s, insn))
+ goto undef32;
+ break;
}
return;
+undef32:
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc - 4);
+ gen_exception(EXCP_UDEF);
+ s->is_jmp = DISAS_JUMP;
+ return;
+illegal_op:
undef:
- gen_op_movl_T0_im((long)s->pc - 2);
- gen_op_movl_reg_TN[0][15]();
- gen_op_undef_insn();
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc - 2);
+ gen_exception(EXCP_UDEF);
s->is_jmp = DISAS_JUMP;
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline int gen_intermediate_code_internal(CPUState *env,
- TranslationBlock *tb,
- int search_pc)
+static inline void gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
{
DisasContext dc1, *dc = &dc1;
uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
/* generate intermediate code */
+ num_temps = 0;
+ memset(temps, 0, sizeof(temps));
+
pc_start = tb->pc;
dc->tb = tb;
- gen_opc_ptr = gen_opc_buf;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
- gen_opparam_ptr = gen_opparam_buf;
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = env->singlestep_enabled;
dc->condjmp = 0;
dc->thumb = env->thumb;
+ dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
+ dc->condexec_cond = env->condexec_bits >> 4;
+ dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
- dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
+ if (IS_M(env)) {
+ dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
+ } else {
+ dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
+ }
#endif
+ cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
+ cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
+ cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
+ cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
+ cpu_V0 = cpu_F0d;
+ cpu_V1 = cpu_F1d;
+ /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
+ cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- nb_gen_labels = 0;
lj = -1;
-#ifdef GEN_TRACE
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0)
+ max_insns = CF_COUNT_MASK;
+
+ gen_icount_start();
+ /* Reset the conditional execution bits immediately. This avoids
+ complications trying to do it at the end of the block. */
+ if (env->condexec_bits)
+ {
+ TCGv tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, 0);
+ store_cpu_field(tmp, condexec_bits);
+ }
+#ifdef CONFIG_TRACE
if (tracing) {
- gen_op_trace_bb(trace_static.bb_num, (long) tb);
+ gen_traceBB(trace_static.bb_num, (target_phys_addr_t)tb );
trace_bb_start(dc->pc);
}
#endif
+
do {
+#ifdef CONFIG_USER_ONLY
+ /* Intercept jump to the magic kernel page. */
+ if (dc->pc >= 0xffff0000) {
+            /* We always get here via a jump, so we know we are not in a
+               conditional execution block.  */
+ gen_exception(EXCP_KERNEL_TRAP);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ }
+#else
+ if (dc->pc >= 0xfffffff0 && IS_M(env)) {
+            /* We always get here via a jump, so we know we are not in a
+               conditional execution block.  */
+ gen_exception(EXCP_EXCEPTION_EXIT);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ }
+#endif
+
if (env->nb_breakpoints > 0) {
for(j = 0; j < env->nb_breakpoints; j++) {
if (env->breakpoints[j] == dc->pc) {
- gen_op_movl_T0_im((long)dc->pc);
- gen_op_movl_reg_TN[0][15]();
- gen_op_debug();
+ gen_set_condexec(dc);
+ gen_set_pc_im(dc->pc);
+ gen_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
+ /* Advance PC so that clearing the breakpoint will
+ invalidate this TB. */
+ dc->pc += 2;
+ goto done_generating;
break;
}
}
@@ -2515,46 +8742,101 @@ static inline int gen_intermediate_code_internal(CPUState *env,
}
gen_opc_pc[lj] = dc->pc;
gen_opc_instr_start[lj] = 1;
+ gen_opc_icount[lj] = num_insns;
}
- if (env->thumb)
- disas_thumb_insn(dc);
- else
- disas_arm_insn(env, dc);
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ gen_io_start();
+
+ if (env->thumb) {
+ disas_thumb_insn(env, dc);
+ if (dc->condexec_mask) {
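+                /* Advance the IT state: shift the next condition bit into
+                   condexec_cond and consume one bit of the mask. */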
+ dc->condexec_cond = (dc->condexec_cond & 0xe)
+ | ((dc->condexec_mask >> 4) & 1);
+ dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
+ if (dc->condexec_mask == 0) {
+ dc->condexec_cond = 0;
+ }
+ }
+ } else {
+ disas_arm_insn(env, dc);
+ }
+ if (num_temps) {
+ fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
+ num_temps = 0;
+ }
if (dc->condjmp && !dc->is_jmp) {
gen_set_label(dc->condlabel);
dc->condjmp = 0;
}
+ /* Terminate the TB on memory ops if watchpoints are present. */
+        /* FIXME: This should be replaced by the deterministic execution
+ * IRQ raising bits. */
+ if (dc->is_mem && env->nb_watchpoints)
+ break;
+
     /* Translation stops when a conditional branch is encountered.
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
- * ensures prefech aborts occur at the right place. */
+ * ensures prefetch aborts occur at the right place. */
+ num_insns ++;
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
!env->singlestep_enabled &&
- dc->pc < next_page_start);
-#ifdef GEN_TRACE
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+#ifdef CONFIG_TRACE
if (tracing) {
trace_bb_end();
}
#endif
+ if (tb->cflags & CF_LAST_IO) {
+ if (dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying
+ code. */
+ cpu_abort(env, "IO on conditional branch instruction");
+ }
+ gen_io_end();
+ }
+
/* At this stage dc->condjmp will only be set when the skipped
- * instruction was a conditional branch, and the PC has already been
- * written. */
- if (__builtin_expect(env->singlestep_enabled, 0)) {
+ instruction was a conditional branch or trap, and the PC has
+ already been written. */
+ if (unlikely(env->singlestep_enabled)) {
/* Make sure the pc is updated, and raise a debug exception. */
if (dc->condjmp) {
- gen_op_debug();
+ gen_set_condexec(dc);
+ if (dc->is_jmp == DISAS_SWI) {
+ gen_exception(EXCP_SWI);
+ } else {
+ gen_exception(EXCP_DEBUG);
+ }
gen_set_label(dc->condlabel);
}
if (dc->condjmp || !dc->is_jmp) {
- gen_op_movl_T0_im((long)dc->pc);
- gen_op_movl_reg_TN[0][15]();
+ gen_set_pc_im(dc->pc);
dc->condjmp = 0;
}
- gen_op_debug();
+ gen_set_condexec(dc);
+ if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
+ gen_exception(EXCP_SWI);
+ } else {
+ /* FIXME: Single stepping a WFI insn will not halt
+ the CPU. */
+ gen_exception(EXCP_DEBUG);
+ }
} else {
+ /* While branches must always occur at the end of an IT block,
+ there are a few other things that can cause us to terminate
+           the TB in the middle of an IT block:
+ - Exception generating instructions (bkpt, swi, undefined).
+ - Page boundaries.
+ - Hardware watchpoints.
+           Hardware breakpoints have already been handled; they skip this code.
+ */
+ gen_set_condexec(dc);
switch(dc->is_jmp) {
case DISAS_NEXT:
gen_goto_tb(dc, 1, dc->pc);
@@ -2563,19 +8845,28 @@ static inline int gen_intermediate_code_internal(CPUState *env,
case DISAS_JUMP:
case DISAS_UPDATE:
/* indicate that the hash table must be used to find the next TB */
- gen_op_movl_T0_0();
- gen_op_exit_tb();
+ tcg_gen_exit_tb(0);
break;
case DISAS_TB_JUMP:
/* nothing more to generate */
break;
+ case DISAS_WFI:
+ gen_helper_wfi();
+ break;
+ case DISAS_SWI:
+ gen_exception(EXCP_SWI);
+ break;
}
if (dc->condjmp) {
gen_set_label(dc->condlabel);
+ gen_set_condexec(dc);
gen_goto_tb(dc, 1, dc->pc);
dc->condjmp = 0;
}
}
+
+done_generating:
+ gen_icount_end(tb, num_insns);
*gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
@@ -2584,11 +8875,6 @@ static inline int gen_intermediate_code_internal(CPUState *env,
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
fprintf(logfile, "\n");
- if (loglevel & (CPU_LOG_TB_OP)) {
- fprintf(logfile, "OP:\n");
- dump_ops(gen_opc_buf, gen_opparam_buf);
- fprintf(logfile, "\n");
- }
}
#endif
if (search_pc) {
@@ -2596,74 +8882,45 @@ static inline int gen_intermediate_code_internal(CPUState *env,
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
- tb->size = 0;
} else {
tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
}
- return 0;
-}
-
-#if defined(GEN_TRACE)
-static int trace_gen_intermediate_code(CPUState *env, TranslationBlock *tb)
-{
- return gen_intermediate_code_internal(env, tb, 0);
-}
-
-static int trace_gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
-{
- return gen_intermediate_code_internal(env, tb, 1);
}
-void qemu_trace_enable(void)
-{
- _gen_intermediate_code = trace_gen_intermediate_code;
- _gen_intermediate_code_pc = trace_gen_intermediate_code_pc;
-}
-#else
-
-static int default_gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
- return gen_intermediate_code_internal(env, tb, 0);
+ gen_intermediate_code_internal(env, tb, 0);
}
-static int default_gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
- return gen_intermediate_code_internal(env, tb, 1);
+ gen_intermediate_code_internal(env, tb, 1);
}
-gen_intermediate_code_func _gen_intermediate_code = default_gen_intermediate_code;
-gen_intermediate_code_func _gen_intermediate_code_pc = default_gen_intermediate_code_pc;
-
-int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
-{
- return (*_gen_intermediate_code)(env, tb);
-}
-
-int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
-{
- return (*_gen_intermediate_code_pc)(env, tb);
-}
-
-void qemu_trace_disable( void )
-{
- _gen_intermediate_code = default_gen_intermediate_code;
- _gen_intermediate_code_pc = default_gen_intermediate_code_pc;
-}
-
-static const char * const cpu_mode_names[16] = {
+static const char *cpu_mode_names[16] = {
"usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
"???", "???", "???", "und", "???", "???", "???", "sys"
};
+
void cpu_dump_state(CPUState *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{
int i;
+#if 0
union {
uint32_t i;
float s;
} s0, s1;
CPU_DoubleU d;
+ /* ??? This assumes float64 and double have the same layout.
+ Oh well, it's only debug dumps. */
+ union {
+ float64 f64;
+ double d;
+ } d0;
+#endif
uint32_t psr;
for(i=0;i<16;i++) {
@@ -2674,7 +8931,7 @@ void cpu_dump_state(CPUState *env, FILE *f,
cpu_fprintf(f, " ");
}
psr = cpsr_read(env);
- cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n",
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
psr,
psr & (1 << 31) ? 'N' : '-',
psr & (1 << 30) ? 'Z' : '-',
@@ -2683,17 +8940,24 @@ void cpu_dump_state(CPUState *env, FILE *f,
psr & CPSR_T ? 'T' : 'A',
cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
+#if 0
for (i = 0; i < 16; i++) {
d.d = env->vfp.regs[i];
s0.i = d.l.lower;
s1.i = d.l.upper;
- cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
+ d0.f64 = d.d;
+ cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
i * 2, (int)s0.i, s0.s,
- i * 2 + 1, (int)s0.i, s0.s,
+ i * 2 + 1, (int)s1.i, s1.s,
i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
- d.d);
+ d0.d);
}
cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
+#endif
}
-#endif
+void gen_pc_load(CPUState *env, TranslationBlock *tb,
+ unsigned long searched_pc, int pc_pos, void *puc)
+{
+ env->regs[15] = gen_opc_pc[pc_pos];
+}