From 413f05aaf54fa08c0ae7e997327a4f4a473c0a8d Mon Sep 17 00:00:00 2001 From: Upstream Date: Mon, 12 Jan 1970 13:46:40 +0000 Subject: external/qemu 0.8.2 --- target-arm/cpu.h | 226 ++++ target-arm/exec.h | 75 ++ target-arm/helper.c | 624 +++++++++ target-arm/nwfpe/double_cpdo.c | 296 +++++ target-arm/nwfpe/extended_cpdo.c | 273 ++++ target-arm/nwfpe/fpa11.c | 237 ++++ target-arm/nwfpe/fpa11.h | 122 ++ target-arm/nwfpe/fpa11.inl | 51 + target-arm/nwfpe/fpa11_cpdo.c | 117 ++ target-arm/nwfpe/fpa11_cpdt.c | 376 ++++++ target-arm/nwfpe/fpa11_cprt.c | 290 +++++ target-arm/nwfpe/fpopcode.c | 148 +++ target-arm/nwfpe/fpopcode.h | 390 ++++++ target-arm/nwfpe/fpsr.h | 108 ++ target-arm/nwfpe/single_cpdo.c | 255 ++++ target-arm/op.c | 1203 ++++++++++++++++++ target-arm/op_helper.c | 227 ++++ target-arm/op_mem.h | 70 ++ target-arm/op_template.h | 53 + target-arm/translate.c | 2576 ++++++++++++++++++++++++++++++++++++++ 20 files changed, 7717 insertions(+) create mode 100644 target-arm/cpu.h create mode 100644 target-arm/exec.h create mode 100644 target-arm/helper.c create mode 100644 target-arm/nwfpe/double_cpdo.c create mode 100644 target-arm/nwfpe/extended_cpdo.c create mode 100644 target-arm/nwfpe/fpa11.c create mode 100644 target-arm/nwfpe/fpa11.h create mode 100644 target-arm/nwfpe/fpa11.inl create mode 100644 target-arm/nwfpe/fpa11_cpdo.c create mode 100644 target-arm/nwfpe/fpa11_cpdt.c create mode 100644 target-arm/nwfpe/fpa11_cprt.c create mode 100644 target-arm/nwfpe/fpopcode.c create mode 100644 target-arm/nwfpe/fpopcode.h create mode 100644 target-arm/nwfpe/fpsr.h create mode 100644 target-arm/nwfpe/single_cpdo.c create mode 100644 target-arm/op.c create mode 100644 target-arm/op_helper.c create mode 100644 target-arm/op_mem.h create mode 100644 target-arm/op_template.h create mode 100644 target-arm/translate.c (limited to 'target-arm') diff --git a/target-arm/cpu.h b/target-arm/cpu.h new file mode 100644 index 0000000..75a1f13 --- /dev/null +++ b/target-arm/cpu.h @@ -0,0 +1,226 @@ +/* + * ARM virtual CPU header + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef CPU_ARM_H +#define CPU_ARM_H + +#define TARGET_LONG_BITS 32 + +#include "cpu-defs.h" + +#include "softfloat.h" + +#define TARGET_HAS_ICE 1 + +#define EXCP_UDEF 1 /* undefined instruction */ +#define EXCP_SWI 2 /* software interrupt */ +#define EXCP_PREFETCH_ABORT 3 +#define EXCP_DATA_ABORT 4 +#define EXCP_IRQ 5 +#define EXCP_FIQ 6 +#define EXCP_BKPT 7 + +/* We currently assume float and double are IEEE single and double + precision respectively. + Doing runtime conversions is tricky because VFP registers may contain + integer values (eg. as the result of a FTOSI instruction). 
+ s<2n> maps to the least significant half of d + s<2n+1> maps to the most significant half of d + */ + +typedef struct CPUARMState { + /* Regs for current mode. */ + uint32_t regs[16]; + /* Frequently accessed CPSR bits are stored separately for efficiently. + This contains all the other bits. Use cpsr_{read,write} to accless + the whole CPSR. */ + uint32_t uncached_cpsr; + uint32_t spsr; + + /* Banked registers. */ + uint32_t banked_spsr[6]; + uint32_t banked_r13[6]; + uint32_t banked_r14[6]; + + /* These hold r8-r12. */ + uint32_t usr_regs[5]; + uint32_t fiq_regs[5]; + + /* cpsr flag cache for faster execution */ + uint32_t CF; /* 0 or 1 */ + uint32_t VF; /* V is the bit 31. All other bits are undefined */ + uint32_t NZF; /* N is bit 31. Z is computed from NZF */ + uint32_t QF; /* 0 or 1 */ + + int thumb; /* 0 = arm mode, 1 = thumb mode */ + + /* System control coprocessor (cp15) */ + struct { + uint32_t c0_cpuid; + uint32_t c1_sys; /* System control register. */ + uint32_t c1_coproc; /* Coprocessor access register. */ + uint32_t c2; /* MMU translation table base. */ + uint32_t c3; /* MMU domain access control register. */ + uint32_t c5_insn; /* Fault status registers. */ + uint32_t c5_data; + uint32_t c6_insn; /* Fault address registers. */ + uint32_t c6_data; + uint32_t c9_insn; /* Cache lockdown registers. */ + uint32_t c9_data; + uint32_t c13_fcse; /* FCSE PID. */ + uint32_t c13_context; /* Context ID. */ + } cp15; + + /* Internal CPU feature flags. */ + uint32_t features; + + /* exception/interrupt handling */ + jmp_buf jmp_env; + int exception_index; + int interrupt_request; + int user_mode_only; + int halted; + + /* VFP coprocessor state. */ + struct { + float64 regs[16]; + + uint32_t xregs[16]; + /* We store these fpcsr fields separately for convenience. */ + int vec_len; + int vec_stride; + + /* Temporary variables if we don't have spare fp regs. */ + float32 tmp0s, tmp1s; + float64 tmp0d, tmp1d; + + float_status fp_status; + } vfp; + +#if defined(CONFIG_USER_ONLY) + /* For usermode syscall translation. */ + int eabi; +#endif + + CPU_COMMON + +} CPUARMState; + +CPUARMState *cpu_arm_init(void); +int cpu_arm_exec(CPUARMState *s); +void cpu_arm_close(CPUARMState *s); +void do_interrupt(CPUARMState *); +void switch_mode(CPUARMState *, int); + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. */ +struct siginfo; +int cpu_arm_signal_handler(int host_signum, struct siginfo *info, + void *puc); + +#define CPSR_M (0x1f) +#define CPSR_T (1 << 5) +#define CPSR_F (1 << 6) +#define CPSR_I (1 << 7) +#define CPSR_A (1 << 8) +#define CPSR_E (1 << 9) +#define CPSR_IT_2_7 (0xfc00) +/* Bits 20-23 reserved. */ +#define CPSR_J (1 << 24) +#define CPSR_IT_0_1 (3 << 25) +#define CPSR_Q (1 << 27) +#define CPSR_NZCV (0xf << 28) + +#define CACHED_CPSR_BITS (CPSR_T | CPSR_Q | CPSR_NZCV) +/* Return the current CPSR value. */ +static inline uint32_t cpsr_read(CPUARMState *env) +{ + int ZF; + ZF = (env->NZF == 0); + return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) | + (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) + | (env->thumb << 5); +} + +/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. 
*/ +static inline void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) +{ + /* NOTE: N = 1 and Z = 1 cannot be stored currently */ + if (mask & CPSR_NZCV) { + env->NZF = (val & 0xc0000000) ^ 0x40000000; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + } + if (mask & CPSR_Q) + env->QF = ((val & CPSR_Q) != 0); + if (mask & CPSR_T) + env->thumb = ((val & CPSR_T) != 0); + + if ((env->uncached_cpsr ^ val) & mask & CPSR_M) { + switch_mode(env, val & CPSR_M); + } + mask &= ~CACHED_CPSR_BITS; + env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); +} + +enum arm_cpu_mode { + ARM_CPU_MODE_USR = 0x10, + ARM_CPU_MODE_FIQ = 0x11, + ARM_CPU_MODE_IRQ = 0x12, + ARM_CPU_MODE_SVC = 0x13, + ARM_CPU_MODE_ABT = 0x17, + ARM_CPU_MODE_UND = 0x1b, + ARM_CPU_MODE_SYS = 0x1f +}; + +/* VFP system registers. */ +#define ARM_VFP_FPSID 0 +#define ARM_VFP_FPSCR 1 +#define ARM_VFP_FPEXC 8 +#define ARM_VFP_FPINST 9 +#define ARM_VFP_FPINST2 10 + + +enum arm_features { + ARM_FEATURE_VFP, + ARM_FEATURE_AUXCR /* ARM1026 Auxiliary control register. */ +}; + +static inline int arm_feature(CPUARMState *env, int feature) +{ + return (env->features & (1u << feature)) != 0; +} + +void cpu_arm_set_model(CPUARMState *env, uint32_t id); + +#define ARM_CPUID_ARM1026 0x4106a262 +#define ARM_CPUID_ARM926 0x41069265 + +#if defined(CONFIG_USER_ONLY) +#define TARGET_PAGE_BITS 12 +#else +/* The ARM MMU allows 1k pages. */ +/* ??? Linux doesn't actually use these, and they're deprecated in recent + architecture revisions. Maybe an a configure option to disable them. */ +#define TARGET_PAGE_BITS 10 +#endif +#include "cpu-all.h" + +#endif diff --git a/target-arm/exec.h b/target-arm/exec.h new file mode 100644 index 0000000..2d2b99a --- /dev/null +++ b/target-arm/exec.h @@ -0,0 +1,75 @@ +/* + * ARM execution defines + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include "dyngen-exec.h" + +register struct CPUARMState *env asm(AREG0); +register uint32_t T0 asm(AREG1); +register uint32_t T1 asm(AREG2); +register uint32_t T2 asm(AREG3); + +/* TODO: Put these in FP regs on targets that have such things. */ +/* It is ok for FT0s and FT0d to overlap. Likewise FT1s and FT1d. 
*/ +#define FT0s env->vfp.tmp0s +#define FT1s env->vfp.tmp1s +#define FT0d env->vfp.tmp0d +#define FT1d env->vfp.tmp1d + +#include "cpu.h" +#include "exec-all.h" + +static inline void env_to_regs(void) +{ +} + +static inline void regs_to_env(void) +{ +} + +int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw, + int is_user, int is_softmmu); + +#if !defined(CONFIG_USER_ONLY) +#include "softmmu_exec.h" +#endif + +/* In op_helper.c */ + +void cpu_lock(void); +void cpu_unlock(void); +void helper_set_cp15(CPUState *, uint32_t, uint32_t); +uint32_t helper_get_cp15(CPUState *, uint32_t); + +void cpu_loop_exit(void); + +void raise_exception(int); + +void do_vfp_abss(void); +void do_vfp_absd(void); +void do_vfp_negs(void); +void do_vfp_negd(void); +void do_vfp_sqrts(void); +void do_vfp_sqrtd(void); +void do_vfp_cmps(void); +void do_vfp_cmpd(void); +void do_vfp_cmpes(void); +void do_vfp_cmped(void); +void do_vfp_set_fpscr(void); +void do_vfp_get_fpscr(void); + diff --git a/target-arm/helper.c b/target-arm/helper.c new file mode 100644 index 0000000..2ed46a2 --- /dev/null +++ b/target-arm/helper.c @@ -0,0 +1,624 @@ +#include +#include +#include + +#include "cpu.h" +#include "exec-all.h" + +void cpu_reset(CPUARMState *env) +{ +#if defined (CONFIG_USER_ONLY) + env->uncached_cpsr = ARM_CPU_MODE_USR; + env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30; +#else + /* SVC mode with interrupts disabled. */ + env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I; + env->vfp.xregs[ARM_VFP_FPEXC] = 0; +#endif + env->regs[15] = 0; +} + +CPUARMState *cpu_arm_init(void) +{ + CPUARMState *env; + + env = qemu_mallocz(sizeof(CPUARMState)); + if (!env) + return NULL; + cpu_exec_init(env); + cpu_reset(env); + tlb_flush(env, 1); + return env; +} + +static inline void set_feature(CPUARMState *env, int feature) +{ + env->features |= 1u << feature; +} + +void cpu_arm_set_model(CPUARMState *env, uint32_t id) +{ + env->cp15.c0_cpuid = id; + switch (id) { + case ARM_CPUID_ARM926: + set_feature(env, ARM_FEATURE_VFP); + env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090; + break; + case ARM_CPUID_ARM1026: + set_feature(env, ARM_FEATURE_VFP); + set_feature(env, ARM_FEATURE_AUXCR); + env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0; + break; + default: + cpu_abort(env, "Bad CPU ID: %x\n", id); + break; + } +} + +void cpu_arm_close(CPUARMState *env) +{ + free(env); +} + +#if defined(CONFIG_USER_ONLY) + +void do_interrupt (CPUState *env) +{ + env->exception_index = -1; +} + +int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw, + int is_user, int is_softmmu) +{ + if (rw == 2) { + env->exception_index = EXCP_PREFETCH_ABORT; + env->cp15.c6_insn = address; + } else { + env->exception_index = EXCP_DATA_ABORT; + env->cp15.c6_data = address; + } + return 1; +} + +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +{ + return addr; +} + +/* These should probably raise undefined insn exceptions. */ +void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val) +{ + cpu_abort(env, "cp15 insn %08x\n", insn); +} + +uint32_t helper_get_cp15(CPUState *env, uint32_t insn) +{ + cpu_abort(env, "cp15 insn %08x\n", insn); + return 0; +} + +void switch_mode(CPUState *env, int mode) +{ + if (mode != ARM_CPU_MODE_USR) + cpu_abort(env, "Tried to switch out of user mode\n"); +} + +#else + +/* Map CPU modes onto saved register banks. 
*/ +static inline int bank_number (int mode) +{ + switch (mode) { + case ARM_CPU_MODE_USR: + case ARM_CPU_MODE_SYS: + return 0; + case ARM_CPU_MODE_SVC: + return 1; + case ARM_CPU_MODE_ABT: + return 2; + case ARM_CPU_MODE_UND: + return 3; + case ARM_CPU_MODE_IRQ: + return 4; + case ARM_CPU_MODE_FIQ: + return 5; + } + cpu_abort(cpu_single_env, "Bad mode %x\n", mode); + return -1; +} + +void switch_mode(CPUState *env, int mode) +{ + int old_mode; + int i; + + old_mode = env->uncached_cpsr & CPSR_M; + if (mode == old_mode) + return; + + if (old_mode == ARM_CPU_MODE_FIQ) { + memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); + } else if (mode == ARM_CPU_MODE_FIQ) { + memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); + memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); + } + + i = bank_number(old_mode); + env->banked_r13[i] = env->regs[13]; + env->banked_r14[i] = env->regs[14]; + env->banked_spsr[i] = env->spsr; + + i = bank_number(mode); + env->regs[13] = env->banked_r13[i]; + env->regs[14] = env->banked_r14[i]; + env->spsr = env->banked_spsr[i]; +} + +/* Handle a CPU exception. */ +void do_interrupt(CPUARMState *env) +{ + uint32_t addr; + uint32_t mask; + int new_mode; + uint32_t offset; + + /* TODO: Vectored interrupt controller. */ + switch (env->exception_index) { + case EXCP_UDEF: + new_mode = ARM_CPU_MODE_UND; + addr = 0x04; + mask = CPSR_I; + if (env->thumb) + offset = 2; + else + offset = 4; + break; + case EXCP_SWI: + new_mode = ARM_CPU_MODE_SVC; + addr = 0x08; + mask = CPSR_I; + /* The PC already points to the next instructon. */ + offset = 0; + break; + case EXCP_PREFETCH_ABORT: + case EXCP_BKPT: + new_mode = ARM_CPU_MODE_ABT; + addr = 0x0c; + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_DATA_ABORT: + new_mode = ARM_CPU_MODE_ABT; + addr = 0x10; + mask = CPSR_A | CPSR_I; + offset = 8; + break; + case EXCP_IRQ: + new_mode = ARM_CPU_MODE_IRQ; + addr = 0x18; + /* Disable IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_FIQ: + new_mode = ARM_CPU_MODE_FIQ; + addr = 0x1c; + /* Disable FIQ, IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I | CPSR_F; + offset = 4; + break; + default: + cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index); + return; /* Never happens. Keep compiler happy. */ + } + /* High vectors. */ + if (env->cp15.c1_sys & (1 << 13)) { + addr += 0xffff0000; + } + switch_mode (env, new_mode); + env->spsr = cpsr_read(env); + /* Switch to the new mode, and switch to Arm mode. */ + /* ??? Thumb interrupt handlers not implemented. */ + env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; + env->uncached_cpsr |= mask; + env->thumb = 0; + env->regs[14] = env->regs[15] + offset; + env->regs[15] = addr; + env->interrupt_request |= CPU_INTERRUPT_EXITTB; +} + +/* Check section/page access permissions. + Returns the page protection flags, or zero if the access is not + permitted. */ +static inline int check_ap(CPUState *env, int ap, int domain, int access_type, + int is_user) +{ + if (domain == 3) + return PAGE_READ | PAGE_WRITE; + + switch (ap) { + case 0: + if (access_type != 1) + return 0; + switch ((env->cp15.c1_sys >> 8) & 3) { + case 1: + return is_user ? 0 : PAGE_READ; + case 2: + return PAGE_READ; + default: + return 0; + } + case 1: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 2: + if (is_user) + return (access_type == 1) ? 
0 : PAGE_READ; + else + return PAGE_READ | PAGE_WRITE; + case 3: + return PAGE_READ | PAGE_WRITE; + default: + abort(); + } +} + +static int get_phys_addr(CPUState *env, uint32_t address, int access_type, + int is_user, uint32_t *phys_ptr, int *prot) +{ + int code; + uint32_t table; + uint32_t desc; + int type; + int ap; + int domain; + uint32_t phys_addr; + + /* Fast Context Switch Extension. */ + if (address < 0x02000000) + address += env->cp15.c13_fcse; + + if ((env->cp15.c1_sys & 1) == 0) { + /* MMU diusabled. */ + *phys_ptr = address; + *prot = PAGE_READ | PAGE_WRITE; + } else { + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc); + desc = ldl_phys(table); + type = (desc & 3); + domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3; + if (type == 0) { + /* Secton translation fault. */ + code = 5; + goto do_fault; + } + if (domain == 0 || domain == 2) { + if (type == 2) + code = 9; /* Section domain fault. */ + else + code = 11; /* Page domain fault. */ + goto do_fault; + } + if (type == 2) { + /* 1Mb section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + ap = (desc >> 10) & 3; + code = 13; + } else { + /* Lookup l2 entry. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + desc = ldl_phys(table); + switch (desc & 3) { + case 0: /* Page translation fault. */ + code = 7; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + break; + case 2: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + break; + case 3: /* 1k page. */ + if (type == 1) { + /* Page translation fault. */ + code = 7; + goto do_fault; + } + phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); + ap = (desc >> 4) & 3; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + abort(); + } + code = 15; + } + *prot = check_ap(env, ap, domain, access_type, is_user); + if (!*prot) { + /* Access permission fault. */ + goto do_fault; + } + *phys_ptr = phys_addr; + } + return 0; +do_fault: + return code | (domain << 4); +} + +int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, + int access_type, int is_user, int is_softmmu) +{ + uint32_t phys_addr; + int prot; + int ret; + + ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot); + if (ret == 0) { + /* Map a single [sub]page. */ + phys_addr &= ~(uint32_t)0x3ff; + address &= ~(uint32_t)0x3ff; + return tlb_set_page (env, address, phys_addr, prot, is_user, + is_softmmu); + } + + if (access_type == 2) { + env->cp15.c5_insn = ret; + env->cp15.c6_insn = address; + env->exception_index = EXCP_PREFETCH_ABORT; + } else { + env->cp15.c5_data = ret; + env->cp15.c6_data = address; + env->exception_index = EXCP_DATA_ABORT; + } + return 1; +} + +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +{ + uint32_t phys_addr; + int prot; + int ret; + + ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot); + + if (ret != 0) + return -1; + + return phys_addr; +} + +void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val) +{ + uint32_t op2; + + op2 = (insn >> 5) & 7; + switch ((insn >> 16) & 0xf) { + case 0: /* ID codes. */ + goto bad_reg; + case 1: /* System configuration. */ + switch (op2) { + case 0: + env->cp15.c1_sys = val; + /* ??? Lots of these bits are not implemented. */ + /* This may enable/disable the MMU, so do a TLB flush. 
*/ + tlb_flush(env, 1); + break; + case 2: + env->cp15.c1_coproc = val; + /* ??? Is this safe when called from within a TB? */ + tb_flush(env); + default: + goto bad_reg; + } + break; + case 2: /* MMU Page table control. */ + env->cp15.c2 = val; + break; + case 3: /* MMU Domain access control. */ + env->cp15.c3 = val; + break; + case 4: /* Reserved. */ + goto bad_reg; + case 5: /* MMU Fault status. */ + switch (op2) { + case 0: + env->cp15.c5_data = val; + break; + case 1: + env->cp15.c5_insn = val; + break; + default: + goto bad_reg; + } + break; + case 6: /* MMU Fault address. */ + switch (op2) { + case 0: + env->cp15.c6_data = val; + break; + case 1: + env->cp15.c6_insn = val; + break; + default: + goto bad_reg; + } + break; + case 7: /* Cache control. */ + /* No cache, so nothing to do. */ + break; + case 8: /* MMU TLB control. */ + switch (op2) { + case 0: /* Invalidate all. */ + tlb_flush(env, 0); + break; + case 1: /* Invalidate single TLB entry. */ +#if 0 + /* ??? This is wrong for large pages and sections. */ + /* As an ugly hack to make linux work we always flush a 4K + pages. */ + val &= 0xfffff000; + tlb_flush_page(env, val); + tlb_flush_page(env, val + 0x400); + tlb_flush_page(env, val + 0x800); + tlb_flush_page(env, val + 0xc00); +#else + tlb_flush(env, 1); +#endif + break; + default: + goto bad_reg; + } + break; + case 9: /* Cache lockdown. */ + switch (op2) { + case 0: + env->cp15.c9_data = val; + break; + case 1: + env->cp15.c9_insn = val; + break; + default: + goto bad_reg; + } + break; + case 10: /* MMU TLB lockdown. */ + /* ??? TLB lockdown not implemented. */ + break; + case 11: /* TCM DMA control. */ + case 12: /* Reserved. */ + goto bad_reg; + case 13: /* Process ID. */ + switch (op2) { + case 0: + /* Unlike real hardware the qemu TLB uses virtual addresses, + not modified virtual addresses, so this causes a TLB flush. + */ + if (env->cp15.c13_fcse != val) + tlb_flush(env, 1); + env->cp15.c13_fcse = val; + break; + case 1: + /* This changes the ASID, so do a TLB flush. */ + if (env->cp15.c13_context != val) + tlb_flush(env, 0); + env->cp15.c13_context = val; + break; + default: + goto bad_reg; + } + break; + case 14: /* Reserved. */ + goto bad_reg; + case 15: /* Implementation specific. */ + /* ??? Internal registers not implemented. */ + break; + } + return; +bad_reg: + /* ??? For debugging only. Should raise illegal instruction exception. */ + cpu_abort(env, "Unimplemented cp15 register read\n"); +} + +uint32_t helper_get_cp15(CPUState *env, uint32_t insn) +{ + uint32_t op2; + + op2 = (insn >> 5) & 7; + switch ((insn >> 16) & 0xf) { + case 0: /* ID codes. */ + switch (op2) { + default: /* Device ID. */ + return env->cp15.c0_cpuid; + case 1: /* Cache Type. */ + return 0x1dd20d2; + case 2: /* TCM status. */ + return 0; + } + case 1: /* System configuration. */ + switch (op2) { + case 0: /* Control register. */ + return env->cp15.c1_sys; + case 1: /* Auxiliary control register. */ + if (arm_feature(env, ARM_FEATURE_AUXCR)) + return 1; + goto bad_reg; + case 2: /* Coprocessor access register. */ + return env->cp15.c1_coproc; + default: + goto bad_reg; + } + case 2: /* MMU Page table control. */ + return env->cp15.c2; + case 3: /* MMU Domain access control. */ + return env->cp15.c3; + case 4: /* Reserved. */ + goto bad_reg; + case 5: /* MMU Fault status. */ + switch (op2) { + case 0: + return env->cp15.c5_data; + case 1: + return env->cp15.c5_insn; + default: + goto bad_reg; + } + case 6: /* MMU Fault address. 
*/ + switch (op2) { + case 0: + return env->cp15.c6_data; + case 1: + /* Arm9 doesn't have an IFAR, but implementing it anyway shouldn't + do any harm. */ + return env->cp15.c6_insn; + default: + goto bad_reg; + } + case 7: /* Cache control. */ + /* ??? This is for test, clean and invaidate operations that set the + Z flag. We can't represent N = Z = 1, so it also clears clears + the N flag. Oh well. */ + env->NZF = 0; + return 0; + case 8: /* MMU TLB control. */ + goto bad_reg; + case 9: /* Cache lockdown. */ + switch (op2) { + case 0: + return env->cp15.c9_data; + case 1: + return env->cp15.c9_insn; + default: + goto bad_reg; + } + case 10: /* MMU TLB lockdown. */ + /* ??? TLB lockdown not implemented. */ + return 0; + case 11: /* TCM DMA control. */ + case 12: /* Reserved. */ + goto bad_reg; + case 13: /* Process ID. */ + switch (op2) { + case 0: + return env->cp15.c13_fcse; + case 1: + return env->cp15.c13_context; + default: + goto bad_reg; + } + case 14: /* Reserved. */ + goto bad_reg; + case 15: /* Implementation specific. */ + /* ??? Internal registers not implemented. */ + return 0; + } +bad_reg: + /* ??? For debugging only. Should raise illegal instruction exception. */ + cpu_abort(env, "Unimplemented cp15 register read\n"); + return 0; +} + +#endif diff --git a/target-arm/nwfpe/double_cpdo.c b/target-arm/nwfpe/double_cpdo.c new file mode 100644 index 0000000..944083a --- /dev/null +++ b/target-arm/nwfpe/double_cpdo.c @@ -0,0 +1,296 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" + +float64 float64_exp(float64 Fm); +float64 float64_ln(float64 Fm); +float64 float64_sin(float64 rFm); +float64 float64_cos(float64 rFm); +float64 float64_arcsin(float64 rFm); +float64 float64_arctan(float64 rFm); +float64 float64_log(float64 rFm); +float64 float64_tan(float64 rFm); +float64 float64_arccos(float64 rFm); +float64 float64_pow(float64 rFn,float64 rFm); +float64 float64_pol(float64 rFn,float64 rFm); + +unsigned int DoubleCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + float64 rFm, rFn = 0; + unsigned int Fd, Fm, Fn, nRc = 1; + + //printk("DoubleCPDO(0x%08x)\n",opcode); + + Fm = getFm(opcode); + if (CONSTANT_FM(opcode)) + { + rFm = getDoubleConstant(Fm); + } + else + { + switch (fpa11->fType[Fm]) + { + case typeSingle: + rFm = float32_to_float64(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + rFm = fpa11->fpreg[Fm].fDouble; + break; + + case typeExtended: + // !! patb + //printk("not implemented! why not?\n"); + //!! ScottB + // should never get here, if extended involved + // then other operand should be promoted then + // ExtendedCPDO called. 
+ break; + + default: return 0; + } + } + + if (!MONADIC_INSTRUCTION(opcode)) + { + Fn = getFn(opcode); + switch (fpa11->fType[Fn]) + { + case typeSingle: + rFn = float32_to_float64(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + rFn = fpa11->fpreg[Fn].fDouble; + break; + + default: return 0; + } + } + + Fd = getFd(opcode); + /* !! this switch isn't optimized; better (opcode & MASK_ARITHMETIC_OPCODE)>>24, sort of */ + switch (opcode & MASK_ARITHMETIC_OPCODE) + { + /* dyadic opcodes */ + case ADF_CODE: + fpa11->fpreg[Fd].fDouble = float64_add(rFn,rFm, &fpa11->fp_status); + break; + + case MUF_CODE: + case FML_CODE: + fpa11->fpreg[Fd].fDouble = float64_mul(rFn,rFm, &fpa11->fp_status); + break; + + case SUF_CODE: + fpa11->fpreg[Fd].fDouble = float64_sub(rFn,rFm, &fpa11->fp_status); + break; + + case RSF_CODE: + fpa11->fpreg[Fd].fDouble = float64_sub(rFm,rFn, &fpa11->fp_status); + break; + + case DVF_CODE: + case FDV_CODE: + fpa11->fpreg[Fd].fDouble = float64_div(rFn,rFm, &fpa11->fp_status); + break; + + case RDF_CODE: + case FRD_CODE: + fpa11->fpreg[Fd].fDouble = float64_div(rFm,rFn, &fpa11->fp_status); + break; + +#if 0 + case POW_CODE: + fpa11->fpreg[Fd].fDouble = float64_pow(rFn,rFm); + break; + + case RPW_CODE: + fpa11->fpreg[Fd].fDouble = float64_pow(rFm,rFn); + break; +#endif + + case RMF_CODE: + fpa11->fpreg[Fd].fDouble = float64_rem(rFn,rFm, &fpa11->fp_status); + break; + +#if 0 + case POL_CODE: + fpa11->fpreg[Fd].fDouble = float64_pol(rFn,rFm); + break; +#endif + + /* monadic opcodes */ + case MVF_CODE: + fpa11->fpreg[Fd].fDouble = rFm; + break; + + case MNF_CODE: + { + unsigned int *p = (unsigned int*)&rFm; +#ifdef WORDS_BIGENDIAN + p[0] ^= 0x80000000; +#else + p[1] ^= 0x80000000; +#endif + fpa11->fpreg[Fd].fDouble = rFm; + } + break; + + case ABS_CODE: + { + unsigned int *p = (unsigned int*)&rFm; +#ifdef WORDS_BIGENDIAN + p[0] &= 0x7fffffff; +#else + p[1] &= 0x7fffffff; +#endif + fpa11->fpreg[Fd].fDouble = rFm; + } + break; + + case RND_CODE: + case URD_CODE: + fpa11->fpreg[Fd].fDouble = float64_round_to_int(rFm, &fpa11->fp_status); + break; + + case SQT_CODE: + fpa11->fpreg[Fd].fDouble = float64_sqrt(rFm, &fpa11->fp_status); + break; + +#if 0 + case LOG_CODE: + fpa11->fpreg[Fd].fDouble = float64_log(rFm); + break; + + case LGN_CODE: + fpa11->fpreg[Fd].fDouble = float64_ln(rFm); + break; + + case EXP_CODE: + fpa11->fpreg[Fd].fDouble = float64_exp(rFm); + break; + + case SIN_CODE: + fpa11->fpreg[Fd].fDouble = float64_sin(rFm); + break; + + case COS_CODE: + fpa11->fpreg[Fd].fDouble = float64_cos(rFm); + break; + + case TAN_CODE: + fpa11->fpreg[Fd].fDouble = float64_tan(rFm); + break; + + case ASN_CODE: + fpa11->fpreg[Fd].fDouble = float64_arcsin(rFm); + break; + + case ACS_CODE: + fpa11->fpreg[Fd].fDouble = float64_arccos(rFm); + break; + + case ATN_CODE: + fpa11->fpreg[Fd].fDouble = float64_arctan(rFm); + break; +#endif + + case NRM_CODE: + break; + + default: + { + nRc = 0; + } + } + + if (0 != nRc) fpa11->fType[Fd] = typeDouble; + return nRc; +} + +#if 0 +float64 float64_exp(float64 rFm) +{ + return rFm; +//series +} + +float64 float64_ln(float64 rFm) +{ + return rFm; +//series +} + +float64 float64_sin(float64 rFm) +{ + return rFm; +//series +} + +float64 float64_cos(float64 rFm) +{ + return rFm; + //series +} + +#if 0 +float64 float64_arcsin(float64 rFm) +{ +//series +} + +float64 float64_arctan(float64 rFm) +{ + //series +} +#endif + +float64 float64_log(float64 rFm) +{ + return float64_div(float64_ln(rFm),getDoubleConstant(7)); +} + +float64 
float64_tan(float64 rFm) +{ + return float64_div(float64_sin(rFm),float64_cos(rFm)); +} + +float64 float64_arccos(float64 rFm) +{ +return rFm; + //return float64_sub(halfPi,float64_arcsin(rFm)); +} + +float64 float64_pow(float64 rFn,float64 rFm) +{ + return float64_exp(float64_mul(rFm,float64_ln(rFn))); +} + +float64 float64_pol(float64 rFn,float64 rFm) +{ + return float64_arctan(float64_div(rFn,rFm)); +} +#endif diff --git a/target-arm/nwfpe/extended_cpdo.c b/target-arm/nwfpe/extended_cpdo.c new file mode 100644 index 0000000..f5ef623 --- /dev/null +++ b/target-arm/nwfpe/extended_cpdo.c @@ -0,0 +1,273 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" + +floatx80 floatx80_exp(floatx80 Fm); +floatx80 floatx80_ln(floatx80 Fm); +floatx80 floatx80_sin(floatx80 rFm); +floatx80 floatx80_cos(floatx80 rFm); +floatx80 floatx80_arcsin(floatx80 rFm); +floatx80 floatx80_arctan(floatx80 rFm); +floatx80 floatx80_log(floatx80 rFm); +floatx80 floatx80_tan(floatx80 rFm); +floatx80 floatx80_arccos(floatx80 rFm); +floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm); +floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm); + +unsigned int ExtendedCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + floatx80 rFm, rFn; + unsigned int Fd, Fm, Fn, nRc = 1; + + //printk("ExtendedCPDO(0x%08x)\n",opcode); + + Fm = getFm(opcode); + if (CONSTANT_FM(opcode)) + { + rFm = getExtendedConstant(Fm); + } + else + { + switch (fpa11->fType[Fm]) + { + case typeSingle: + rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble, &fpa11->fp_status); + break; + + case typeExtended: + rFm = fpa11->fpreg[Fm].fExtended; + break; + + default: return 0; + } + } + + if (!MONADIC_INSTRUCTION(opcode)) + { + Fn = getFn(opcode); + switch (fpa11->fType[Fn]) + { + case typeSingle: + rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status); + break; + + case typeExtended: + rFn = fpa11->fpreg[Fn].fExtended; + break; + + default: return 0; + } + } + + Fd = getFd(opcode); + switch (opcode & MASK_ARITHMETIC_OPCODE) + { + /* dyadic opcodes */ + case ADF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_add(rFn,rFm, &fpa11->fp_status); + break; + + case MUF_CODE: + case FML_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_mul(rFn,rFm, &fpa11->fp_status); + break; + + case SUF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sub(rFn,rFm, &fpa11->fp_status); + break; + + case RSF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sub(rFm,rFn, &fpa11->fp_status); + break; + + case DVF_CODE: + case FDV_CODE: + fpa11->fpreg[Fd].fExtended = 
floatx80_div(rFn,rFm, &fpa11->fp_status); + break; + + case RDF_CODE: + case FRD_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_div(rFm,rFn, &fpa11->fp_status); + break; + +#if 0 + case POW_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_pow(rFn,rFm); + break; + + case RPW_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_pow(rFm,rFn); + break; +#endif + + case RMF_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_rem(rFn,rFm, &fpa11->fp_status); + break; + +#if 0 + case POL_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_pol(rFn,rFm); + break; +#endif + + /* monadic opcodes */ + case MVF_CODE: + fpa11->fpreg[Fd].fExtended = rFm; + break; + + case MNF_CODE: + rFm.high ^= 0x8000; + fpa11->fpreg[Fd].fExtended = rFm; + break; + + case ABS_CODE: + rFm.high &= 0x7fff; + fpa11->fpreg[Fd].fExtended = rFm; + break; + + case RND_CODE: + case URD_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_round_to_int(rFm, &fpa11->fp_status); + break; + + case SQT_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sqrt(rFm, &fpa11->fp_status); + break; + +#if 0 + case LOG_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_log(rFm); + break; + + case LGN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_ln(rFm); + break; + + case EXP_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_exp(rFm); + break; + + case SIN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_sin(rFm); + break; + + case COS_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_cos(rFm); + break; + + case TAN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_tan(rFm); + break; + + case ASN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_arcsin(rFm); + break; + + case ACS_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_arccos(rFm); + break; + + case ATN_CODE: + fpa11->fpreg[Fd].fExtended = floatx80_arctan(rFm); + break; +#endif + + case NRM_CODE: + break; + + default: + { + nRc = 0; + } + } + + if (0 != nRc) fpa11->fType[Fd] = typeExtended; + return nRc; +} + +#if 0 +floatx80 floatx80_exp(floatx80 Fm) +{ +//series +} + +floatx80 floatx80_ln(floatx80 Fm) +{ +//series +} + +floatx80 floatx80_sin(floatx80 rFm) +{ +//series +} + +floatx80 floatx80_cos(floatx80 rFm) +{ +//series +} + +floatx80 floatx80_arcsin(floatx80 rFm) +{ +//series +} + +floatx80 floatx80_arctan(floatx80 rFm) +{ + //series +} + +floatx80 floatx80_log(floatx80 rFm) +{ + return floatx80_div(floatx80_ln(rFm),getExtendedConstant(7)); +} + +floatx80 floatx80_tan(floatx80 rFm) +{ + return floatx80_div(floatx80_sin(rFm),floatx80_cos(rFm)); +} + +floatx80 floatx80_arccos(floatx80 rFm) +{ + //return floatx80_sub(halfPi,floatx80_arcsin(rFm)); +} + +floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm) +{ + return floatx80_exp(floatx80_mul(rFm,floatx80_ln(rFn))); +} + +floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm) +{ + return floatx80_arctan(floatx80_div(rFn,rFm)); +} +#endif diff --git a/target-arm/nwfpe/fpa11.c b/target-arm/nwfpe/fpa11.c new file mode 100644 index 0000000..a8141e7 --- /dev/null +++ b/target-arm/nwfpe/fpa11.c @@ -0,0 +1,237 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" + +#include "fpopcode.h" + +//#include "fpmodule.h" +//#include "fpmodule.inl" + +//#include + +#include + +/* forward declarations */ +unsigned int EmulateCPDO(const unsigned int); +unsigned int EmulateCPDT(const unsigned int); +unsigned int EmulateCPRT(const unsigned int); + +FPA11* qemufpa=0; +CPUARMState* user_registers; + +/* Reset the FPA11 chip. Called to initialize and reset the emulator. */ +void resetFPA11(void) +{ + int i; + FPA11 *fpa11 = GET_FPA11(); + + /* initialize the register type array */ + for (i=0;i<=7;i++) + { + fpa11->fType[i] = typeNone; + } + + /* FPSR: set system id to FP_EMULATOR, set AC, clear all other bits */ + fpa11->fpsr = FP_EMULATOR | BIT_AC; + + /* FPCR: set SB, AB and DA bits, clear all others */ +#if MAINTAIN_FPCR + fpa11->fpcr = MASK_RESET; +#endif +} + +void SetRoundingMode(const unsigned int opcode) +{ + int rounding_mode; + FPA11 *fpa11 = GET_FPA11(); + +#if MAINTAIN_FPCR + fpa11->fpcr &= ~MASK_ROUNDING_MODE; +#endif + switch (opcode & MASK_ROUNDING_MODE) + { + default: + case ROUND_TO_NEAREST: + rounding_mode = float_round_nearest_even; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_NEAREST; +#endif + break; + + case ROUND_TO_PLUS_INFINITY: + rounding_mode = float_round_up; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_PLUS_INFINITY; +#endif + break; + + case ROUND_TO_MINUS_INFINITY: + rounding_mode = float_round_down; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_MINUS_INFINITY; +#endif + break; + + case ROUND_TO_ZERO: + rounding_mode = float_round_to_zero; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_TO_ZERO; +#endif + break; + } + set_float_rounding_mode(rounding_mode, &fpa11->fp_status); +} + +void SetRoundingPrecision(const unsigned int opcode) +{ + int rounding_precision; + FPA11 *fpa11 = GET_FPA11(); +#if MAINTAIN_FPCR + fpa11->fpcr &= ~MASK_ROUNDING_PRECISION; +#endif + switch (opcode & MASK_ROUNDING_PRECISION) + { + case ROUND_SINGLE: + rounding_precision = 32; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_SINGLE; +#endif + break; + + case ROUND_DOUBLE: + rounding_precision = 64; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_DOUBLE; +#endif + break; + + case ROUND_EXTENDED: + rounding_precision = 80; +#if MAINTAIN_FPCR + fpa11->fpcr |= ROUND_EXTENDED; +#endif + break; + + default: rounding_precision = 80; + } + set_floatx80_rounding_precision(rounding_precision, &fpa11->fp_status); +} + +/* Emulate the instruction in the opcode. */ +/* ??? This is not thread safe. */ +unsigned int EmulateAll(unsigned int opcode, FPA11* qfpa, CPUARMState* qregs) +{ + unsigned int nRc = 0; +// unsigned long flags; + FPA11 *fpa11; +// save_flags(flags); sti(); + + qemufpa=qfpa; + user_registers=qregs; + +#if 0 + fprintf(stderr,"emulating FP insn 0x%08x, PC=0x%08x\n", + opcode, qregs[REG_PC]); +#endif + fpa11 = GET_FPA11(); + + if (fpa11->initflag == 0) /* good place for __builtin_expect */ + { + resetFPA11(); + SetRoundingMode(ROUND_TO_NEAREST); + SetRoundingPrecision(ROUND_EXTENDED); + fpa11->initflag = 1; + } + + if (TEST_OPCODE(opcode,MASK_CPRT)) + { + //fprintf(stderr,"emulating CPRT\n"); + /* Emulate conversion opcodes. */ + /* Emulate register transfer opcodes. */ + /* Emulate comparison opcodes. 
*/ + nRc = EmulateCPRT(opcode); + } + else if (TEST_OPCODE(opcode,MASK_CPDO)) + { + //fprintf(stderr,"emulating CPDO\n"); + /* Emulate monadic arithmetic opcodes. */ + /* Emulate dyadic arithmetic opcodes. */ + nRc = EmulateCPDO(opcode); + } + else if (TEST_OPCODE(opcode,MASK_CPDT)) + { + //fprintf(stderr,"emulating CPDT\n"); + /* Emulate load/store opcodes. */ + /* Emulate load/store multiple opcodes. */ + nRc = EmulateCPDT(opcode); + } + else + { + /* Invalid instruction detected. Return FALSE. */ + nRc = 0; + } + +// restore_flags(flags); + + //printf("returning %d\n",nRc); + return(nRc); +} + +#if 0 +unsigned int EmulateAll1(unsigned int opcode) +{ + switch ((opcode >> 24) & 0xf) + { + case 0xc: + case 0xd: + if ((opcode >> 20) & 0x1) + { + switch ((opcode >> 8) & 0xf) + { + case 0x1: return PerformLDF(opcode); break; + case 0x2: return PerformLFM(opcode); break; + default: return 0; + } + } + else + { + switch ((opcode >> 8) & 0xf) + { + case 0x1: return PerformSTF(opcode); break; + case 0x2: return PerformSFM(opcode); break; + default: return 0; + } + } + break; + + case 0xe: + if (opcode & 0x10) + return EmulateCPDO(opcode); + else + return EmulateCPRT(opcode); + break; + + default: return 0; + } +} +#endif + diff --git a/target-arm/nwfpe/fpa11.h b/target-arm/nwfpe/fpa11.h new file mode 100644 index 0000000..8751696 --- /dev/null +++ b/target-arm/nwfpe/fpa11.h @@ -0,0 +1,122 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPA11_H__ +#define __FPA11_H__ + +#include +#include +#include + +#include + +#define GET_FPA11() (qemufpa) + +/* + * The processes registers are always at the very top of the 8K + * stack+task struct. Use the same method as 'current' uses to + * reach them. + */ +extern CPUARMState *user_registers; + +#define GET_USERREG() (user_registers) + +/* Need task_struct */ +//#include + +/* includes */ +#include "fpsr.h" /* FP control and status register definitions */ +#include "softfloat.h" + +#define typeNone 0x00 +#define typeSingle 0x01 +#define typeDouble 0x02 +#define typeExtended 0x03 + +/* + * This must be no more and no less than 12 bytes. + */ +typedef union tagFPREG { + floatx80 fExtended; + float64 fDouble; + float32 fSingle; +} FPREG; + +/* + * FPA11 device model. + * + * This structure is exported to user space. Do not re-order. + * Only add new stuff to the end, and do not change the size of + * any element. Elements of this structure are used by user + * space, and must match struct user_fp in include/asm-arm/user.h. + * We include the byte offsets below for documentation purposes. + * + * The size of this structure and FPREG are checked by fpmodule.c + * on initialisation. If the rules have been broken, NWFPE will + * not initialise. 
+ */ +typedef struct tagFPA11 { +/* 0 */ FPREG fpreg[8]; /* 8 floating point registers */ +/* 96 */ FPSR fpsr; /* floating point status register */ +/* 100 */ FPCR fpcr; /* floating point control register */ +/* 104 */ unsigned char fType[8]; /* type of floating point value held in + floating point registers. One of none + single, double or extended. */ +/* 112 */ int initflag; /* this is special. The kernel guarantees + to set it to 0 when a thread is launched, + so we can use it to detect whether this + instance of the emulator needs to be + initialised. */ + float_status fp_status; /* QEMU float emulator status */ +} FPA11; + +extern FPA11* qemufpa; + +extern void resetFPA11(void); +extern void SetRoundingMode(const unsigned int); +extern void SetRoundingPrecision(const unsigned int); + +static inline unsigned int readRegister(unsigned int reg) +{ + return (user_registers->regs[(reg)]); +} + +static inline void writeRegister(unsigned int x, unsigned int y) +{ +#if 0 + printf("writing %d to r%d\n",y,x); +#endif + user_registers->regs[(x)]=(y); +} + +static inline void writeConditionCodes(unsigned int x) +{ + cpsr_write(user_registers,x,CPSR_NZCV); +} + +#define REG_PC 15 + +unsigned int EmulateAll(unsigned int opcode, FPA11* qfpa, CPUARMState* qregs); + +/* included only for get_user/put_user macros */ +#include "qemu.h" + +#endif diff --git a/target-arm/nwfpe/fpa11.inl b/target-arm/nwfpe/fpa11.inl new file mode 100644 index 0000000..7183ec9 --- /dev/null +++ b/target-arm/nwfpe/fpa11.inl @@ -0,0 +1,51 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" + +/* Read and write floating point status register */ +static inline unsigned int readFPSR(void) +{ + FPA11 *fpa11 = GET_FPA11(); + return(fpa11->fpsr); +} + +static inline void writeFPSR(FPSR reg) +{ + FPA11 *fpa11 = GET_FPA11(); + /* the sysid byte in the status register is readonly */ + fpa11->fpsr = (fpa11->fpsr & MASK_SYSID) | (reg & ~MASK_SYSID); +} + +/* Read and write floating point control register */ +static inline FPCR readFPCR(void) +{ + FPA11 *fpa11 = GET_FPA11(); + /* clear SB, AB and DA bits before returning FPCR */ + return(fpa11->fpcr & ~MASK_RFC); +} + +static inline void writeFPCR(FPCR reg) +{ + FPA11 *fpa11 = GET_FPA11(); + fpa11->fpcr &= ~MASK_WFC; /* clear SB, AB and DA bits */ + fpa11->fpcr |= (reg & MASK_WFC); /* write SB, AB and DA bits */ +} diff --git a/target-arm/nwfpe/fpa11_cpdo.c b/target-arm/nwfpe/fpa11_cpdo.c new file mode 100644 index 0000000..cc8aa87 --- /dev/null +++ b/target-arm/nwfpe/fpa11_cpdo.c @@ -0,0 +1,117 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "fpopcode.h" + +unsigned int SingleCPDO(const unsigned int opcode); +unsigned int DoubleCPDO(const unsigned int opcode); +unsigned int ExtendedCPDO(const unsigned int opcode); + +unsigned int EmulateCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int Fd, nType, nDest, nRc = 1; + + //printk("EmulateCPDO(0x%08x)\n",opcode); + + /* Get the destination size. If not valid let Linux perform + an invalid instruction trap. */ + nDest = getDestinationSize(opcode); + if (typeNone == nDest) return 0; + + SetRoundingMode(opcode); + + /* Compare the size of the operands in Fn and Fm. + Choose the largest size and perform operations in that size, + in order to make use of all the precision of the operands. + If Fm is a constant, we just grab a constant of a size + matching the size of the operand in Fn. */ + if (MONADIC_INSTRUCTION(opcode)) + nType = nDest; + else + nType = fpa11->fType[getFn(opcode)]; + + if (!CONSTANT_FM(opcode)) + { + register unsigned int Fm = getFm(opcode); + if (nType < fpa11->fType[Fm]) + { + nType = fpa11->fType[Fm]; + } + } + + switch (nType) + { + case typeSingle : nRc = SingleCPDO(opcode); break; + case typeDouble : nRc = DoubleCPDO(opcode); break; + case typeExtended : nRc = ExtendedCPDO(opcode); break; + default : nRc = 0; + } + + /* If the operation succeeded, check to see if the result in the + destination register is the correct size. If not force it + to be. 
*/ + Fd = getFd(opcode); + nType = fpa11->fType[Fd]; + if ((0 != nRc) && (nDest != nType)) + { + switch (nDest) + { + case typeSingle: + { + if (typeDouble == nType) + fpa11->fpreg[Fd].fSingle = + float64_to_float32(fpa11->fpreg[Fd].fDouble, &fpa11->fp_status); + else + fpa11->fpreg[Fd].fSingle = + floatx80_to_float32(fpa11->fpreg[Fd].fExtended, &fpa11->fp_status); + } + break; + + case typeDouble: + { + if (typeSingle == nType) + fpa11->fpreg[Fd].fDouble = + float32_to_float64(fpa11->fpreg[Fd].fSingle, &fpa11->fp_status); + else + fpa11->fpreg[Fd].fDouble = + floatx80_to_float64(fpa11->fpreg[Fd].fExtended, &fpa11->fp_status); + } + break; + + case typeExtended: + { + if (typeSingle == nType) + fpa11->fpreg[Fd].fExtended = + float32_to_floatx80(fpa11->fpreg[Fd].fSingle, &fpa11->fp_status); + else + fpa11->fpreg[Fd].fExtended = + float64_to_floatx80(fpa11->fpreg[Fd].fDouble, &fpa11->fp_status); + } + break; + } + + fpa11->fType[Fd] = nDest; + } + + return nRc; +} diff --git a/target-arm/nwfpe/fpa11_cpdt.c b/target-arm/nwfpe/fpa11_cpdt.c new file mode 100644 index 0000000..914a86f --- /dev/null +++ b/target-arm/nwfpe/fpa11_cpdt.c @@ -0,0 +1,376 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + (c) Philip Blundell, 1998 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" +//#include "fpmodule.h" +//#include "fpmodule.inl" + +//#include + +static inline +void loadSingle(const unsigned int Fn,const unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + fpa11->fType[Fn] = typeSingle; + fpa11->fpreg[Fn].fSingle = tget32(addr); +} + +static inline +void loadDouble(const unsigned int Fn,const unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + unsigned int *p; + p = (unsigned int*)&fpa11->fpreg[Fn].fDouble; + fpa11->fType[Fn] = typeDouble; +#ifdef WORDS_BIGENDIAN + p[0] = tget32(addr); /* sign & exponent */ + p[1] = tget32(addr + 4); +#else + p[0] = tget32(addr + 4); + p[1] = tget32(addr); /* sign & exponent */ +#endif +} + +static inline +void loadExtended(const unsigned int Fn,const unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + unsigned int *p; + p = (unsigned int*)&fpa11->fpreg[Fn].fExtended; + fpa11->fType[Fn] = typeExtended; + p[0] = tget32(addr); /* sign & exponent */ + p[1] = tget32(addr + 8); /* ls bits */ + p[2] = tget32(addr + 4); /* ms bits */ +} + +static inline +void loadMultiple(const unsigned int Fn,const unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + register unsigned int *p; + unsigned long x; + + p = (unsigned int*)&(fpa11->fpreg[Fn]); + x = tget32(addr); + fpa11->fType[Fn] = (x >> 14) & 0x00000003; + + switch (fpa11->fType[Fn]) + { + case typeSingle: + case typeDouble: + { + p[0] = tget32(addr + 8); /* Single */ + p[1] = tget32(addr + 4); /* double msw */ + p[2] = 0; /* empty */ + } + break; + + case typeExtended: + { + p[1] = tget32(addr + 8); + p[2] = tget32(addr + 4); /* msw */ + p[0] = (x & 0x80003fff); + } + break; + } +} + +static inline +void storeSingle(const unsigned int Fn,unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + float32 val; + register unsigned int *p = (unsigned int*)&val; + + switch (fpa11->fType[Fn]) + { + case typeDouble: + val = float64_to_float32(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status); + break; + + case typeExtended: + val = floatx80_to_float32(fpa11->fpreg[Fn].fExtended, &fpa11->fp_status); + break; + + default: val = fpa11->fpreg[Fn].fSingle; + } + + tput32(addr, p[0]); +} + +static inline +void storeDouble(const unsigned int Fn,unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + float64 val; + register unsigned int *p = (unsigned int*)&val; + + switch (fpa11->fType[Fn]) + { + case typeSingle: + val = float32_to_float64(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status); + break; + + case typeExtended: + val = floatx80_to_float64(fpa11->fpreg[Fn].fExtended, &fpa11->fp_status); + break; + + default: val = fpa11->fpreg[Fn].fDouble; + } +#ifdef WORDS_BIGENDIAN + tput32(addr, p[0]); /* msw */ + tput32(addr + 4, p[1]); /* lsw */ +#else + tput32(addr, p[1]); /* msw */ + tput32(addr + 4, p[0]); /* lsw */ +#endif +} + +static inline +void storeExtended(const unsigned int Fn,unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + floatx80 val; + register unsigned int *p = (unsigned int*)&val; + + switch (fpa11->fType[Fn]) + { + case typeSingle: + val = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + val = 
float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status); + break; + + default: val = fpa11->fpreg[Fn].fExtended; + } + + tput32(addr, p[0]); /* sign & exp */ + tput32(addr + 8, p[1]); + tput32(addr + 4, p[2]); /* msw */ +} + +static inline +void storeMultiple(const unsigned int Fn,unsigned int *pMem) +{ + target_ulong addr = (target_ulong)(long)pMem; + FPA11 *fpa11 = GET_FPA11(); + register unsigned int nType, *p; + + p = (unsigned int*)&(fpa11->fpreg[Fn]); + nType = fpa11->fType[Fn]; + + switch (nType) + { + case typeSingle: + case typeDouble: + { + tput32(addr + 8, p[0]); /* single */ + tput32(addr + 4, p[1]); /* double msw */ + tput32(addr, nType << 14); + } + break; + + case typeExtended: + { + tput32(addr + 4, p[2]); /* msw */ + tput32(addr + 8, p[1]); + tput32(addr, (p[0] & 0x80003fff) | (nType << 14)); + } + break; + } +} + +unsigned int PerformLDF(const unsigned int opcode) +{ + unsigned int *pBase, *pAddress, *pFinal, nRc = 1, + write_back = WRITE_BACK(opcode); + + //printk("PerformLDF(0x%08x), Fd = 0x%08x\n",opcode,getFd(opcode)); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + switch (opcode & MASK_TRANSFER_LENGTH) + { + case TRANSFER_SINGLE : loadSingle(getFd(opcode),pAddress); break; + case TRANSFER_DOUBLE : loadDouble(getFd(opcode),pAddress); break; + case TRANSFER_EXTENDED: loadExtended(getFd(opcode),pAddress); break; + default: nRc = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return nRc; +} + +unsigned int PerformSTF(const unsigned int opcode) +{ + unsigned int *pBase, *pAddress, *pFinal, nRc = 1, + write_back = WRITE_BACK(opcode); + + //printk("PerformSTF(0x%08x), Fd = 0x%08x\n",opcode,getFd(opcode)); + SetRoundingMode(ROUND_TO_NEAREST); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + switch (opcode & MASK_TRANSFER_LENGTH) + { + case TRANSFER_SINGLE : storeSingle(getFd(opcode),pAddress); break; + case TRANSFER_DOUBLE : storeDouble(getFd(opcode),pAddress); break; + case TRANSFER_EXTENDED: storeExtended(getFd(opcode),pAddress); break; + default: nRc = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return nRc; +} + +unsigned int PerformLFM(const unsigned int opcode) +{ + unsigned int i, Fd, *pBase, *pAddress, *pFinal, + write_back = WRITE_BACK(opcode); + + pBase = (unsigned int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + Fd = getFd(opcode); + for (i=getRegisterCount(opcode);i>0;i--) + { + loadMultiple(Fd,pAddress); + pAddress += 3; Fd++; + if (Fd == 8) Fd = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return 1; +} + +unsigned int PerformSFM(const unsigned int opcode) +{ + unsigned int i, Fd, *pBase, *pAddress, *pFinal, + write_back = WRITE_BACK(opcode); + + pBase = (unsigned 
int*)readRegister(getRn(opcode)); + if (REG_PC == getRn(opcode)) + { + pBase += 2; + write_back = 0; + } + + pFinal = pBase; + if (BIT_UP_SET(opcode)) + pFinal += getOffset(opcode); + else + pFinal -= getOffset(opcode); + + if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; + + Fd = getFd(opcode); + for (i=getRegisterCount(opcode);i>0;i--) + { + storeMultiple(Fd,pAddress); + pAddress += 3; Fd++; + if (Fd == 8) Fd = 0; + } + + if (write_back) writeRegister(getRn(opcode),(unsigned int)pFinal); + return 1; +} + +#if 1 +unsigned int EmulateCPDT(const unsigned int opcode) +{ + unsigned int nRc = 0; + + //printk("EmulateCPDT(0x%08x)\n",opcode); + + if (LDF_OP(opcode)) + { + nRc = PerformLDF(opcode); + } + else if (LFM_OP(opcode)) + { + nRc = PerformLFM(opcode); + } + else if (STF_OP(opcode)) + { + nRc = PerformSTF(opcode); + } + else if (SFM_OP(opcode)) + { + nRc = PerformSFM(opcode); + } + else + { + nRc = 0; + } + + return nRc; +} +#endif diff --git a/target-arm/nwfpe/fpa11_cprt.c b/target-arm/nwfpe/fpa11_cprt.c new file mode 100644 index 0000000..91f2d80 --- /dev/null +++ b/target-arm/nwfpe/fpa11_cprt.c @@ -0,0 +1,290 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + (c) Philip Blundell, 1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" +#include "fpa11.inl" +//#include "fpmodule.h" +//#include "fpmodule.inl" + +extern flag floatx80_is_nan(floatx80); +extern flag float64_is_nan( float64); +extern flag float32_is_nan( float32); + +void SetRoundingMode(const unsigned int opcode); + +unsigned int PerformFLT(const unsigned int opcode); +unsigned int PerformFIX(const unsigned int opcode); + +static unsigned int +PerformComparison(const unsigned int opcode); + +unsigned int EmulateCPRT(const unsigned int opcode) +{ + unsigned int nRc = 1; + + //printk("EmulateCPRT(0x%08x)\n",opcode); + + if (opcode & 0x800000) + { + /* This is some variant of a comparison (PerformComparison will + sort out which one). Since most of the other CPRT + instructions are oddball cases of some sort or other it makes + sense to pull this out into a fast path. */ + return PerformComparison(opcode); + } + + /* Hint to GCC that we'd like a jump table rather than a load of CMPs */ + switch ((opcode & 0x700000) >> 20) + { + case FLT_CODE >> 20: nRc = PerformFLT(opcode); break; + case FIX_CODE >> 20: nRc = PerformFIX(opcode); break; + + case WFS_CODE >> 20: writeFPSR(readRegister(getRd(opcode))); break; + case RFS_CODE >> 20: writeRegister(getRd(opcode),readFPSR()); break; + +#if 0 /* We currently have no use for the FPCR, so there's no point + in emulating it. 
*/ + case WFC_CODE >> 20: writeFPCR(readRegister(getRd(opcode))); + case RFC_CODE >> 20: writeRegister(getRd(opcode),readFPCR()); break; +#endif + + default: nRc = 0; + } + + return nRc; +} + +unsigned int PerformFLT(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + + unsigned int nRc = 1; + SetRoundingMode(opcode); + + switch (opcode & MASK_ROUNDING_PRECISION) + { + case ROUND_SINGLE: + { + fpa11->fType[getFn(opcode)] = typeSingle; + fpa11->fpreg[getFn(opcode)].fSingle = + int32_to_float32(readRegister(getRd(opcode)), &fpa11->fp_status); + } + break; + + case ROUND_DOUBLE: + { + fpa11->fType[getFn(opcode)] = typeDouble; + fpa11->fpreg[getFn(opcode)].fDouble = + int32_to_float64(readRegister(getRd(opcode)), &fpa11->fp_status); + } + break; + + case ROUND_EXTENDED: + { + fpa11->fType[getFn(opcode)] = typeExtended; + fpa11->fpreg[getFn(opcode)].fExtended = + int32_to_floatx80(readRegister(getRd(opcode)), &fpa11->fp_status); + } + break; + + default: nRc = 0; + } + + return nRc; +} + +unsigned int PerformFIX(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int nRc = 1; + unsigned int Fn = getFm(opcode); + + SetRoundingMode(opcode); + + switch (fpa11->fType[Fn]) + { + case typeSingle: + { + writeRegister(getRd(opcode), + float32_to_int32(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status)); + } + break; + + case typeDouble: + { + //printf("F%d is 0x%" PRIx64 "\n",Fn,fpa11->fpreg[Fn].fDouble); + writeRegister(getRd(opcode), + float64_to_int32(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status)); + } + break; + + case typeExtended: + { + writeRegister(getRd(opcode), + floatx80_to_int32(fpa11->fpreg[Fn].fExtended, &fpa11->fp_status)); + } + break; + + default: nRc = 0; + } + + return nRc; +} + + +static unsigned int __inline__ +PerformComparisonOperation(floatx80 Fn, floatx80 Fm) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int flags = 0; + + /* test for less than condition */ + if (floatx80_lt(Fn,Fm, &fpa11->fp_status)) + { + flags |= CC_NEGATIVE; + } + + /* test for equal condition */ + if (floatx80_eq(Fn,Fm, &fpa11->fp_status)) + { + flags |= CC_ZERO; + } + + /* test for greater than or equal condition */ + if (floatx80_lt(Fm,Fn, &fpa11->fp_status)) + { + flags |= CC_CARRY; + } + + writeConditionCodes(flags); + return 1; +} + +/* This instruction sets the flags N, Z, C, V in the FPSR. */ + +static unsigned int PerformComparison(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + unsigned int Fn, Fm; + floatx80 rFn, rFm; + int e_flag = opcode & 0x400000; /* 1 if CxFE */ + int n_flag = opcode & 0x200000; /* 1 if CNxx */ + unsigned int flags = 0; + + //printk("PerformComparison(0x%08x)\n",opcode); + + Fn = getFn(opcode); + Fm = getFm(opcode); + + /* Check for unordered condition and convert all operands to 80-bit + format. + ?? Might be some mileage in avoiding this conversion if possible. + Eg, if both operands are 32-bit, detect this and do a 32-bit + comparison (cheaper than an 80-bit one). 
*/ + switch (fpa11->fType[Fn]) + { + case typeSingle: + //printk("single.\n"); + if (float32_is_nan(fpa11->fpreg[Fn].fSingle)) + goto unordered; + rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + //printk("double.\n"); + if (float64_is_nan(fpa11->fpreg[Fn].fDouble)) + goto unordered; + rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status); + break; + + case typeExtended: + //printk("extended.\n"); + if (floatx80_is_nan(fpa11->fpreg[Fn].fExtended)) + goto unordered; + rFn = fpa11->fpreg[Fn].fExtended; + break; + + default: return 0; + } + + if (CONSTANT_FM(opcode)) + { + //printk("Fm is a constant: #%d.\n",Fm); + rFm = getExtendedConstant(Fm); + if (floatx80_is_nan(rFm)) + goto unordered; + } + else + { + //printk("Fm = r%d which contains a ",Fm); + switch (fpa11->fType[Fm]) + { + case typeSingle: + //printk("single.\n"); + if (float32_is_nan(fpa11->fpreg[Fm].fSingle)) + goto unordered; + rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status); + break; + + case typeDouble: + //printk("double.\n"); + if (float64_is_nan(fpa11->fpreg[Fm].fDouble)) + goto unordered; + rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble, &fpa11->fp_status); + break; + + case typeExtended: + //printk("extended.\n"); + if (floatx80_is_nan(fpa11->fpreg[Fm].fExtended)) + goto unordered; + rFm = fpa11->fpreg[Fm].fExtended; + break; + + default: return 0; + } + } + + if (n_flag) + { + rFm.high ^= 0x8000; + } + + return PerformComparisonOperation(rFn,rFm); + + unordered: + /* ?? The FPA data sheet is pretty vague about this, in particular + about whether the non-E comparisons can ever raise exceptions. + This implementation is based on a combination of what it says in + the data sheet, observation of how the Acorn emulator actually + behaves (and how programs expect it to) and guesswork. */ + flags |= CC_OVERFLOW; + flags &= ~(CC_ZERO | CC_NEGATIVE); + + if (BIT_AC & readFPSR()) flags |= CC_CARRY; + + if (e_flag) float_raise(float_flag_invalid, &fpa11->fp_status); + + writeConditionCodes(flags); + return 1; +} diff --git a/target-arm/nwfpe/fpopcode.c b/target-arm/nwfpe/fpopcode.c new file mode 100644 index 0000000..d29e913 --- /dev/null +++ b/target-arm/nwfpe/fpopcode.c @@ -0,0 +1,148 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" +#include "fpsr.h" +//#include "fpmodule.h" +//#include "fpmodule.inl" + +const floatx80 floatx80Constant[] = { + { 0x0000000000000000ULL, 0x0000}, /* extended 0.0 */ + { 0x8000000000000000ULL, 0x3fff}, /* extended 1.0 */ + { 0x8000000000000000ULL, 0x4000}, /* extended 2.0 */ + { 0xc000000000000000ULL, 0x4000}, /* extended 3.0 */ + { 0x8000000000000000ULL, 0x4001}, /* extended 4.0 */ + { 0xa000000000000000ULL, 0x4001}, /* extended 5.0 */ + { 0x8000000000000000ULL, 0x3ffe}, /* extended 0.5 */ + { 0xa000000000000000ULL, 0x4002} /* extended 10.0 */ +}; + +const float64 float64Constant[] = { + 0x0000000000000000ULL, /* double 0.0 */ + 0x3ff0000000000000ULL, /* double 1.0 */ + 0x4000000000000000ULL, /* double 2.0 */ + 0x4008000000000000ULL, /* double 3.0 */ + 0x4010000000000000ULL, /* double 4.0 */ + 0x4014000000000000ULL, /* double 5.0 */ + 0x3fe0000000000000ULL, /* double 0.5 */ + 0x4024000000000000ULL /* double 10.0 */ +}; + +const float32 float32Constant[] = { + 0x00000000, /* single 0.0 */ + 0x3f800000, /* single 1.0 */ + 0x40000000, /* single 2.0 */ + 0x40400000, /* single 3.0 */ + 0x40800000, /* single 4.0 */ + 0x40a00000, /* single 5.0 */ + 0x3f000000, /* single 0.5 */ + 0x41200000 /* single 10.0 */ +}; + +unsigned int getTransferLength(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_TRANSFER_LENGTH) + { + case 0x00000000: nRc = 1; break; /* single precision */ + case 0x00008000: nRc = 2; break; /* double precision */ + case 0x00400000: nRc = 3; break; /* extended precision */ + default: nRc = 0; + } + + return(nRc); +} + +unsigned int getRegisterCount(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_REGISTER_COUNT) + { + case 0x00000000: nRc = 4; break; + case 0x00008000: nRc = 1; break; + case 0x00400000: nRc = 2; break; + case 0x00408000: nRc = 3; break; + default: nRc = 0; + } + + return(nRc); +} + +unsigned int getRoundingPrecision(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_ROUNDING_PRECISION) + { + case 0x00000000: nRc = 1; break; + case 0x00000080: nRc = 2; break; + case 0x00080000: nRc = 3; break; + default: nRc = 0; + } + + return(nRc); +} + +unsigned int getDestinationSize(const unsigned int opcode) +{ + unsigned int nRc; + + switch (opcode & MASK_DESTINATION_SIZE) + { + case 0x00000000: nRc = typeSingle; break; + case 0x00000080: nRc = typeDouble; break; + case 0x00080000: nRc = typeExtended; break; + default: nRc = typeNone; + } + + return(nRc); +} + +/* condition code lookup table + index into the table is test code: EQ, NE, ... 
LT, GT, AL, NV + bit position in short is condition code: NZCV */ +static const unsigned short aCC[16] = { + 0xF0F0, // EQ == Z set + 0x0F0F, // NE + 0xCCCC, // CS == C set + 0x3333, // CC + 0xFF00, // MI == N set + 0x00FF, // PL + 0xAAAA, // VS == V set + 0x5555, // VC + 0x0C0C, // HI == C set && Z clear + 0xF3F3, // LS == C clear || Z set + 0xAA55, // GE == (N==V) + 0x55AA, // LT == (N!=V) + 0x0A05, // GT == (!Z && (N==V)) + 0xF5FA, // LE == (Z || (N!=V)) + 0xFFFF, // AL always + 0 // NV +}; + +unsigned int checkCondition(const unsigned int opcode, const unsigned int ccodes) +{ + return (aCC[opcode>>28] >> (ccodes>>28)) & 1; +} diff --git a/target-arm/nwfpe/fpopcode.h b/target-arm/nwfpe/fpopcode.h new file mode 100644 index 0000000..13c7419 --- /dev/null +++ b/target-arm/nwfpe/fpopcode.h @@ -0,0 +1,390 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPOPCODE_H__ +#define __FPOPCODE_H__ + +/* +ARM Floating Point Instruction Classes +| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +|c o n d|1 1 0 P|U|u|W|L| Rn |v| Fd |0|0|0|1| o f f s e t | CPDT +|c o n d|1 1 0 P|U|w|W|L| Rn |x| Fd |0|0|0|1| o f f s e t | CPDT +| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +|c o n d|1 1 1 0|a|b|c|d|e| Fn |j| Fd |0|0|0|1|f|g|h|0|i| Fm | CPDO +|c o n d|1 1 1 0|a|b|c|L|e| Fn | Rd |0|0|0|1|f|g|h|1|i| Fm | CPRT +|c o n d|1 1 1 0|a|b|c|1|e| Fn |1|1|1|1|0|0|0|1|f|g|h|1|i| Fm | comparisons +| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + +CPDT data transfer instructions + LDF, STF, LFM, SFM + +CPDO dyadic arithmetic instructions + ADF, MUF, SUF, RSF, DVF, RDF, + POW, RPW, RMF, FML, FDV, FRD, POL + +CPDO monadic arithmetic instructions + MVF, MNF, ABS, RND, SQT, LOG, LGN, EXP, + SIN, COS, TAN, ASN, ACS, ATN, URD, NRM + +CPRT joint arithmetic/data transfer instructions + FIX (arithmetic followed by load/store) + FLT (load/store followed by arithmetic) + CMF, CNF CMFE, CNFE (comparisons) + WFS, RFS (write/read floating point status register) + WFC, RFC (write/read floating point control register) + +cond condition codes +P pre/post index bit: 0 = postindex, 1 = preindex +U up/down bit: 0 = stack grows down, 1 = stack grows up +W write back bit: 1 = update base register (Rn) +L load/store bit: 0 = store, 1 = load +Rn base register +Rd destination/source register +Fd floating point destination register +Fn floating point source register +Fm floating point source register or floating point constant + +uv transfer length (TABLE 1) +wx register count (TABLE 2) +abcd arithmetic opcode (TABLES 3 & 4) +ef destination size (rounding precision) (TABLE 5) +gh rounding mode (TABLE 6) +j dyadic/monadic bit: 0 = dyadic, 1 = monadic +i constant bit: 1 = constant (TABLE 6) +*/ + +/* +TABLE 1 
+
++-------------------------+---+---+---------+---------+
+| Precision               | u | v | FPSR.EP | length  |
++-------------------------+---+---+---------+---------+
+| Single                  | 0 | 0 | x       | 1 words |
+| Double                  | 1 | 1 | x       | 2 words |
+| Extended                | 1 | 1 | x       | 3 words |
+| Packed decimal          | 1 | 1 | 0       | 3 words |
+| Expanded packed decimal | 1 | 1 | 1       | 4 words |
++-------------------------+---+---+---------+---------+
+Note: x = don't care
+*/
+
+/*
+TABLE 2
++---+---+---------------------------------+
+| w | x | Number of registers to transfer |
++---+---+---------------------------------+
+| 0 | 1 | 1                               |
+| 1 | 0 | 2                               |
+| 1 | 1 | 3                               |
+| 0 | 0 | 4                               |
++---+---+---------------------------------+
+*/
+
+/*
+TABLE 3: Dyadic Floating Point Opcodes
++---+---+---+---+----------+-----------------------+-----------------------+
+| a | b | c | d | Mnemonic | Description           | Operation             |
++---+---+---+---+----------+-----------------------+-----------------------+
+| 0 | 0 | 0 | 0 | ADF      | Add                   | Fd := Fn + Fm         |
+| 0 | 0 | 0 | 1 | MUF      | Multiply              | Fd := Fn * Fm         |
+| 0 | 0 | 1 | 0 | SUF      | Subtract              | Fd := Fn - Fm         |
+| 0 | 0 | 1 | 1 | RSF      | Reverse subtract      | Fd := Fm - Fn         |
+| 0 | 1 | 0 | 0 | DVF      | Divide                | Fd := Fn / Fm         |
+| 0 | 1 | 0 | 1 | RDF      | Reverse divide        | Fd := Fm / Fn         |
+| 0 | 1 | 1 | 0 | POW      | Power                 | Fd := Fn ^ Fm         |
+| 0 | 1 | 1 | 1 | RPW      | Reverse power         | Fd := Fm ^ Fn         |
+| 1 | 0 | 0 | 0 | RMF      | Remainder             | Fd := IEEE rem(Fn/Fm) |
+| 1 | 0 | 0 | 1 | FML      | Fast Multiply         | Fd := Fn * Fm         |
+| 1 | 0 | 1 | 0 | FDV      | Fast Divide           | Fd := Fn / Fm         |
+| 1 | 0 | 1 | 1 | FRD      | Fast reverse divide   | Fd := Fm / Fn         |
+| 1 | 1 | 0 | 0 | POL      | Polar angle (ArcTan2) | Fd := arctan2(Fn,Fm)  |
+| 1 | 1 | 0 | 1 |          | undefined instruction | trap                  |
+| 1 | 1 | 1 | 0 |          | undefined instruction | trap                  |
+| 1 | 1 | 1 | 1 |          | undefined instruction | trap                  |
++---+---+---+---+----------+-----------------------+-----------------------+
+Note: POW, RPW, POL are deprecated, and are available for backwards
+      compatibility only.
+*/
+
+/*
+TABLE 4: Monadic Floating Point Opcodes
++---+---+---+---+----------+-----------------------+-----------------------+
+| a | b | c | d | Mnemonic | Description           | Operation             |
++---+---+---+---+----------+-----------------------+-----------------------+
+| 0 | 0 | 0 | 0 | MVF      | Move                  | Fd := Fm              |
+| 0 | 0 | 0 | 1 | MNF      | Move negated          | Fd := - Fm            |
+| 0 | 0 | 1 | 0 | ABS      | Absolute value        | Fd := abs(Fm)         |
+| 0 | 0 | 1 | 1 | RND      | Round to integer      | Fd := int(Fm)         |
+| 0 | 1 | 0 | 0 | SQT      | Square root           | Fd := sqrt(Fm)        |
+| 0 | 1 | 0 | 1 | LOG      | Log base 10           | Fd := log10(Fm)       |
+| 0 | 1 | 1 | 0 | LGN      | Log base e            | Fd := ln(Fm)          |
+| 0 | 1 | 1 | 1 | EXP      | Exponent              | Fd := e ^ Fm          |
+| 1 | 0 | 0 | 0 | SIN      | Sine                  | Fd := sin(Fm)         |
+| 1 | 0 | 0 | 1 | COS      | Cosine                | Fd := cos(Fm)         |
+| 1 | 0 | 1 | 0 | TAN      | Tangent               | Fd := tan(Fm)         |
+| 1 | 0 | 1 | 1 | ASN      | Arc Sine              | Fd := arcsin(Fm)      |
+| 1 | 1 | 0 | 0 | ACS      | Arc Cosine            | Fd := arccos(Fm)      |
+| 1 | 1 | 0 | 1 | ATN      | Arc Tangent           | Fd := arctan(Fm)      |
+| 1 | 1 | 1 | 0 | URD      | Unnormalized round    | Fd := int(Fm)         |
+| 1 | 1 | 1 | 1 | NRM      | Normalize             | Fd := norm(Fm)        |
++---+---+---+---+----------+-----------------------+-----------------------+
+Note: LOG, LGN, EXP, SIN, COS, TAN, ASN, ACS, ATN are deprecated, and are
+      available for backwards compatibility only.
+*/
+
+/*
+TABLE 5
++-------------------------+---+---+
+| Rounding Precision      | e | f |
++-------------------------+---+---+
+| IEEE Single precision   | 0 | 0 |
+| IEEE Double precision   | 0 | 1 |
+| IEEE Extended precision | 1 | 0 |
+| undefined (trap)        | 1 | 1 |
++-------------------------+---+---+
+*/
+
+/*
+TABLE 6
++---------------------------------+---+---+
+| Rounding Mode                   | g | h |
++---------------------------------+---+---+
+| Round to nearest (default)      | 0 | 0 |
+| Round toward plus infinity      | 0 | 1 |
+| Round toward negative infinity  | 1 | 0 |
+| Round toward zero               | 1 | 1 |
++---------------------------------+---+---+
+*/
+
+/*
+===
+=== Definitions for load and store instructions
+===
+*/
+
+/* bit masks */
+#define BIT_PREINDEX 0x01000000
+#define BIT_UP 0x00800000
+#define BIT_WRITE_BACK 0x00200000
+#define BIT_LOAD 0x00100000
+
+/* masks for load/store */
+#define MASK_CPDT 0x0c000000 /* data processing opcode */
+#define MASK_OFFSET 0x000000ff
+#define MASK_TRANSFER_LENGTH 0x00408000
+#define MASK_REGISTER_COUNT MASK_TRANSFER_LENGTH
+#define MASK_COPROCESSOR 0x00000f00
+
+/* Tests for transfer length */
+#define TRANSFER_SINGLE 0x00000000
+#define TRANSFER_DOUBLE 0x00008000
+#define TRANSFER_EXTENDED 0x00400000
+#define TRANSFER_PACKED MASK_TRANSFER_LENGTH
+
+/* Get the coprocessor number from the opcode. */
+#define getCoprocessorNumber(opcode) ((opcode & MASK_COPROCESSOR) >> 8)
+
+/* Get the offset from the opcode. */
+#define getOffset(opcode) (opcode & MASK_OFFSET)
+
+/* Tests for specific data transfer load/store opcodes. */
+#define TEST_OPCODE(opcode,mask) (((opcode) & (mask)) == (mask))
+
+#define LOAD_OP(opcode) TEST_OPCODE((opcode),MASK_CPDT | BIT_LOAD)
+#define STORE_OP(opcode) ((opcode & (MASK_CPDT | BIT_LOAD)) == MASK_CPDT)
+
+#define LDF_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 1))
+#define LFM_OP(opcode) (LOAD_OP(opcode) && (getCoprocessorNumber(opcode) == 2))
+#define STF_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 1))
+#define SFM_OP(opcode) (STORE_OP(opcode) && (getCoprocessorNumber(opcode) == 2))
+
+#define PREINDEXED(opcode) ((opcode & BIT_PREINDEX) != 0)
+#define POSTINDEXED(opcode) ((opcode & BIT_PREINDEX) == 0)
+#define BIT_UP_SET(opcode) ((opcode & BIT_UP) != 0)
+#define BIT_UP_CLEAR(opcode) ((opcode & BIT_DOWN) == 0)
+#define WRITE_BACK(opcode) ((opcode & BIT_WRITE_BACK) != 0)
+#define LOAD(opcode) ((opcode & BIT_LOAD) != 0)
+#define STORE(opcode) ((opcode & BIT_LOAD) == 0)
+
+/*
+===
+=== Definitions for arithmetic instructions
+===
+*/
+/* bit masks */
+#define BIT_MONADIC 0x00008000
+#define BIT_CONSTANT 0x00000008
+
+#define CONSTANT_FM(opcode) ((opcode & BIT_CONSTANT) != 0)
+#define MONADIC_INSTRUCTION(opcode) ((opcode & BIT_MONADIC) != 0)
+
+/* instruction identification masks */
+#define MASK_CPDO 0x0e000000 /* arithmetic opcode */
+#define MASK_ARITHMETIC_OPCODE 0x00f08000
+#define MASK_DESTINATION_SIZE 0x00080080
+
+/* dyadic arithmetic opcodes.
*/ +#define ADF_CODE 0x00000000 +#define MUF_CODE 0x00100000 +#define SUF_CODE 0x00200000 +#define RSF_CODE 0x00300000 +#define DVF_CODE 0x00400000 +#define RDF_CODE 0x00500000 +#define POW_CODE 0x00600000 +#define RPW_CODE 0x00700000 +#define RMF_CODE 0x00800000 +#define FML_CODE 0x00900000 +#define FDV_CODE 0x00a00000 +#define FRD_CODE 0x00b00000 +#define POL_CODE 0x00c00000 +/* 0x00d00000 is an invalid dyadic arithmetic opcode */ +/* 0x00e00000 is an invalid dyadic arithmetic opcode */ +/* 0x00f00000 is an invalid dyadic arithmetic opcode */ + +/* monadic arithmetic opcodes. */ +#define MVF_CODE 0x00008000 +#define MNF_CODE 0x00108000 +#define ABS_CODE 0x00208000 +#define RND_CODE 0x00308000 +#define SQT_CODE 0x00408000 +#define LOG_CODE 0x00508000 +#define LGN_CODE 0x00608000 +#define EXP_CODE 0x00708000 +#define SIN_CODE 0x00808000 +#define COS_CODE 0x00908000 +#define TAN_CODE 0x00a08000 +#define ASN_CODE 0x00b08000 +#define ACS_CODE 0x00c08000 +#define ATN_CODE 0x00d08000 +#define URD_CODE 0x00e08000 +#define NRM_CODE 0x00f08000 + +/* +=== +=== Definitions for register transfer and comparison instructions +=== +*/ + +#define MASK_CPRT 0x0e000010 /* register transfer opcode */ +#define MASK_CPRT_CODE 0x00f00000 +#define FLT_CODE 0x00000000 +#define FIX_CODE 0x00100000 +#define WFS_CODE 0x00200000 +#define RFS_CODE 0x00300000 +#define WFC_CODE 0x00400000 +#define RFC_CODE 0x00500000 +#define CMF_CODE 0x00900000 +#define CNF_CODE 0x00b00000 +#define CMFE_CODE 0x00d00000 +#define CNFE_CODE 0x00f00000 + +/* +=== +=== Common definitions +=== +*/ + +/* register masks */ +#define MASK_Rd 0x0000f000 +#define MASK_Rn 0x000f0000 +#define MASK_Fd 0x00007000 +#define MASK_Fm 0x00000007 +#define MASK_Fn 0x00070000 + +/* condition code masks */ +#define CC_MASK 0xf0000000 +#define CC_NEGATIVE 0x80000000 +#define CC_ZERO 0x40000000 +#define CC_CARRY 0x20000000 +#define CC_OVERFLOW 0x10000000 +#define CC_EQ 0x00000000 +#define CC_NE 0x10000000 +#define CC_CS 0x20000000 +#define CC_HS CC_CS +#define CC_CC 0x30000000 +#define CC_LO CC_CC +#define CC_MI 0x40000000 +#define CC_PL 0x50000000 +#define CC_VS 0x60000000 +#define CC_VC 0x70000000 +#define CC_HI 0x80000000 +#define CC_LS 0x90000000 +#define CC_GE 0xa0000000 +#define CC_LT 0xb0000000 +#define CC_GT 0xc0000000 +#define CC_LE 0xd0000000 +#define CC_AL 0xe0000000 +#define CC_NV 0xf0000000 + +/* rounding masks/values */ +#define MASK_ROUNDING_MODE 0x00000060 +#define ROUND_TO_NEAREST 0x00000000 +#define ROUND_TO_PLUS_INFINITY 0x00000020 +#define ROUND_TO_MINUS_INFINITY 0x00000040 +#define ROUND_TO_ZERO 0x00000060 + +#define MASK_ROUNDING_PRECISION 0x00080080 +#define ROUND_SINGLE 0x00000000 +#define ROUND_DOUBLE 0x00000080 +#define ROUND_EXTENDED 0x00080000 + +/* Get the condition code from the opcode. */ +#define getCondition(opcode) (opcode >> 28) + +/* Get the source register from the opcode. */ +#define getRn(opcode) ((opcode & MASK_Rn) >> 16) + +/* Get the destination floating point register from the opcode. */ +#define getFd(opcode) ((opcode & MASK_Fd) >> 12) + +/* Get the first source floating point register from the opcode. */ +#define getFn(opcode) ((opcode & MASK_Fn) >> 16) + +/* Get the second source floating point register from the opcode. */ +#define getFm(opcode) (opcode & MASK_Fm) + +/* Get the destination register from the opcode. */ +#define getRd(opcode) ((opcode & MASK_Rd) >> 12) + +/* Get the rounding mode from the opcode. 
*/ +#define getRoundingMode(opcode) ((opcode & MASK_ROUNDING_MODE) >> 5) + +static inline const floatx80 getExtendedConstant(const unsigned int nIndex) +{ + extern const floatx80 floatx80Constant[]; + return floatx80Constant[nIndex]; +} + +static inline const float64 getDoubleConstant(const unsigned int nIndex) +{ + extern const float64 float64Constant[]; + return float64Constant[nIndex]; +} + +static inline const float32 getSingleConstant(const unsigned int nIndex) +{ + extern const float32 float32Constant[]; + return float32Constant[nIndex]; +} + +extern unsigned int getRegisterCount(const unsigned int opcode); +extern unsigned int getDestinationSize(const unsigned int opcode); + +#endif diff --git a/target-arm/nwfpe/fpsr.h b/target-arm/nwfpe/fpsr.h new file mode 100644 index 0000000..6dafb0f --- /dev/null +++ b/target-arm/nwfpe/fpsr.h @@ -0,0 +1,108 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.com, 1998-1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#ifndef __FPSR_H__ +#define __FPSR_H__ + +/* +The FPSR is a 32 bit register consisting of 4 parts, each exactly +one byte. + + SYSTEM ID + EXCEPTION TRAP ENABLE BYTE + SYSTEM CONTROL BYTE + CUMULATIVE EXCEPTION FLAGS BYTE + +The FPCR is a 32 bit register consisting of bit flags. 
+*/ + +/* SYSTEM ID +------------ +Note: the system id byte is read only */ + +typedef unsigned int FPSR; /* type for floating point status register */ +typedef unsigned int FPCR; /* type for floating point control register */ + +#define MASK_SYSID 0xff000000 +#define BIT_HARDWARE 0x80000000 +#define FP_EMULATOR 0x01000000 /* System ID for emulator */ +#define FP_ACCELERATOR 0x81000000 /* System ID for FPA11 */ + +/* EXCEPTION TRAP ENABLE BYTE +----------------------------- */ + +#define MASK_TRAP_ENABLE 0x00ff0000 +#define MASK_TRAP_ENABLE_STRICT 0x001f0000 +#define BIT_IXE 0x00100000 /* inexact exception enable */ +#define BIT_UFE 0x00080000 /* underflow exception enable */ +#define BIT_OFE 0x00040000 /* overflow exception enable */ +#define BIT_DZE 0x00020000 /* divide by zero exception enable */ +#define BIT_IOE 0x00010000 /* invalid operation exception enable */ + +/* SYSTEM CONTROL BYTE +---------------------- */ + +#define MASK_SYSTEM_CONTROL 0x0000ff00 +#define MASK_TRAP_STRICT 0x00001f00 + +#define BIT_AC 0x00001000 /* use alternative C-flag definition + for compares */ +#define BIT_EP 0x00000800 /* use expanded packed decimal format */ +#define BIT_SO 0x00000400 /* select synchronous operation of FPA */ +#define BIT_NE 0x00000200 /* NaN exception bit */ +#define BIT_ND 0x00000100 /* no denormalized numbers bit */ + +/* CUMULATIVE EXCEPTION FLAGS BYTE +---------------------------------- */ + +#define MASK_EXCEPTION_FLAGS 0x000000ff +#define MASK_EXCEPTION_FLAGS_STRICT 0x0000001f + +#define BIT_IXC 0x00000010 /* inexact exception flag */ +#define BIT_UFC 0x00000008 /* underflow exception flag */ +#define BIT_OFC 0x00000004 /* overfloat exception flag */ +#define BIT_DZC 0x00000002 /* divide by zero exception flag */ +#define BIT_IOC 0x00000001 /* invalid operation exception flag */ + +/* Floating Point Control Register +----------------------------------*/ + +#define BIT_RU 0x80000000 /* rounded up bit */ +#define BIT_IE 0x10000000 /* inexact bit */ +#define BIT_MO 0x08000000 /* mantissa overflow bit */ +#define BIT_EO 0x04000000 /* exponent overflow bit */ +#define BIT_SB 0x00000800 /* store bounce */ +#define BIT_AB 0x00000400 /* arithmetic bounce */ +#define BIT_RE 0x00000200 /* rounding exception */ +#define BIT_DA 0x00000100 /* disable FPA */ + +#define MASK_OP 0x00f08010 /* AU operation code */ +#define MASK_PR 0x00080080 /* AU precision */ +#define MASK_S1 0x00070000 /* AU source register 1 */ +#define MASK_S2 0x00000007 /* AU source register 2 */ +#define MASK_DS 0x00007000 /* AU destination register */ +#define MASK_RM 0x00000060 /* AU rounding mode */ +#define MASK_ALU 0x9cfff2ff /* only ALU can write these bits */ +#define MASK_RESET 0x00000d00 /* bits set on reset, all others cleared */ +#define MASK_WFC MASK_RESET +#define MASK_RFC ~MASK_RESET + +#endif diff --git a/target-arm/nwfpe/single_cpdo.c b/target-arm/nwfpe/single_cpdo.c new file mode 100644 index 0000000..7dd2620 --- /dev/null +++ b/target-arm/nwfpe/single_cpdo.c @@ -0,0 +1,255 @@ +/* + NetWinder Floating Point Emulator + (c) Rebel.COM, 1998,1999 + + Direct questions, comments to Scott Bambrough + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include "fpa11.h" +#include "softfloat.h" +#include "fpopcode.h" + +float32 float32_exp(float32 Fm); +float32 float32_ln(float32 Fm); +float32 float32_sin(float32 rFm); +float32 float32_cos(float32 rFm); +float32 float32_arcsin(float32 rFm); +float32 float32_arctan(float32 rFm); +float32 float32_log(float32 rFm); +float32 float32_tan(float32 rFm); +float32 float32_arccos(float32 rFm); +float32 float32_pow(float32 rFn,float32 rFm); +float32 float32_pol(float32 rFn,float32 rFm); + +unsigned int SingleCPDO(const unsigned int opcode) +{ + FPA11 *fpa11 = GET_FPA11(); + float32 rFm, rFn = 0; + unsigned int Fd, Fm, Fn, nRc = 1; + + Fm = getFm(opcode); + if (CONSTANT_FM(opcode)) + { + rFm = getSingleConstant(Fm); + } + else + { + switch (fpa11->fType[Fm]) + { + case typeSingle: + rFm = fpa11->fpreg[Fm].fSingle; + break; + + default: return 0; + } + } + + if (!MONADIC_INSTRUCTION(opcode)) + { + Fn = getFn(opcode); + switch (fpa11->fType[Fn]) + { + case typeSingle: + rFn = fpa11->fpreg[Fn].fSingle; + break; + + default: return 0; + } + } + + Fd = getFd(opcode); + switch (opcode & MASK_ARITHMETIC_OPCODE) + { + /* dyadic opcodes */ + case ADF_CODE: + fpa11->fpreg[Fd].fSingle = float32_add(rFn,rFm, &fpa11->fp_status); + break; + + case MUF_CODE: + case FML_CODE: + fpa11->fpreg[Fd].fSingle = float32_mul(rFn,rFm, &fpa11->fp_status); + break; + + case SUF_CODE: + fpa11->fpreg[Fd].fSingle = float32_sub(rFn,rFm, &fpa11->fp_status); + break; + + case RSF_CODE: + fpa11->fpreg[Fd].fSingle = float32_sub(rFm,rFn, &fpa11->fp_status); + break; + + case DVF_CODE: + case FDV_CODE: + fpa11->fpreg[Fd].fSingle = float32_div(rFn,rFm, &fpa11->fp_status); + break; + + case RDF_CODE: + case FRD_CODE: + fpa11->fpreg[Fd].fSingle = float32_div(rFm,rFn, &fpa11->fp_status); + break; + +#if 0 + case POW_CODE: + fpa11->fpreg[Fd].fSingle = float32_pow(rFn,rFm); + break; + + case RPW_CODE: + fpa11->fpreg[Fd].fSingle = float32_pow(rFm,rFn); + break; +#endif + + case RMF_CODE: + fpa11->fpreg[Fd].fSingle = float32_rem(rFn,rFm, &fpa11->fp_status); + break; + +#if 0 + case POL_CODE: + fpa11->fpreg[Fd].fSingle = float32_pol(rFn,rFm); + break; +#endif + + /* monadic opcodes */ + case MVF_CODE: + fpa11->fpreg[Fd].fSingle = rFm; + break; + + case MNF_CODE: + rFm ^= 0x80000000; + fpa11->fpreg[Fd].fSingle = rFm; + break; + + case ABS_CODE: + rFm &= 0x7fffffff; + fpa11->fpreg[Fd].fSingle = rFm; + break; + + case RND_CODE: + case URD_CODE: + fpa11->fpreg[Fd].fSingle = float32_round_to_int(rFm, &fpa11->fp_status); + break; + + case SQT_CODE: + fpa11->fpreg[Fd].fSingle = float32_sqrt(rFm, &fpa11->fp_status); + break; + +#if 0 + case LOG_CODE: + fpa11->fpreg[Fd].fSingle = float32_log(rFm); + break; + + case LGN_CODE: + fpa11->fpreg[Fd].fSingle = float32_ln(rFm); + break; + + case EXP_CODE: + fpa11->fpreg[Fd].fSingle = float32_exp(rFm); + break; + + case SIN_CODE: + fpa11->fpreg[Fd].fSingle = float32_sin(rFm); + break; + + case COS_CODE: + fpa11->fpreg[Fd].fSingle = float32_cos(rFm); + break; + + case TAN_CODE: + fpa11->fpreg[Fd].fSingle = float32_tan(rFm); + break; + + case ASN_CODE: + fpa11->fpreg[Fd].fSingle = 
float32_arcsin(rFm); + break; + + case ACS_CODE: + fpa11->fpreg[Fd].fSingle = float32_arccos(rFm); + break; + + case ATN_CODE: + fpa11->fpreg[Fd].fSingle = float32_arctan(rFm); + break; +#endif + + case NRM_CODE: + break; + + default: + { + nRc = 0; + } + } + + if (0 != nRc) fpa11->fType[Fd] = typeSingle; + return nRc; +} + +#if 0 +float32 float32_exp(float32 Fm) +{ +//series +} + +float32 float32_ln(float32 Fm) +{ +//series +} + +float32 float32_sin(float32 rFm) +{ +//series +} + +float32 float32_cos(float32 rFm) +{ +//series +} + +float32 float32_arcsin(float32 rFm) +{ +//series +} + +float32 float32_arctan(float32 rFm) +{ + //series +} + +float32 float32_arccos(float32 rFm) +{ + //return float32_sub(halfPi,float32_arcsin(rFm)); +} + +float32 float32_log(float32 rFm) +{ + return float32_div(float32_ln(rFm),getSingleConstant(7)); +} + +float32 float32_tan(float32 rFm) +{ + return float32_div(float32_sin(rFm),float32_cos(rFm)); +} + +float32 float32_pow(float32 rFn,float32 rFm) +{ + return float32_exp(float32_mul(rFm,float32_ln(rFn))); +} + +float32 float32_pol(float32 rFn,float32 rFm) +{ + return float32_arctan(float32_div(rFn,rFm)); +} +#endif diff --git a/target-arm/op.c b/target-arm/op.c new file mode 100644 index 0000000..f17b812 --- /dev/null +++ b/target-arm/op.c @@ -0,0 +1,1203 @@ +/* + * ARM micro operations + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005 CodeSourcery, LLC + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include "exec.h" + +#define REGNAME r0 +#define REG (env->regs[0]) +#include "op_template.h" + +#define REGNAME r1 +#define REG (env->regs[1]) +#include "op_template.h" + +#define REGNAME r2 +#define REG (env->regs[2]) +#include "op_template.h" + +#define REGNAME r3 +#define REG (env->regs[3]) +#include "op_template.h" + +#define REGNAME r4 +#define REG (env->regs[4]) +#include "op_template.h" + +#define REGNAME r5 +#define REG (env->regs[5]) +#include "op_template.h" + +#define REGNAME r6 +#define REG (env->regs[6]) +#include "op_template.h" + +#define REGNAME r7 +#define REG (env->regs[7]) +#include "op_template.h" + +#define REGNAME r8 +#define REG (env->regs[8]) +#include "op_template.h" + +#define REGNAME r9 +#define REG (env->regs[9]) +#include "op_template.h" + +#define REGNAME r10 +#define REG (env->regs[10]) +#include "op_template.h" + +#define REGNAME r11 +#define REG (env->regs[11]) +#include "op_template.h" + +#define REGNAME r12 +#define REG (env->regs[12]) +#include "op_template.h" + +#define REGNAME r13 +#define REG (env->regs[13]) +#include "op_template.h" + +#define REGNAME r14 +#define REG (env->regs[14]) +#include "op_template.h" + +#define REGNAME r15 +#define REG (env->regs[15]) +#define SET_REG(x) REG = x & ~(uint32_t)1 +#include "op_template.h" + +void OPPROTO op_bx_T0(void) +{ + env->regs[15] = T0 & ~(uint32_t)1; + env->thumb = (T0 & 1) != 0; +} + +void OPPROTO op_movl_T0_0(void) +{ + T0 = 0; +} + +void OPPROTO op_movl_T0_im(void) +{ + T0 = PARAM1; +} + +void OPPROTO op_movl_T0_T1(void) +{ + T0 = T1; +} + +void OPPROTO op_movl_T1_im(void) +{ + T1 = PARAM1; +} + +void OPPROTO op_mov_CF_T1(void) +{ + env->CF = ((uint32_t)T1) >> 31; +} + +void OPPROTO op_movl_T2_im(void) +{ + T2 = PARAM1; +} + +void OPPROTO op_addl_T1_im(void) +{ + T1 += PARAM1; +} + +void OPPROTO op_addl_T1_T2(void) +{ + T1 += T2; +} + +void OPPROTO op_subl_T1_T2(void) +{ + T1 -= T2; +} + +void OPPROTO op_addl_T0_T1(void) +{ + T0 += T1; +} + +void OPPROTO op_addl_T0_T1_cc(void) +{ + unsigned int src1; + src1 = T0; + T0 += T1; + env->NZF = T0; + env->CF = T0 < src1; + env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0); +} + +void OPPROTO op_adcl_T0_T1(void) +{ + T0 += T1 + env->CF; +} + +void OPPROTO op_adcl_T0_T1_cc(void) +{ + unsigned int src1; + src1 = T0; + if (!env->CF) { + T0 += T1; + env->CF = T0 < src1; + } else { + T0 += T1 + 1; + env->CF = T0 <= src1; + } + env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0); + env->NZF = T0; + FORCE_RET(); +} + +#define OPSUB(sub, sbc, res, T0, T1) \ + \ +void OPPROTO op_ ## sub ## l_T0_T1(void) \ +{ \ + res = T0 - T1; \ +} \ + \ +void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \ +{ \ + unsigned int src1; \ + src1 = T0; \ + T0 -= T1; \ + env->NZF = T0; \ + env->CF = src1 >= T1; \ + env->VF = (src1 ^ T1) & (src1 ^ T0); \ + res = T0; \ +} \ + \ +void OPPROTO op_ ## sbc ## l_T0_T1(void) \ +{ \ + res = T0 - T1 + env->CF - 1; \ +} \ + \ +void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \ +{ \ + unsigned int src1; \ + src1 = T0; \ + if (!env->CF) { \ + T0 = T0 - T1 - 1; \ + env->CF = src1 > T1; \ + } else { \ + T0 = T0 - T1; \ + env->CF = src1 >= T1; \ + } \ + env->VF = (src1 ^ T1) & (src1 ^ T0); \ + env->NZF = T0; \ + res = T0; \ + FORCE_RET(); \ +} + +OPSUB(sub, sbc, T0, T0, T1) + +OPSUB(rsb, rsc, T0, T1, T0) + +void OPPROTO op_andl_T0_T1(void) +{ + T0 &= T1; +} + 
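+/* A quick worked example (for illustration only) of the flag convention the
+   OPSUB macro above encodes: C means "no borrow" and V means signed overflow
+   for a subtraction.  With T0 = 5 and T1 = 7, op_subl_T0_T1_cc computes
+
+       T0  = 5 - 7              = 0xfffffffe
+       CF  = (5 >= 7)           = 0            (a borrow occurred)
+       VF  = (5 ^ 7) & (5 ^ T0) = 2            (bit 31 clear, no overflow)
+       NZF = 0xfffffffe                        (read back as N = 1, Z = 0)
+
+   which matches what an ARM SUBS instruction would set for those operands. */
+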
+void OPPROTO op_xorl_T0_T1(void) +{ + T0 ^= T1; +} + +void OPPROTO op_orl_T0_T1(void) +{ + T0 |= T1; +} + +void OPPROTO op_bicl_T0_T1(void) +{ + T0 &= ~T1; +} + +void OPPROTO op_notl_T1(void) +{ + T1 = ~T1; +} + +void OPPROTO op_logic_T0_cc(void) +{ + env->NZF = T0; +} + +void OPPROTO op_logic_T1_cc(void) +{ + env->NZF = T1; +} + +#define EIP (env->regs[15]) + +void OPPROTO op_test_eq(void) +{ + if (env->NZF == 0) + GOTO_LABEL_PARAM(1);; + FORCE_RET(); +} + +void OPPROTO op_test_ne(void) +{ + if (env->NZF != 0) + GOTO_LABEL_PARAM(1);; + FORCE_RET(); +} + +void OPPROTO op_test_cs(void) +{ + if (env->CF != 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_cc(void) +{ + if (env->CF == 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_mi(void) +{ + if ((env->NZF & 0x80000000) != 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_pl(void) +{ + if ((env->NZF & 0x80000000) == 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_vs(void) +{ + if ((env->VF & 0x80000000) != 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_vc(void) +{ + if ((env->VF & 0x80000000) == 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_hi(void) +{ + if (env->CF != 0 && env->NZF != 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_ls(void) +{ + if (env->CF == 0 || env->NZF == 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_ge(void) +{ + if (((env->VF ^ env->NZF) & 0x80000000) == 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_lt(void) +{ + if (((env->VF ^ env->NZF) & 0x80000000) != 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_gt(void) +{ + if (env->NZF != 0 && ((env->VF ^ env->NZF) & 0x80000000) == 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_test_le(void) +{ + if (env->NZF == 0 || ((env->VF ^ env->NZF) & 0x80000000) != 0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_goto_tb0(void) +{ + GOTO_TB(op_goto_tb0, PARAM1, 0); +} + +void OPPROTO op_goto_tb1(void) +{ + GOTO_TB(op_goto_tb1, PARAM1, 1); +} + +void OPPROTO op_exit_tb(void) +{ + EXIT_TB(); +} + +void OPPROTO op_movl_T0_cpsr(void) +{ + T0 = cpsr_read(env); + FORCE_RET(); +} + +void OPPROTO op_movl_T0_spsr(void) +{ + T0 = env->spsr; +} + +void OPPROTO op_movl_spsr_T0(void) +{ + uint32_t mask = PARAM1; + env->spsr = (env->spsr & ~mask) | (T0 & mask); +} + +void OPPROTO op_movl_cpsr_T0(void) +{ + cpsr_write(env, T0, PARAM1); + FORCE_RET(); +} + +void OPPROTO op_mul_T0_T1(void) +{ + T0 = T0 * T1; +} + +/* 64 bit unsigned mul */ +void OPPROTO op_mull_T0_T1(void) +{ + uint64_t res; + res = (uint64_t)T0 * (uint64_t)T1; + T1 = res >> 32; + T0 = res; +} + +/* 64 bit signed mul */ +void OPPROTO op_imull_T0_T1(void) +{ + uint64_t res; + res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1); + T1 = res >> 32; + T0 = res; +} + +/* 48 bit signed mul, top 32 bits */ +void OPPROTO op_imulw_T0_T1(void) +{ + uint64_t res; + res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1); + T0 = res >> 16; +} + +void OPPROTO op_addq_T0_T1(void) +{ + uint64_t res; + res = ((uint64_t)T1 << 32) | T0; + res += ((uint64_t)(env->regs[PARAM2]) << 32) | (env->regs[PARAM1]); + T1 = res >> 32; + T0 = res; +} + +void OPPROTO op_addq_lo_T0_T1(void) +{ + uint64_t res; + res = ((uint64_t)T1 << 32) | T0; + res += (uint64_t)(env->regs[PARAM1]); + T1 = res >> 32; + T0 = res; +} + +void OPPROTO op_logicq_cc(void) +{ + env->NZF = (T1 & 0x80000000) | ((T0 | T1) != 0); +} + +/* memory access */ + 
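+/* Illustrative note: op_mem.h, included just below, is instantiated once per
+   MEMSUFFIX, so with MEMSUFFIX defined as _raw its MEM_LD_OP(l) template
+   expands to roughly
+
+       void OPPROTO op_ldl_raw(void)
+       {
+           T0 = ldl_raw(T1);
+           FORCE_RET();
+       }
+
+   and the same templates are stamped out again for the _user and _kernel
+   address spaces in the system (softmmu) build. */
+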
+#define MEMSUFFIX _raw +#include "op_mem.h" + +#if !defined(CONFIG_USER_ONLY) +#define MEMSUFFIX _user +#include "op_mem.h" +#define MEMSUFFIX _kernel +#include "op_mem.h" +#endif + +/* shifts */ + +/* T1 based */ + +void OPPROTO op_shll_T1_im(void) +{ + T1 = T1 << PARAM1; +} + +void OPPROTO op_shrl_T1_im(void) +{ + T1 = (uint32_t)T1 >> PARAM1; +} + +void OPPROTO op_shrl_T1_0(void) +{ + T1 = 0; +} + +void OPPROTO op_sarl_T1_im(void) +{ + T1 = (int32_t)T1 >> PARAM1; +} + +void OPPROTO op_sarl_T1_0(void) +{ + T1 = (int32_t)T1 >> 31; +} + +void OPPROTO op_rorl_T1_im(void) +{ + int shift; + shift = PARAM1; + T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift)); +} + +void OPPROTO op_rrxl_T1(void) +{ + T1 = ((uint32_t)T1 >> 1) | ((uint32_t)env->CF << 31); +} + +/* T1 based, set C flag */ +void OPPROTO op_shll_T1_im_cc(void) +{ + env->CF = (T1 >> (32 - PARAM1)) & 1; + T1 = T1 << PARAM1; +} + +void OPPROTO op_shrl_T1_im_cc(void) +{ + env->CF = (T1 >> (PARAM1 - 1)) & 1; + T1 = (uint32_t)T1 >> PARAM1; +} + +void OPPROTO op_shrl_T1_0_cc(void) +{ + env->CF = (T1 >> 31) & 1; + T1 = 0; +} + +void OPPROTO op_sarl_T1_im_cc(void) +{ + env->CF = (T1 >> (PARAM1 - 1)) & 1; + T1 = (int32_t)T1 >> PARAM1; +} + +void OPPROTO op_sarl_T1_0_cc(void) +{ + env->CF = (T1 >> 31) & 1; + T1 = (int32_t)T1 >> 31; +} + +void OPPROTO op_rorl_T1_im_cc(void) +{ + int shift; + shift = PARAM1; + env->CF = (T1 >> (shift - 1)) & 1; + T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift)); +} + +void OPPROTO op_rrxl_T1_cc(void) +{ + uint32_t c; + c = T1 & 1; + T1 = ((uint32_t)T1 >> 1) | ((uint32_t)env->CF << 31); + env->CF = c; +} + +/* T2 based */ +void OPPROTO op_shll_T2_im(void) +{ + T2 = T2 << PARAM1; +} + +void OPPROTO op_shrl_T2_im(void) +{ + T2 = (uint32_t)T2 >> PARAM1; +} + +void OPPROTO op_shrl_T2_0(void) +{ + T2 = 0; +} + +void OPPROTO op_sarl_T2_im(void) +{ + T2 = (int32_t)T2 >> PARAM1; +} + +void OPPROTO op_sarl_T2_0(void) +{ + T2 = (int32_t)T2 >> 31; +} + +void OPPROTO op_rorl_T2_im(void) +{ + int shift; + shift = PARAM1; + T2 = ((uint32_t)T2 >> shift) | (T2 << (32 - shift)); +} + +void OPPROTO op_rrxl_T2(void) +{ + T2 = ((uint32_t)T2 >> 1) | ((uint32_t)env->CF << 31); +} + +/* T1 based, use T0 as shift count */ + +void OPPROTO op_shll_T1_T0(void) +{ + int shift; + shift = T0 & 0xff; + if (shift >= 32) + T1 = 0; + else + T1 = T1 << shift; + FORCE_RET(); +} + +void OPPROTO op_shrl_T1_T0(void) +{ + int shift; + shift = T0 & 0xff; + if (shift >= 32) + T1 = 0; + else + T1 = (uint32_t)T1 >> shift; + FORCE_RET(); +} + +void OPPROTO op_sarl_T1_T0(void) +{ + int shift; + shift = T0 & 0xff; + if (shift >= 32) + shift = 31; + T1 = (int32_t)T1 >> shift; +} + +void OPPROTO op_rorl_T1_T0(void) +{ + int shift; + shift = T0 & 0x1f; + if (shift) { + T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift)); + } + FORCE_RET(); +} + +/* T1 based, use T0 as shift count and compute CF */ + +void OPPROTO op_shll_T1_T0_cc(void) +{ + int shift; + shift = T0 & 0xff; + if (shift >= 32) { + if (shift == 32) + env->CF = T1 & 1; + else + env->CF = 0; + T1 = 0; + } else if (shift != 0) { + env->CF = (T1 >> (32 - shift)) & 1; + T1 = T1 << shift; + } + FORCE_RET(); +} + +void OPPROTO op_shrl_T1_T0_cc(void) +{ + int shift; + shift = T0 & 0xff; + if (shift >= 32) { + if (shift == 32) + env->CF = (T1 >> 31) & 1; + else + env->CF = 0; + T1 = 0; + } else if (shift != 0) { + env->CF = (T1 >> (shift - 1)) & 1; + T1 = (uint32_t)T1 >> shift; + } + FORCE_RET(); +} + +void OPPROTO op_sarl_T1_T0_cc(void) +{ + int shift; + shift = T0 & 0xff; + if (shift >= 32) { + 
env->CF = (T1 >> 31) & 1; + T1 = (int32_t)T1 >> 31; + } else { + env->CF = (T1 >> (shift - 1)) & 1; + T1 = (int32_t)T1 >> shift; + } + FORCE_RET(); +} + +void OPPROTO op_rorl_T1_T0_cc(void) +{ + int shift1, shift; + shift1 = T0 & 0xff; + shift = shift1 & 0x1f; + if (shift == 0) { + if (shift1 != 0) + env->CF = (T1 >> 31) & 1; + } else { + env->CF = (T1 >> (shift - 1)) & 1; + T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift)); + } + FORCE_RET(); +} + +/* misc */ +void OPPROTO op_clz_T0(void) +{ + int count; + for (count = 32; T0 > 0; count--) + T0 = T0 >> 1; + T0 = count; + FORCE_RET(); +} + +void OPPROTO op_sarl_T0_im(void) +{ + T0 = (int32_t)T0 >> PARAM1; +} + +/* Sign/zero extend */ +void OPPROTO op_sxth_T0(void) +{ + T0 = (int16_t)T0; +} + +void OPPROTO op_sxth_T1(void) +{ + T1 = (int16_t)T1; +} + +void OPPROTO op_sxtb_T1(void) +{ + T1 = (int8_t)T1; +} + +void OPPROTO op_uxtb_T1(void) +{ + T1 = (uint8_t)T1; +} + +void OPPROTO op_uxth_T1(void) +{ + T1 = (uint16_t)T1; +} + +void OPPROTO op_sxtb16_T1(void) +{ + uint32_t res; + res = (uint16_t)(int8_t)T1; + res |= (uint32_t)(int8_t)(T1 >> 16) << 16; + T1 = res; +} + +void OPPROTO op_uxtb16_T1(void) +{ + uint32_t res; + res = (uint16_t)(uint8_t)T1; + res |= (uint32_t)(uint8_t)(T1 >> 16) << 16; + T1 = res; +} + +#define SIGNBIT (uint32_t)0x80000000 +/* saturating arithmetic */ +void OPPROTO op_addl_T0_T1_setq(void) +{ + uint32_t res; + + res = T0 + T1; + if (((res ^ T0) & SIGNBIT) && !((T0 ^ T1) & SIGNBIT)) + env->QF = 1; + + T0 = res; + FORCE_RET(); +} + +void OPPROTO op_addl_T0_T1_saturate(void) +{ + uint32_t res; + + res = T0 + T1; + if (((res ^ T0) & SIGNBIT) && !((T0 ^ T1) & SIGNBIT)) { + env->QF = 1; + if (T0 & SIGNBIT) + T0 = 0x80000000; + else + T0 = 0x7fffffff; + } + else + T0 = res; + + FORCE_RET(); +} + +void OPPROTO op_subl_T0_T1_saturate(void) +{ + uint32_t res; + + res = T0 - T1; + if (((res ^ T0) & SIGNBIT) && ((T0 ^ T1) & SIGNBIT)) { + env->QF = 1; + if (T0 & SIGNBIT) + T0 = 0x80000000; + else + T0 = 0x7fffffff; + } + else + T0 = res; + + FORCE_RET(); +} + +void OPPROTO op_double_T1_saturate(void) +{ + int32_t val; + + val = T1; + if (val >= 0x40000000) { + T1 = 0x7fffffff; + env->QF = 1; + } else if (val <= (int32_t)0xc0000000) { + T1 = 0x80000000; + env->QF = 1; + } else { + T1 = val << 1; + } + FORCE_RET(); +} + +/* thumb shift by immediate */ +void OPPROTO op_shll_T0_im_thumb(void) +{ + int shift; + shift = PARAM1; + if (shift != 0) { + env->CF = (T1 >> (32 - shift)) & 1; + T0 = T0 << shift; + } + env->NZF = T0; + FORCE_RET(); +} + +void OPPROTO op_shrl_T0_im_thumb(void) +{ + int shift; + + shift = PARAM1; + if (shift == 0) { + env->CF = ((uint32_t)shift) >> 31; + T0 = 0; + } else { + env->CF = (T0 >> (shift - 1)) & 1; + T0 = T0 >> shift; + } + env->NZF = T0; + FORCE_RET(); +} + +void OPPROTO op_sarl_T0_im_thumb(void) +{ + int shift; + + shift = PARAM1; + if (shift == 0) { + T0 = ((int32_t)T0) >> 31; + env->CF = T0 & 1; + } else { + env->CF = (T0 >> (shift - 1)) & 1; + T0 = ((int32_t)T0) >> shift; + } + env->NZF = T0; + FORCE_RET(); +} + +/* exceptions */ + +void OPPROTO op_swi(void) +{ + env->exception_index = EXCP_SWI; + cpu_loop_exit(); +} + +void OPPROTO op_undef_insn(void) +{ + env->exception_index = EXCP_UDEF; + cpu_loop_exit(); +} + +void OPPROTO op_debug(void) +{ + env->exception_index = EXCP_DEBUG; + cpu_loop_exit(); +} + +void OPPROTO op_wfi(void) +{ + env->exception_index = EXCP_HLT; + env->halted = 1; + cpu_loop_exit(); +} + +void OPPROTO op_bkpt(void) +{ + env->exception_index = EXCP_BKPT; + cpu_loop_exit(); 
+} + +/* VFP support. We follow the convention used for VFP instrunctions: + Single precition routines have a "s" suffix, double precision a + "d" suffix. */ + +#define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void) + +#define VFP_BINOP(name) \ +VFP_OP(name, s) \ +{ \ + FT0s = float32_ ## name (FT0s, FT1s, &env->vfp.fp_status); \ +} \ +VFP_OP(name, d) \ +{ \ + FT0d = float64_ ## name (FT0d, FT1d, &env->vfp.fp_status); \ +} +VFP_BINOP(add) +VFP_BINOP(sub) +VFP_BINOP(mul) +VFP_BINOP(div) +#undef VFP_BINOP + +#define VFP_HELPER(name) \ +VFP_OP(name, s) \ +{ \ + do_vfp_##name##s(); \ +} \ +VFP_OP(name, d) \ +{ \ + do_vfp_##name##d(); \ +} +VFP_HELPER(abs) +VFP_HELPER(sqrt) +VFP_HELPER(cmp) +VFP_HELPER(cmpe) +#undef VFP_HELPER + +/* XXX: Will this do the right thing for NANs. Should invert the signbit + without looking at the rest of the value. */ +VFP_OP(neg, s) +{ + FT0s = float32_chs(FT0s); +} + +VFP_OP(neg, d) +{ + FT0d = float64_chs(FT0d); +} + +VFP_OP(F1_ld0, s) +{ + union { + uint32_t i; + float32 s; + } v; + v.i = 0; + FT1s = v.s; +} + +VFP_OP(F1_ld0, d) +{ + union { + uint64_t i; + float64 d; + } v; + v.i = 0; + FT1d = v.d; +} + +/* Helper routines to perform bitwise copies between float and int. */ +static inline float32 vfp_itos(uint32_t i) +{ + union { + uint32_t i; + float32 s; + } v; + + v.i = i; + return v.s; +} + +static inline uint32_t vfp_stoi(float32 s) +{ + union { + uint32_t i; + float32 s; + } v; + + v.s = s; + return v.i; +} + +/* Integer to float conversion. */ +VFP_OP(uito, s) +{ + FT0s = uint32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status); +} + +VFP_OP(uito, d) +{ + FT0d = uint32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status); +} + +VFP_OP(sito, s) +{ + FT0s = int32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status); +} + +VFP_OP(sito, d) +{ + FT0d = int32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status); +} + +/* Float to integer conversion. */ +VFP_OP(toui, s) +{ + FT0s = vfp_itos(float32_to_uint32(FT0s, &env->vfp.fp_status)); +} + +VFP_OP(toui, d) +{ + FT0s = vfp_itos(float64_to_uint32(FT0d, &env->vfp.fp_status)); +} + +VFP_OP(tosi, s) +{ + FT0s = vfp_itos(float32_to_int32(FT0s, &env->vfp.fp_status)); +} + +VFP_OP(tosi, d) +{ + FT0s = vfp_itos(float64_to_int32(FT0d, &env->vfp.fp_status)); +} + +/* TODO: Set rounding mode properly. */ +VFP_OP(touiz, s) +{ + FT0s = vfp_itos(float32_to_uint32_round_to_zero(FT0s, &env->vfp.fp_status)); +} + +VFP_OP(touiz, d) +{ + FT0s = vfp_itos(float64_to_uint32_round_to_zero(FT0d, &env->vfp.fp_status)); +} + +VFP_OP(tosiz, s) +{ + FT0s = vfp_itos(float32_to_int32_round_to_zero(FT0s, &env->vfp.fp_status)); +} + +VFP_OP(tosiz, d) +{ + FT0s = vfp_itos(float64_to_int32_round_to_zero(FT0d, &env->vfp.fp_status)); +} + +/* floating point conversion */ +VFP_OP(fcvtd, s) +{ + FT0d = float32_to_float64(FT0s, &env->vfp.fp_status); +} + +VFP_OP(fcvts, d) +{ + FT0s = float64_to_float32(FT0d, &env->vfp.fp_status); +} + +/* Get and Put values from registers. 
*/ +VFP_OP(getreg_F0, d) +{ + FT0d = *(float64 *)((char *) env + PARAM1); +} + +VFP_OP(getreg_F0, s) +{ + FT0s = *(float32 *)((char *) env + PARAM1); +} + +VFP_OP(getreg_F1, d) +{ + FT1d = *(float64 *)((char *) env + PARAM1); +} + +VFP_OP(getreg_F1, s) +{ + FT1s = *(float32 *)((char *) env + PARAM1); +} + +VFP_OP(setreg_F0, d) +{ + *(float64 *)((char *) env + PARAM1) = FT0d; +} + +VFP_OP(setreg_F0, s) +{ + *(float32 *)((char *) env + PARAM1) = FT0s; +} + +void OPPROTO op_vfp_movl_T0_fpscr(void) +{ + do_vfp_get_fpscr (); +} + +void OPPROTO op_vfp_movl_T0_fpscr_flags(void) +{ + T0 = env->vfp.xregs[ARM_VFP_FPSCR] & (0xf << 28); +} + +void OPPROTO op_vfp_movl_fpscr_T0(void) +{ + do_vfp_set_fpscr(); +} + +void OPPROTO op_vfp_movl_T0_xreg(void) +{ + T0 = env->vfp.xregs[PARAM1]; +} + +void OPPROTO op_vfp_movl_xreg_T0(void) +{ + env->vfp.xregs[PARAM1] = T0; +} + +/* Move between FT0s to T0 */ +void OPPROTO op_vfp_mrs(void) +{ + T0 = vfp_stoi(FT0s); +} + +void OPPROTO op_vfp_msr(void) +{ + FT0s = vfp_itos(T0); +} + +/* Move between FT0d and {T0,T1} */ +void OPPROTO op_vfp_mrrd(void) +{ + CPU_DoubleU u; + + u.d = FT0d; + T0 = u.l.lower; + T1 = u.l.upper; +} + +void OPPROTO op_vfp_mdrr(void) +{ + CPU_DoubleU u; + + u.l.lower = T0; + u.l.upper = T1; + FT0d = u.d; +} + +/* Copy the most significant bit to T0 to all bits of T1. */ +void OPPROTO op_signbit_T1_T0(void) +{ + T1 = (int32_t)T0 >> 31; +} + +void OPPROTO op_movl_cp15_T0(void) +{ + helper_set_cp15(env, PARAM1, T0); + FORCE_RET(); +} + +void OPPROTO op_movl_T0_cp15(void) +{ + T0 = helper_get_cp15(env, PARAM1); + FORCE_RET(); +} + +/* Access to user mode registers from privileged modes. */ +void OPPROTO op_movl_T0_user(void) +{ + int regno = PARAM1; + if (regno == 13) { + T0 = env->banked_r13[0]; + } else if (regno == 14) { + T0 = env->banked_r14[0]; + } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + T0 = env->usr_regs[regno - 8]; + } else { + T0 = env->regs[regno]; + } + FORCE_RET(); +} + + +void OPPROTO op_movl_user_T0(void) +{ + int regno = PARAM1; + if (regno == 13) { + env->banked_r13[0] = T0; + } else if (regno == 14) { + env->banked_r14[0] = T0; + } else if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + env->usr_regs[regno - 8] = T0; + } else { + env->regs[regno] = T0; + } + FORCE_RET(); +} + +void OPPROTO op_movl_T2_T0(void) +{ + T2 = T0; +} + +void OPPROTO op_movl_T0_T2(void) +{ + T0 = T2; +} diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c new file mode 100644 index 0000000..af5c61d --- /dev/null +++ b/target-arm/op_helper.c @@ -0,0 +1,227 @@ +/* + * ARM helper routines + * + * Copyright (c) 2005 CodeSourcery, LLC + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include "exec.h" + +void raise_exception(int tt) +{ + env->exception_index = tt; + cpu_loop_exit(); +} + +/* thread support */ + +spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; + +void cpu_lock(void) +{ + spin_lock(&global_cpu_lock); +} + +void cpu_unlock(void) +{ + spin_unlock(&global_cpu_lock); +} + +/* VFP support. */ + +void do_vfp_abss(void) +{ + FT0s = float32_abs(FT0s); +} + +void do_vfp_absd(void) +{ + FT0d = float64_abs(FT0d); +} + +void do_vfp_sqrts(void) +{ + FT0s = float32_sqrt(FT0s, &env->vfp.fp_status); +} + +void do_vfp_sqrtd(void) +{ + FT0d = float64_sqrt(FT0d, &env->vfp.fp_status); +} + +/* XXX: check quiet/signaling case */ +#define DO_VFP_cmp(p, size) \ +void do_vfp_cmp##p(void) \ +{ \ + uint32_t flags; \ + switch(float ## size ## _compare_quiet(FT0##p, FT1##p, &env->vfp.fp_status)) {\ + case 0: flags = 0x6; break;\ + case -1: flags = 0x8; break;\ + case 1: flags = 0x2; break;\ + default: case 2: flags = 0x3; break;\ + }\ + env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28)\ + | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ + FORCE_RET(); \ +}\ +\ +void do_vfp_cmpe##p(void) \ +{ \ + uint32_t flags; \ + switch(float ## size ## _compare(FT0##p, FT1##p, &env->vfp.fp_status)) {\ + case 0: flags = 0x6; break;\ + case -1: flags = 0x8; break;\ + case 1: flags = 0x2; break;\ + default: case 2: flags = 0x3; break;\ + }\ + env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28)\ + | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ + FORCE_RET(); \ +} +DO_VFP_cmp(s, 32) +DO_VFP_cmp(d, 64) +#undef DO_VFP_cmp + +/* Convert host exception flags to vfp form. */ +static inline int vfp_exceptbits_from_host(int host_bits) +{ + int target_bits = 0; + + if (host_bits & float_flag_invalid) + target_bits |= 1; + if (host_bits & float_flag_divbyzero) + target_bits |= 2; + if (host_bits & float_flag_overflow) + target_bits |= 4; + if (host_bits & float_flag_underflow) + target_bits |= 8; + if (host_bits & float_flag_inexact) + target_bits |= 0x10; + return target_bits; +} + +/* Convert vfp exception flags to target form. */ +static inline int vfp_exceptbits_to_host(int target_bits) +{ + int host_bits = 0; + + if (target_bits & 1) + host_bits |= float_flag_invalid; + if (target_bits & 2) + host_bits |= float_flag_divbyzero; + if (target_bits & 4) + host_bits |= float_flag_overflow; + if (target_bits & 8) + host_bits |= float_flag_underflow; + if (target_bits & 0x10) + host_bits |= float_flag_inexact; + return host_bits; +} + +void do_vfp_set_fpscr(void) +{ + int i; + uint32_t changed; + + changed = env->vfp.xregs[ARM_VFP_FPSCR]; + env->vfp.xregs[ARM_VFP_FPSCR] = (T0 & 0xffc8ffff); + env->vfp.vec_len = (T0 >> 16) & 7; + env->vfp.vec_stride = (T0 >> 20) & 3; + + changed ^= T0; + if (changed & (3 << 22)) { + i = (T0 >> 22) & 3; + switch (i) { + case 0: + i = float_round_nearest_even; + break; + case 1: + i = float_round_up; + break; + case 2: + i = float_round_down; + break; + case 3: + i = float_round_to_zero; + break; + } + set_float_rounding_mode(i, &env->vfp.fp_status); + } + + i = vfp_exceptbits_to_host((T0 >> 8) & 0x1f); + set_float_exception_flags(i, &env->vfp.fp_status); + /* XXX: FZ and DN are not implemented. 
*/ +} + +void do_vfp_get_fpscr(void) +{ + int i; + + T0 = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) | (env->vfp.vec_len << 16) + | (env->vfp.vec_stride << 20); + i = get_float_exception_flags(&env->vfp.fp_status); + T0 |= vfp_exceptbits_from_host(i); +} + +#if !defined(CONFIG_USER_ONLY) + +#define MMUSUFFIX _mmu +#define GETPC() (__builtin_return_address(0)) + +#define SHIFT 0 +#include "softmmu_template.h" + +#define SHIFT 1 +#include "softmmu_template.h" + +#define SHIFT 2 +#include "softmmu_template.h" + +#define SHIFT 3 +#include "softmmu_template.h" + +/* try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr) +{ + TranslationBlock *tb; + CPUState *saved_env; + target_phys_addr_t pc; + int ret; + + /* XXX: hack to restore env in all cases, even if not called from + generated code */ + saved_env = env; + env = cpu_single_env; + ret = cpu_arm_handle_mmu_fault(env, addr, is_write, is_user, 1); + if (__builtin_expect(ret, 0)) { + if (retaddr) { + /* now we have a real cpu fault */ + pc = (target_phys_addr_t)retaddr; + tb = tb_find_pc(pc); + if (tb) { + /* the PC is inside the translated code. It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, pc, NULL); + } + } + raise_exception(env->exception_index); + } + env = saved_env; +} + +#endif diff --git a/target-arm/op_mem.h b/target-arm/op_mem.h new file mode 100644 index 0000000..29fd85b --- /dev/null +++ b/target-arm/op_mem.h @@ -0,0 +1,70 @@ +/* ARM memory operations. */ + +/* Load from address T1 into T0. */ +#define MEM_LD_OP(name) \ +void OPPROTO glue(op_ld##name,MEMSUFFIX)(void) \ +{ \ + T0 = glue(ld##name,MEMSUFFIX)(T1); \ + FORCE_RET(); \ +} + +MEM_LD_OP(ub) +MEM_LD_OP(sb) +MEM_LD_OP(uw) +MEM_LD_OP(sw) +MEM_LD_OP(l) + +#undef MEM_LD_OP + +/* Store T0 to address T1. */ +#define MEM_ST_OP(name) \ +void OPPROTO glue(op_st##name,MEMSUFFIX)(void) \ +{ \ + glue(st##name,MEMSUFFIX)(T1, T0); \ + FORCE_RET(); \ +} + +MEM_ST_OP(b) +MEM_ST_OP(w) +MEM_ST_OP(l) + +#undef MEM_ST_OP + +/* Swap T0 with memory at address T1. */ +/* ??? Is this exception safe? */ +#define MEM_SWP_OP(name, lname) \ +void OPPROTO glue(op_swp##name,MEMSUFFIX)(void) \ +{ \ + uint32_t tmp; \ + cpu_lock(); \ + tmp = glue(ld##lname,MEMSUFFIX)(T1); \ + glue(st##name,MEMSUFFIX)(T1, T0); \ + T0 = tmp; \ + cpu_unlock(); \ + FORCE_RET(); \ +} + +MEM_SWP_OP(b, ub) +MEM_SWP_OP(l, l) + +#undef MEM_SWP_OP + +/* Floating point load/store. 
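The MEM_LD_OP/MEM_ST_OP macros above rely on QEMU's glue() token-pasting idiom together with MEMSUFFIX, so each inclusion of op_mem.h stamps out a separate family of micro-ops (op_ldub_user, op_ldub_kernel, op_ldub_raw and so on). A minimal self-contained sketch of the mechanism, with glue()/xglue() redefined locally just for the example:

#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

#define MEMSUFFIX _user
#define MEM_LD_OP(name) \
    void glue(op_ld##name, MEMSUFFIX)(void) { puts(__func__); }

MEM_LD_OP(ub)   /* expands to: void op_ldub_user(void) { ... } */

int main(void)
{
    op_ldub_user();
    return 0;
}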
Address is in T1 */ +#define VFP_MEM_OP(p, w) \ +void OPPROTO glue(op_vfp_ld##p,MEMSUFFIX)(void) \ +{ \ + FT0##p = glue(ldf##w,MEMSUFFIX)(T1); \ + FORCE_RET(); \ +} \ +void OPPROTO glue(op_vfp_st##p,MEMSUFFIX)(void) \ +{ \ + glue(stf##w,MEMSUFFIX)(T1, FT0##p); \ + FORCE_RET(); \ +} + +VFP_MEM_OP(s,l) +VFP_MEM_OP(d,q) + +#undef VFP_MEM_OP + +#undef MEMSUFFIX diff --git a/target-arm/op_template.h b/target-arm/op_template.h new file mode 100644 index 0000000..fb2add1 --- /dev/null +++ b/target-arm/op_template.h @@ -0,0 +1,53 @@ +/* + * ARM micro operations (templates for various register related + * operations) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef SET_REG +#define SET_REG(x) REG = x +#endif + +void OPPROTO glue(op_movl_T0_, REGNAME)(void) +{ + T0 = REG; +} + +void OPPROTO glue(op_movl_T1_, REGNAME)(void) +{ + T1 = REG; +} + +void OPPROTO glue(op_movl_T2_, REGNAME)(void) +{ + T2 = REG; +} + +void OPPROTO glue(glue(op_movl_, REGNAME), _T0)(void) +{ + SET_REG (T0); +} + +void OPPROTO glue(glue(op_movl_, REGNAME), _T1)(void) +{ + SET_REG (T1); +} + +#undef REG +#undef REGNAME +#undef SET_REG diff --git a/target-arm/translate.c b/target-arm/translate.c new file mode 100644 index 0000000..fa7ad60 --- /dev/null +++ b/target-arm/translate.c @@ -0,0 +1,2576 @@ +/* + * ARM translation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005 CodeSourcery, LLC + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include + +#include "cpu.h" +#include "exec-all.h" +#include "disas.h" + +#define ENABLE_ARCH_5J 0 +#define ENABLE_ARCH_6 1 +#define ENABLE_ARCH_6T2 1 + +#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op; + +/* internal defines */ +typedef struct DisasContext { + target_ulong pc; + int is_jmp; + /* Nonzero if this instruction has been conditionally skipped. */ + int condjmp; + /* The label that will be jumped to when the instruction is skipped. 
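op_template.h above is written to be textually included once per register, presumably from op.c with REG and REGNAME defined beforehand (that is how names such as gen_op_movl_T0_r0, used later by translate.c, come into existence). A toy sketch of the pattern; a macro stands in for the header so the example fits in one file, and DEFINE_REG_OPS, env_r0 and env_r1 are invented names:

#include <stdio.h>

static unsigned T0, env_r0, env_r1;

#define DEFINE_REG_OPS(REGNAME, REG)                          \
    void op_movl_T0_##REGNAME(void) { T0 = REG; }             \
    void op_movl_##REGNAME##_T0(void) { REG = T0; }

DEFINE_REG_OPS(r0, env_r0)
DEFINE_REG_OPS(r1, env_r1)

int main(void)
{
    env_r0 = 42;
    op_movl_T0_r0();   /* T0 = r0 */
    op_movl_r1_T0();   /* r1 = T0 */
    printf("r1 = %u\n", env_r1);
    return 0;
}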
*/ + int condlabel; + struct TranslationBlock *tb; + int singlestep_enabled; + int thumb; +#if !defined(CONFIG_USER_ONLY) + int user; +#endif +} DisasContext; + +#if defined(CONFIG_USER_ONLY) +#define IS_USER(s) 1 +#else +#define IS_USER(s) (s->user) +#endif + +#define DISAS_JUMP_NEXT 4 + +#ifdef USE_DIRECT_JUMP +#define TBPARAM(x) +#else +#define TBPARAM(x) (long)(x) +#endif + +/* XXX: move that elsewhere */ +static uint16_t *gen_opc_ptr; +static uint32_t *gen_opparam_ptr; +extern FILE *logfile; +extern int loglevel; + +enum { +#define DEF(s, n, copy_size) INDEX_op_ ## s, +#include "opc.h" +#undef DEF + NB_OPS, +}; + +#include "gen-op.h" + +static GenOpFunc1 *gen_test_cc[14] = { + gen_op_test_eq, + gen_op_test_ne, + gen_op_test_cs, + gen_op_test_cc, + gen_op_test_mi, + gen_op_test_pl, + gen_op_test_vs, + gen_op_test_vc, + gen_op_test_hi, + gen_op_test_ls, + gen_op_test_ge, + gen_op_test_lt, + gen_op_test_gt, + gen_op_test_le, +}; + +const uint8_t table_logic_cc[16] = { + 1, /* and */ + 1, /* xor */ + 0, /* sub */ + 0, /* rsb */ + 0, /* add */ + 0, /* adc */ + 0, /* sbc */ + 0, /* rsc */ + 1, /* andl */ + 1, /* xorl */ + 0, /* cmp */ + 0, /* cmn */ + 1, /* orr */ + 1, /* mov */ + 1, /* bic */ + 1, /* mvn */ +}; + +static GenOpFunc1 *gen_shift_T1_im[4] = { + gen_op_shll_T1_im, + gen_op_shrl_T1_im, + gen_op_sarl_T1_im, + gen_op_rorl_T1_im, +}; + +static GenOpFunc *gen_shift_T1_0[4] = { + NULL, + gen_op_shrl_T1_0, + gen_op_sarl_T1_0, + gen_op_rrxl_T1, +}; + +static GenOpFunc1 *gen_shift_T2_im[4] = { + gen_op_shll_T2_im, + gen_op_shrl_T2_im, + gen_op_sarl_T2_im, + gen_op_rorl_T2_im, +}; + +static GenOpFunc *gen_shift_T2_0[4] = { + NULL, + gen_op_shrl_T2_0, + gen_op_sarl_T2_0, + gen_op_rrxl_T2, +}; + +static GenOpFunc1 *gen_shift_T1_im_cc[4] = { + gen_op_shll_T1_im_cc, + gen_op_shrl_T1_im_cc, + gen_op_sarl_T1_im_cc, + gen_op_rorl_T1_im_cc, +}; + +static GenOpFunc *gen_shift_T1_0_cc[4] = { + NULL, + gen_op_shrl_T1_0_cc, + gen_op_sarl_T1_0_cc, + gen_op_rrxl_T1_cc, +}; + +static GenOpFunc *gen_shift_T1_T0[4] = { + gen_op_shll_T1_T0, + gen_op_shrl_T1_T0, + gen_op_sarl_T1_T0, + gen_op_rorl_T1_T0, +}; + +static GenOpFunc *gen_shift_T1_T0_cc[4] = { + gen_op_shll_T1_T0_cc, + gen_op_shrl_T1_T0_cc, + gen_op_sarl_T1_T0_cc, + gen_op_rorl_T1_T0_cc, +}; + +static GenOpFunc *gen_op_movl_TN_reg[3][16] = { + { + gen_op_movl_T0_r0, + gen_op_movl_T0_r1, + gen_op_movl_T0_r2, + gen_op_movl_T0_r3, + gen_op_movl_T0_r4, + gen_op_movl_T0_r5, + gen_op_movl_T0_r6, + gen_op_movl_T0_r7, + gen_op_movl_T0_r8, + gen_op_movl_T0_r9, + gen_op_movl_T0_r10, + gen_op_movl_T0_r11, + gen_op_movl_T0_r12, + gen_op_movl_T0_r13, + gen_op_movl_T0_r14, + gen_op_movl_T0_r15, + }, + { + gen_op_movl_T1_r0, + gen_op_movl_T1_r1, + gen_op_movl_T1_r2, + gen_op_movl_T1_r3, + gen_op_movl_T1_r4, + gen_op_movl_T1_r5, + gen_op_movl_T1_r6, + gen_op_movl_T1_r7, + gen_op_movl_T1_r8, + gen_op_movl_T1_r9, + gen_op_movl_T1_r10, + gen_op_movl_T1_r11, + gen_op_movl_T1_r12, + gen_op_movl_T1_r13, + gen_op_movl_T1_r14, + gen_op_movl_T1_r15, + }, + { + gen_op_movl_T2_r0, + gen_op_movl_T2_r1, + gen_op_movl_T2_r2, + gen_op_movl_T2_r3, + gen_op_movl_T2_r4, + gen_op_movl_T2_r5, + gen_op_movl_T2_r6, + gen_op_movl_T2_r7, + gen_op_movl_T2_r8, + gen_op_movl_T2_r9, + gen_op_movl_T2_r10, + gen_op_movl_T2_r11, + gen_op_movl_T2_r12, + gen_op_movl_T2_r13, + gen_op_movl_T2_r14, + gen_op_movl_T2_r15, + }, +}; + +static GenOpFunc *gen_op_movl_reg_TN[2][16] = { + { + gen_op_movl_r0_T0, + gen_op_movl_r1_T0, + gen_op_movl_r2_T0, + gen_op_movl_r3_T0, + gen_op_movl_r4_T0, + 
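The gen_test_cc table above is indexed by the ARM condition-code field, and the translator later jumps over a conditional instruction with gen_test_cc[cond ^ 1]: for condition codes 0..13 the opposite condition differs only in bit 0. A small illustration of why that works (names are invented):

#include <stdio.h>

int main(void)
{
    static const char *cc[14] = {
        "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
        "hi", "ls", "ge", "lt", "gt", "le"
    };
    for (int cond = 0; cond < 14; cond++)
        printf("%s is skipped when %s holds\n", cc[cond], cc[cond ^ 1]);
    return 0;
}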
gen_op_movl_r5_T0, + gen_op_movl_r6_T0, + gen_op_movl_r7_T0, + gen_op_movl_r8_T0, + gen_op_movl_r9_T0, + gen_op_movl_r10_T0, + gen_op_movl_r11_T0, + gen_op_movl_r12_T0, + gen_op_movl_r13_T0, + gen_op_movl_r14_T0, + gen_op_movl_r15_T0, + }, + { + gen_op_movl_r0_T1, + gen_op_movl_r1_T1, + gen_op_movl_r2_T1, + gen_op_movl_r3_T1, + gen_op_movl_r4_T1, + gen_op_movl_r5_T1, + gen_op_movl_r6_T1, + gen_op_movl_r7_T1, + gen_op_movl_r8_T1, + gen_op_movl_r9_T1, + gen_op_movl_r10_T1, + gen_op_movl_r11_T1, + gen_op_movl_r12_T1, + gen_op_movl_r13_T1, + gen_op_movl_r14_T1, + gen_op_movl_r15_T1, + }, +}; + +static GenOpFunc1 *gen_op_movl_TN_im[3] = { + gen_op_movl_T0_im, + gen_op_movl_T1_im, + gen_op_movl_T2_im, +}; + +static GenOpFunc1 *gen_shift_T0_im_thumb[3] = { + gen_op_shll_T0_im_thumb, + gen_op_shrl_T0_im_thumb, + gen_op_sarl_T0_im_thumb, +}; + +static inline void gen_bx(DisasContext *s) +{ + s->is_jmp = DISAS_UPDATE; + gen_op_bx_T0(); +} + + +#if defined(CONFIG_USER_ONLY) +#define gen_ldst(name, s) gen_op_##name##_raw() +#else +#define gen_ldst(name, s) do { \ + if (IS_USER(s)) \ + gen_op_##name##_user(); \ + else \ + gen_op_##name##_kernel(); \ + } while (0) +#endif + +static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t) +{ + int val; + + if (reg == 15) { + /* normaly, since we updated PC, we need only to add one insn */ + if (s->thumb) + val = (long)s->pc + 2; + else + val = (long)s->pc + 4; + gen_op_movl_TN_im[t](val); + } else { + gen_op_movl_TN_reg[t][reg](); + } +} + +static inline void gen_movl_T0_reg(DisasContext *s, int reg) +{ + gen_movl_TN_reg(s, reg, 0); +} + +static inline void gen_movl_T1_reg(DisasContext *s, int reg) +{ + gen_movl_TN_reg(s, reg, 1); +} + +static inline void gen_movl_T2_reg(DisasContext *s, int reg) +{ + gen_movl_TN_reg(s, reg, 2); +} + +static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t) +{ + gen_op_movl_reg_TN[t][reg](); + if (reg == 15) { + s->is_jmp = DISAS_JUMP; + } +} + +static inline void gen_movl_reg_T0(DisasContext *s, int reg) +{ + gen_movl_reg_TN(s, reg, 0); +} + +static inline void gen_movl_reg_T1(DisasContext *s, int reg) +{ + gen_movl_reg_TN(s, reg, 1); +} + +/* Force a TB lookup after an instruction that changes the CPU state. 
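gen_movl_TN_reg above special-cases reads of r15: architecturally a read of the PC yields the current instruction's address plus 8 in ARM state or plus 4 in Thumb state, and since s->pc has already been advanced past the instruction only half of that remains to be added. A standalone sketch of the arithmetic (pc_read_value is an invented helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t pc_read_value(uint32_t insn_addr, int thumb)
{
    uint32_t insn_size = thumb ? 2 : 4;
    uint32_t after_insn = insn_addr + insn_size;   /* this is s->pc */
    return after_insn + insn_size;                 /* value seen in r15 */
}

int main(void)
{
    printf("ARM   insn at 0x1000 reads PC as 0x%x\n", pc_read_value(0x1000, 0));
    printf("Thumb insn at 0x1000 reads PC as 0x%x\n", pc_read_value(0x1000, 1));
    return 0;
}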
*/ +static inline void gen_lookup_tb(DisasContext *s) +{ + gen_op_movl_T0_im(s->pc); + gen_movl_reg_T0(s, 15); + s->is_jmp = DISAS_UPDATE; +} + +static inline void gen_add_data_offset(DisasContext *s, unsigned int insn) +{ + int val, rm, shift, shiftop; + + if (!(insn & (1 << 25))) { + /* immediate */ + val = insn & 0xfff; + if (!(insn & (1 << 23))) + val = -val; + if (val != 0) + gen_op_addl_T1_im(val); + } else { + /* shift/register */ + rm = (insn) & 0xf; + shift = (insn >> 7) & 0x1f; + gen_movl_T2_reg(s, rm); + shiftop = (insn >> 5) & 3; + if (shift != 0) { + gen_shift_T2_im[shiftop](shift); + } else if (shiftop != 0) { + gen_shift_T2_0[shiftop](); + } + if (!(insn & (1 << 23))) + gen_op_subl_T1_T2(); + else + gen_op_addl_T1_T2(); + } +} + +static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, + int extra) +{ + int val, rm; + + if (insn & (1 << 22)) { + /* immediate */ + val = (insn & 0xf) | ((insn >> 4) & 0xf0); + val += extra; + if (!(insn & (1 << 23))) + val = -val; + if (val != 0) + gen_op_addl_T1_im(val); + } else { + /* register */ + if (extra) + gen_op_addl_T1_im(extra); + rm = (insn) & 0xf; + gen_movl_T2_reg(s, rm); + if (!(insn & (1 << 23))) + gen_op_subl_T1_T2(); + else + gen_op_addl_T1_T2(); + } +} + +#define VFP_OP(name) \ +static inline void gen_vfp_##name(int dp) \ +{ \ + if (dp) \ + gen_op_vfp_##name##d(); \ + else \ + gen_op_vfp_##name##s(); \ +} + +VFP_OP(add) +VFP_OP(sub) +VFP_OP(mul) +VFP_OP(div) +VFP_OP(neg) +VFP_OP(abs) +VFP_OP(sqrt) +VFP_OP(cmp) +VFP_OP(cmpe) +VFP_OP(F1_ld0) +VFP_OP(uito) +VFP_OP(sito) +VFP_OP(toui) +VFP_OP(touiz) +VFP_OP(tosi) +VFP_OP(tosiz) + +#undef VFP_OP + +static inline void gen_vfp_ld(DisasContext *s, int dp) +{ + if (dp) + gen_ldst(vfp_ldd, s); + else + gen_ldst(vfp_lds, s); +} + +static inline void gen_vfp_st(DisasContext *s, int dp) +{ + if (dp) + gen_ldst(vfp_std, s); + else + gen_ldst(vfp_sts, s); +} + +static inline long +vfp_reg_offset (int dp, int reg) +{ + if (dp) + return offsetof(CPUARMState, vfp.regs[reg]); + else if (reg & 1) { + return offsetof(CPUARMState, vfp.regs[reg >> 1]) + + offsetof(CPU_DoubleU, l.upper); + } else { + return offsetof(CPUARMState, vfp.regs[reg >> 1]) + + offsetof(CPU_DoubleU, l.lower); + } +} +static inline void gen_mov_F0_vreg(int dp, int reg) +{ + if (dp) + gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg)); + else + gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg)); +} + +static inline void gen_mov_F1_vreg(int dp, int reg) +{ + if (dp) + gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg)); + else + gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg)); +} + +static inline void gen_mov_vreg_F0(int dp, int reg) +{ + if (dp) + gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg)); + else + gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg)); +} + +/* Disassemble system coprocessor (cp15) instruction. Return nonzero if + instruction is not defined. */ +static int disas_cp15_insn(DisasContext *s, uint32_t insn) +{ + uint32_t rd; + + /* ??? Some cp15 registers are accessible from userspace. */ + if (IS_USER(s)) { + return 1; + } + if ((insn & 0x0fff0fff) == 0x0e070f90 + || (insn & 0x0fff0fff) == 0x0e070f58) { + /* Wait for interrupt. */ + gen_op_movl_T0_im((long)s->pc); + gen_op_movl_reg_TN[0][15](); + gen_op_wfi(); + s->is_jmp = DISAS_JUMP; + return 0; + } + rd = (insn >> 12) & 0xf; + if (insn & (1 << 20)) { + gen_op_movl_T0_cp15(insn); + /* If the destination register is r15 then sets condition codes. 
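vfp_reg_offset above encodes the VFP register overlay: each single-precision register s<n> is one 32-bit half of double-precision register d<n/2>, with odd-numbered singles in the upper half. A small standalone illustration (not part of the patch):

#include <stdio.h>

int main(void)
{
    for (int sreg = 0; sreg < 8; sreg++)
        printf("s%-2d -> %s half of d%d\n",
               sreg, (sreg & 1) ? "upper" : "lower", sreg >> 1);
    return 0;
}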
*/ + if (rd != 15) + gen_movl_reg_T0(s, rd); + } else { + gen_movl_T0_reg(s, rd); + gen_op_movl_cp15_T0(insn); + } + gen_lookup_tb(s); + return 0; +} + +/* Disassemble a VFP instruction. Returns nonzero if an error occured + (ie. an undefined instruction). */ +static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn) +{ + uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; + int dp, veclen; + + if (!arm_feature(env, ARM_FEATURE_VFP)) + return 1; + + if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) { + /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */ + if ((insn & 0x0fe00fff) != 0x0ee00a10) + return 1; + rn = (insn >> 16) & 0xf; + if (rn != 0 && rn != 8) + return 1; + } + dp = ((insn & 0xf00) == 0xb00); + switch ((insn >> 24) & 0xf) { + case 0xe: + if (insn & (1 << 4)) { + /* single register transfer */ + if ((insn & 0x6f) != 0x00) + return 1; + rd = (insn >> 12) & 0xf; + if (dp) { + if (insn & 0x80) + return 1; + rn = (insn >> 16) & 0xf; + /* Get the existing value even for arm->vfp moves because + we only set half the register. */ + gen_mov_F0_vreg(1, rn); + gen_op_vfp_mrrd(); + if (insn & (1 << 20)) { + /* vfp->arm */ + if (insn & (1 << 21)) + gen_movl_reg_T1(s, rd); + else + gen_movl_reg_T0(s, rd); + } else { + /* arm->vfp */ + if (insn & (1 << 21)) + gen_movl_T1_reg(s, rd); + else + gen_movl_T0_reg(s, rd); + gen_op_vfp_mdrr(); + gen_mov_vreg_F0(dp, rn); + } + } else { + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); + if (insn & (1 << 20)) { + /* vfp->arm */ + if (insn & (1 << 21)) { + /* system register */ + rn >>= 1; + switch (rn) { + case ARM_VFP_FPSID: + case ARM_VFP_FPEXC: + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + gen_op_vfp_movl_T0_xreg(rn); + break; + case ARM_VFP_FPSCR: + if (rd == 15) + gen_op_vfp_movl_T0_fpscr_flags(); + else + gen_op_vfp_movl_T0_fpscr(); + break; + default: + return 1; + } + } else { + gen_mov_F0_vreg(0, rn); + gen_op_vfp_mrs(); + } + if (rd == 15) { + /* Set the 4 flag bits in the CPSR. */ + gen_op_movl_cpsr_T0(0xf0000000); + } else + gen_movl_reg_T0(s, rd); + } else { + /* arm->vfp */ + gen_movl_T0_reg(s, rd); + if (insn & (1 << 21)) { + rn >>= 1; + /* system register */ + switch (rn) { + case ARM_VFP_FPSID: + /* Writes are ignored. */ + break; + case ARM_VFP_FPSCR: + gen_op_vfp_movl_fpscr_T0(); + gen_lookup_tb(s); + break; + case ARM_VFP_FPEXC: + gen_op_vfp_movl_xreg_T0(rn); + gen_lookup_tb(s); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + gen_op_vfp_movl_xreg_T0(rn); + break; + default: + return 1; + } + } else { + gen_op_vfp_msr(); + gen_mov_vreg_F0(0, rn); + } + } + } + } else { + /* data processing */ + /* The opcode is in bits 23, 21, 20 and 6. */ + op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); + if (dp) { + if (op == 15) { + /* rn is opcode */ + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); + } else { + /* rn is register number */ + if (insn & (1 << 7)) + return 1; + rn = (insn >> 16) & 0xf; + } + + if (op == 15 && (rn == 15 || rn > 17)) { + /* Integer or single precision destination. */ + rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); + } else { + if (insn & (1 << 22)) + return 1; + rd = (insn >> 12) & 0xf; + } + + if (op == 15 && (rn == 16 || rn == 17)) { + /* Integer source. */ + rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1); + } else { + if (insn & (1 << 5)) + return 1; + rm = insn & 0xf; + } + } else { + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); + if (op == 15 && rn == 15) { + /* Double precision destination. 
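disas_vfp_insn reassembles single-precision register numbers from a 4-bit field plus one extra bit scattered elsewhere in the word. The three expressions below mirror the ones used in the decoder above, wrapped in invented helper names; the Fn/Fd/Fm and N/D/M labels in the comments follow the usual VFP field naming:

#include <stdint.h>
#include <stdio.h>

static int vfp_sreg_n(uint32_t insn)   /* Fn:N -> s register */
{
    return ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
}

static int vfp_sreg_d(uint32_t insn)   /* Fd:D -> s register */
{
    return ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
}

static int vfp_sreg_m(uint32_t insn)   /* Fm:M -> s register */
{
    return ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
}

int main(void)
{
    uint32_t insn = 0xee300a41;        /* arbitrary bit pattern for the demo */
    printf("sn=%d sd=%d sm=%d\n",
           vfp_sreg_n(insn), vfp_sreg_d(insn), vfp_sreg_m(insn));
    return 0;
}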
*/ + if (insn & (1 << 22)) + return 1; + rd = (insn >> 12) & 0xf; + } else + rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); + rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1); + } + + veclen = env->vfp.vec_len; + if (op == 15 && rn > 3) + veclen = 0; + + /* Shut up compiler warnings. */ + delta_m = 0; + delta_d = 0; + bank_mask = 0; + + if (veclen > 0) { + if (dp) + bank_mask = 0xc; + else + bank_mask = 0x18; + + /* Figure out what type of vector operation this is. */ + if ((rd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + if (dp) + delta_d = (env->vfp.vec_stride >> 1) + 1; + else + delta_d = env->vfp.vec_stride + 1; + + if ((rm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + /* Load the initial operands. */ + if (op == 15) { + switch (rn) { + case 16: + case 17: + /* Integer source */ + gen_mov_F0_vreg(0, rm); + break; + case 8: + case 9: + /* Compare */ + gen_mov_F0_vreg(dp, rd); + gen_mov_F1_vreg(dp, rm); + break; + case 10: + case 11: + /* Compare with zero */ + gen_mov_F0_vreg(dp, rd); + gen_vfp_F1_ld0(dp); + break; + default: + /* One source operand. */ + gen_mov_F0_vreg(dp, rm); + } + } else { + /* Two source operands. */ + gen_mov_F0_vreg(dp, rn); + gen_mov_F1_vreg(dp, rm); + } + + for (;;) { + /* Perform the calculation. */ + switch (op) { + case 0: /* mac: fd + (fn * fm) */ + gen_vfp_mul(dp); + gen_mov_F1_vreg(dp, rd); + gen_vfp_add(dp); + break; + case 1: /* nmac: fd - (fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_neg(dp); + gen_mov_F1_vreg(dp, rd); + gen_vfp_add(dp); + break; + case 2: /* msc: -fd + (fn * fm) */ + gen_vfp_mul(dp); + gen_mov_F1_vreg(dp, rd); + gen_vfp_sub(dp); + break; + case 3: /* nmsc: -fd - (fn * fm) */ + gen_vfp_mul(dp); + gen_mov_F1_vreg(dp, rd); + gen_vfp_add(dp); + gen_vfp_neg(dp); + break; + case 4: /* mul: fn * fm */ + gen_vfp_mul(dp); + break; + case 5: /* nmul: -(fn * fm) */ + gen_vfp_mul(dp); + gen_vfp_neg(dp); + break; + case 6: /* add: fn + fm */ + gen_vfp_add(dp); + break; + case 7: /* sub: fn - fm */ + gen_vfp_sub(dp); + break; + case 8: /* div: fn / fm */ + gen_vfp_div(dp); + break; + case 15: /* extension space */ + switch (rn) { + case 0: /* cpy */ + /* no-op */ + break; + case 1: /* abs */ + gen_vfp_abs(dp); + break; + case 2: /* neg */ + gen_vfp_neg(dp); + break; + case 3: /* sqrt */ + gen_vfp_sqrt(dp); + break; + case 8: /* cmp */ + gen_vfp_cmp(dp); + break; + case 9: /* cmpe */ + gen_vfp_cmpe(dp); + break; + case 10: /* cmpz */ + gen_vfp_cmp(dp); + break; + case 11: /* cmpez */ + gen_vfp_F1_ld0(dp); + gen_vfp_cmpe(dp); + break; + case 15: /* single<->double conversion */ + if (dp) + gen_op_vfp_fcvtsd(); + else + gen_op_vfp_fcvtds(); + break; + case 16: /* fuito */ + gen_vfp_uito(dp); + break; + case 17: /* fsito */ + gen_vfp_sito(dp); + break; + case 24: /* ftoui */ + gen_vfp_toui(dp); + break; + case 25: /* ftouiz */ + gen_vfp_touiz(dp); + break; + case 26: /* ftosi */ + gen_vfp_tosi(dp); + break; + case 27: /* ftosiz */ + gen_vfp_tosiz(dp); + break; + default: /* undefined */ + printf ("rn:%d\n", rn); + return 1; + } + break; + default: /* undefined */ + printf ("op:%d\n", op); + return 1; + } + + /* Write back the result. */ + if (op == 15 && (rn >= 8 && rn <= 11)) + ; /* Comparison, do nothing. */ + else if (op == 15 && rn > 17) + /* Integer result. 
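The short-vector setup above is driven by the FPSCR LEN field cached in env->vfp.vec_len; judging from the loop structure, LEN holds the vector length minus one, so a value of 0 gives plain scalar behaviour and the surrounding loop executes LEN+1 times. An illustrative table (this reading is an assumption drawn from the code, not stated explicitly in the patch):

#include <stdio.h>

int main(void)
{
    for (unsigned len_field = 0; len_field < 8; len_field++)
        printf("FPSCR LEN=%u -> %u operation(s) per instruction\n",
               len_field, len_field + 1);
    return 0;
}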
*/ + gen_mov_vreg_F0(0, rd); + else if (op == 15 && rn == 15) + /* conversion */ + gen_mov_vreg_F0(!dp, rd); + else + gen_mov_vreg_F0(dp, rd); + + /* break out of the loop if we have finished */ + if (veclen == 0) + break; + + if (op == 15 && delta_m == 0) { + /* single source one-many */ + while (veclen--) { + rd = ((rd + delta_d) & (bank_mask - 1)) + | (rd & bank_mask); + gen_mov_vreg_F0(dp, rd); + } + break; + } + /* Setup the next operands. */ + veclen--; + rd = ((rd + delta_d) & (bank_mask - 1)) + | (rd & bank_mask); + + if (op == 15) { + /* One source operand. */ + rm = ((rm + delta_m) & (bank_mask - 1)) + | (rm & bank_mask); + gen_mov_F0_vreg(dp, rm); + } else { + /* Two source operands. */ + rn = ((rn + delta_d) & (bank_mask - 1)) + | (rn & bank_mask); + gen_mov_F0_vreg(dp, rn); + if (delta_m) { + rm = ((rm + delta_m) & (bank_mask - 1)) + | (rm & bank_mask); + gen_mov_F1_vreg(dp, rm); + } + } + } + } + break; + case 0xc: + case 0xd: + if (dp && (insn & (1 << 22))) { + /* two-register transfer */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (dp) { + if (insn & (1 << 5)) + return 1; + rm = insn & 0xf; + } else + rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1); + + if (insn & (1 << 20)) { + /* vfp->arm */ + if (dp) { + gen_mov_F0_vreg(1, rm); + gen_op_vfp_mrrd(); + gen_movl_reg_T0(s, rd); + gen_movl_reg_T1(s, rn); + } else { + gen_mov_F0_vreg(0, rm); + gen_op_vfp_mrs(); + gen_movl_reg_T0(s, rn); + gen_mov_F0_vreg(0, rm + 1); + gen_op_vfp_mrs(); + gen_movl_reg_T0(s, rd); + } + } else { + /* arm->vfp */ + if (dp) { + gen_movl_T0_reg(s, rd); + gen_movl_T1_reg(s, rn); + gen_op_vfp_mdrr(); + gen_mov_vreg_F0(1, rm); + } else { + gen_movl_T0_reg(s, rn); + gen_op_vfp_msr(); + gen_mov_vreg_F0(0, rm); + gen_movl_T0_reg(s, rd); + gen_op_vfp_msr(); + gen_mov_vreg_F0(0, rm + 1); + } + } + } else { + /* Load/store */ + rn = (insn >> 16) & 0xf; + if (dp) + rd = (insn >> 12) & 0xf; + else + rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1); + gen_movl_T1_reg(s, rn); + if ((insn & 0x01200000) == 0x01000000) { + /* Single load/store */ + offset = (insn & 0xff) << 2; + if ((insn & (1 << 23)) == 0) + offset = -offset; + gen_op_addl_T1_im(offset); + if (insn & (1 << 20)) { + gen_vfp_ld(s, dp); + gen_mov_vreg_F0(dp, rd); + } else { + gen_mov_F0_vreg(dp, rd); + gen_vfp_st(s, dp); + } + } else { + /* load/store multiple */ + if (dp) + n = (insn >> 1) & 0x7f; + else + n = insn & 0xff; + + if (insn & (1 << 24)) /* pre-decrement */ + gen_op_addl_T1_im(-((insn & 0xff) << 2)); + + if (dp) + offset = 8; + else + offset = 4; + for (i = 0; i < n; i++) { + if (insn & (1 << 20)) { + /* load */ + gen_vfp_ld(s, dp); + gen_mov_vreg_F0(dp, rd + i); + } else { + /* store */ + gen_mov_F0_vreg(dp, rd + i); + gen_vfp_st(s, dp); + } + gen_op_addl_T1_im(offset); + } + if (insn & (1 << 21)) { + /* writeback */ + if (insn & (1 << 24)) + offset = -offset * n; + else if (dp && (insn & 1)) + offset = 4; + else + offset = 0; + + if (offset != 0) + gen_op_addl_T1_im(offset); + gen_movl_reg_T1(s, rn); + } + } + } + break; + default: + /* Should never happen. 
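For the VFP load/store multiple path above, the register count comes straight from the low immediate byte: the word count for singles, and half of it for doubles (which then advance eight bytes per register). A small sketch with an invented example encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t insn = 0xec910b06;   /* hypothetical FLDM-style pattern, imm8 = 6 */

    printf("doubles: %u regs of 8 bytes\n", (unsigned)((insn >> 1) & 0x7f));
    printf("singles: %u regs of 4 bytes\n", (unsigned)(insn & 0xff));
    return 0;
}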
*/ + return 1; + } + return 0; +} + +static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest) +{ + TranslationBlock *tb; + + tb = s->tb; + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { + if (n == 0) + gen_op_goto_tb0(TBPARAM(tb)); + else + gen_op_goto_tb1(TBPARAM(tb)); + gen_op_movl_T0_im(dest); + gen_op_movl_r15_T0(); + gen_op_movl_T0_im((long)tb + n); + gen_op_exit_tb(); + } else { + gen_op_movl_T0_im(dest); + gen_op_movl_r15_T0(); + gen_op_movl_T0_0(); + gen_op_exit_tb(); + } +} + +static inline void gen_jmp (DisasContext *s, uint32_t dest) +{ + if (__builtin_expect(s->singlestep_enabled, 0)) { + /* An indirect jump so that we still trigger the debug exception. */ + if (s->thumb) + dest |= 1; + gen_op_movl_T0_im(dest); + gen_bx(s); + } else { + gen_goto_tb(s, 0, dest); + s->is_jmp = DISAS_TB_JUMP; + } +} + +static inline void gen_mulxy(int x, int y) +{ + if (x) + gen_op_sarl_T0_im(16); + else + gen_op_sxth_T0(); + if (y) + gen_op_sarl_T1_im(16); + else + gen_op_sxth_T1(); + gen_op_mul_T0_T1(); +} + +/* Return the mask of PSR bits set by a MSR instruction. */ +static uint32_t msr_mask(DisasContext *s, int flags, int spsr) { + uint32_t mask; + + mask = 0; + if (flags & (1 << 0)) + mask |= 0xff; + if (flags & (1 << 1)) + mask |= 0xff00; + if (flags & (1 << 2)) + mask |= 0xff0000; + if (flags & (1 << 3)) + mask |= 0xff000000; + /* Mask out undefined bits. */ + mask &= 0xf90f03ff; + /* Mask out state bits. */ + if (!spsr) + mask &= ~0x01000020; + /* Mask out privileged bits. */ + if (IS_USER(s)) + mask &= 0xf80f0200; + return mask; +} + +/* Returns nonzero if access to the PSR is not permitted. */ +static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr) +{ + if (spsr) { + /* ??? This is also undefined in system mode. */ + if (IS_USER(s)) + return 1; + gen_op_movl_spsr_T0(mask); + } else { + gen_op_movl_cpsr_T0(mask); + } + gen_lookup_tb(s); + return 0; +} + +static void gen_exception_return(DisasContext *s) +{ + gen_op_movl_reg_TN[0][15](); + gen_op_movl_T0_spsr(); + gen_op_movl_cpsr_T0(0xffffffff); + s->is_jmp = DISAS_UPDATE; +} + +static void disas_arm_insn(CPUState * env, DisasContext *s) +{ + unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh; + + insn = ldl_code(s->pc); + s->pc += 4; + + cond = insn >> 28; + if (cond == 0xf){ + /* Unconditional instructions. */ + if ((insn & 0x0d70f000) == 0x0550f000) + return; /* PLD */ + else if ((insn & 0x0e000000) == 0x0a000000) { + /* branch link and change to thumb (blx ) */ + int32_t offset; + + val = (uint32_t)s->pc; + gen_op_movl_T0_im(val); + gen_movl_reg_T0(s, 14); + /* Sign-extend the 24-bit offset */ + offset = (((int32_t)insn) << 8) >> 8; + /* offset * 4 + bit24 * 2 + (thumb bit) */ + val += (offset << 2) | ((insn >> 23) & 2) | 1; + /* pipeline offset */ + val += 4; + gen_op_movl_T0_im(val); + gen_bx(s); + return; + } else if ((insn & 0x0fe00000) == 0x0c400000) { + /* Coprocessor double register transfer. */ + } else if ((insn & 0x0f000010) == 0x0e000010) { + /* Additional coprocessor register transfer. */ + } else if ((insn & 0x0ff10010) == 0x01000000) { + /* cps (privileged) */ + } else if ((insn & 0x0ffffdff) == 0x01010000) { + /* setend */ + if (insn & (1 << 9)) { + /* BE8 mode not implemented. 
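The unconditional BLX handling above sign-extends the 24-bit immediate with a shift pair and folds in the H bit as a halfword step, ending up relative to the instruction address plus 8. A standalone sketch of that arithmetic (arm_blx_imm_target is an invented name; an arithmetic right shift is assumed, as in the generated code, and the translator additionally ORs in bit 0 to flag the switch to Thumb):

#include <stdint.h>
#include <stdio.h>

static uint32_t arm_blx_imm_target(uint32_t insn_addr, uint32_t insn)
{
    int32_t offset = (int32_t)(insn << 8) >> 8;   /* sign-extend imm24 */
    uint32_t h = (insn >> 23) & 2;                /* H bit -> extra halfword */
    return insn_addr + 8 + (offset << 2) + h;
}

int main(void)
{
    /* hypothetical encoding: imm24 = 0, H = 0 -> lands 8 bytes ahead */
    printf("target = 0x%x\n", arm_blx_imm_target(0x8000, 0xfa000000));
    return 0;
}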
*/ + goto illegal_op; + } + return; + } + goto illegal_op; + } + if (cond != 0xe) { + /* if not always execute, we generate a conditional jump to + next instruction */ + s->condlabel = gen_new_label(); + gen_test_cc[cond ^ 1](s->condlabel); + s->condjmp = 1; + //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc); + //s->is_jmp = DISAS_JUMP_NEXT; + } + if ((insn & 0x0f900000) == 0x03000000) { + if ((insn & 0x0fb0f000) != 0x0320f000) + goto illegal_op; + /* CPSR = immediate */ + val = insn & 0xff; + shift = ((insn >> 8) & 0xf) * 2; + if (shift) + val = (val >> shift) | (val << (32 - shift)); + gen_op_movl_T0_im(val); + i = ((insn & (1 << 22)) != 0); + if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i)) + goto illegal_op; + } else if ((insn & 0x0f900000) == 0x01000000 + && (insn & 0x00000090) != 0x00000090) { + /* miscellaneous instructions */ + op1 = (insn >> 21) & 3; + sh = (insn >> 4) & 0xf; + rm = insn & 0xf; + switch (sh) { + case 0x0: /* move program status register */ + if (op1 & 1) { + /* PSR = reg */ + gen_movl_T0_reg(s, rm); + i = ((op1 & 2) != 0); + if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i)) + goto illegal_op; + } else { + /* reg = PSR */ + rd = (insn >> 12) & 0xf; + if (op1 & 2) { + if (IS_USER(s)) + goto illegal_op; + gen_op_movl_T0_spsr(); + } else { + gen_op_movl_T0_cpsr(); + } + gen_movl_reg_T0(s, rd); + } + break; + case 0x1: + if (op1 == 1) { + /* branch/exchange thumb (bx). */ + gen_movl_T0_reg(s, rm); + gen_bx(s); + } else if (op1 == 3) { + /* clz */ + rd = (insn >> 12) & 0xf; + gen_movl_T0_reg(s, rm); + gen_op_clz_T0(); + gen_movl_reg_T0(s, rd); + } else { + goto illegal_op; + } + break; + case 0x2: + if (op1 == 1) { + ARCH(5J); /* bxj */ + /* Trivial implementation equivalent to bx. */ + gen_movl_T0_reg(s, rm); + gen_bx(s); + } else { + goto illegal_op; + } + break; + case 0x3: + if (op1 != 1) + goto illegal_op; + + /* branch link/exchange thumb (blx) */ + val = (uint32_t)s->pc; + gen_op_movl_T0_im(val); + gen_movl_reg_T0(s, 14); + gen_movl_T0_reg(s, rm); + gen_bx(s); + break; + case 0x5: /* saturating add/subtract */ + rd = (insn >> 12) & 0xf; + rn = (insn >> 16) & 0xf; + gen_movl_T0_reg(s, rm); + gen_movl_T1_reg(s, rn); + if (op1 & 2) + gen_op_double_T1_saturate(); + if (op1 & 1) + gen_op_subl_T0_T1_saturate(); + else + gen_op_addl_T0_T1_saturate(); + gen_movl_reg_T0(s, rd); + break; + case 7: /* bkpt */ + gen_op_movl_T0_im((long)s->pc - 4); + gen_op_movl_reg_TN[0][15](); + gen_op_bkpt(); + s->is_jmp = DISAS_JUMP; + break; + case 0x8: /* signed multiply */ + case 0xa: + case 0xc: + case 0xe: + rs = (insn >> 8) & 0xf; + rn = (insn >> 12) & 0xf; + rd = (insn >> 16) & 0xf; + if (op1 == 1) { + /* (32 * 16) >> 16 */ + gen_movl_T0_reg(s, rm); + gen_movl_T1_reg(s, rs); + if (sh & 4) + gen_op_sarl_T1_im(16); + else + gen_op_sxth_T1(); + gen_op_imulw_T0_T1(); + if ((sh & 2) == 0) { + gen_movl_T1_reg(s, rn); + gen_op_addl_T0_T1_setq(); + } + gen_movl_reg_T0(s, rd); + } else { + /* 16 * 16 */ + gen_movl_T0_reg(s, rm); + gen_movl_T1_reg(s, rs); + gen_mulxy(sh & 2, sh & 4); + if (op1 == 2) { + gen_op_signbit_T1_T0(); + gen_op_addq_T0_T1(rn, rd); + gen_movl_reg_T0(s, rn); + gen_movl_reg_T1(s, rd); + } else { + if (op1 == 0) { + gen_movl_T1_reg(s, rn); + gen_op_addl_T0_T1_setq(); + } + gen_movl_reg_T0(s, rd); + } + } + break; + default: + goto illegal_op; + } + } else if (((insn & 0x0e000000) == 0 && + (insn & 0x00000090) != 0x90) || + ((insn & 0x0e000000) == (1 << 25))) { + int set_cc, logic_cc, shiftop; + + op1 = (insn >> 21) & 0xf; + set_cc = (insn >> 20) 
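Both the MSR-immediate path above and the data-processing immediate path that follows expand their operand the same way: an 8-bit constant rotated right by twice the 4-bit rotate field. A standalone sketch (arm_expand_imm is an invented name; the shift==0 guard matches the generated code and avoids an undefined full-width shift):

#include <stdint.h>
#include <stdio.h>

static uint32_t arm_expand_imm(uint32_t insn)
{
    uint32_t val = insn & 0xff;
    uint32_t shift = ((insn >> 8) & 0xf) * 2;

    if (shift)
        val = (val >> shift) | (val << (32 - shift));
    return val;
}

int main(void)
{
    /* imm8 = 0xff, rotate field = 4 -> 0xff rotated right by 8 bits */
    printf("0x%08x\n", arm_expand_imm(0x4ff));
    return 0;
}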
& 1; + logic_cc = table_logic_cc[op1] & set_cc; + + /* data processing instruction */ + if (insn & (1 << 25)) { + /* immediate operand */ + val = insn & 0xff; + shift = ((insn >> 8) & 0xf) * 2; + if (shift) + val = (val >> shift) | (val << (32 - shift)); + gen_op_movl_T1_im(val); + if (logic_cc && shift) + gen_op_mov_CF_T1(); + } else { + /* register */ + rm = (insn) & 0xf; + gen_movl_T1_reg(s, rm); + shiftop = (insn >> 5) & 3; + if (!(insn & (1 << 4))) { + shift = (insn >> 7) & 0x1f; + if (shift != 0) { + if (logic_cc) { + gen_shift_T1_im_cc[shiftop](shift); + } else { + gen_shift_T1_im[shiftop](shift); + } + } else if (shiftop != 0) { + if (logic_cc) { + gen_shift_T1_0_cc[shiftop](); + } else { + gen_shift_T1_0[shiftop](); + } + } + } else { + rs = (insn >> 8) & 0xf; + gen_movl_T0_reg(s, rs); + if (logic_cc) { + gen_shift_T1_T0_cc[shiftop](); + } else { + gen_shift_T1_T0[shiftop](); + } + } + } + if (op1 != 0x0f && op1 != 0x0d) { + rn = (insn >> 16) & 0xf; + gen_movl_T0_reg(s, rn); + } + rd = (insn >> 12) & 0xf; + switch(op1) { + case 0x00: + gen_op_andl_T0_T1(); + gen_movl_reg_T0(s, rd); + if (logic_cc) + gen_op_logic_T0_cc(); + break; + case 0x01: + gen_op_xorl_T0_T1(); + gen_movl_reg_T0(s, rd); + if (logic_cc) + gen_op_logic_T0_cc(); + break; + case 0x02: + if (set_cc && rd == 15) { + /* SUBS r15, ... is used for exception return. */ + if (IS_USER(s)) + goto illegal_op; + gen_op_subl_T0_T1_cc(); + gen_exception_return(s); + } else { + if (set_cc) + gen_op_subl_T0_T1_cc(); + else + gen_op_subl_T0_T1(); + gen_movl_reg_T0(s, rd); + } + break; + case 0x03: + if (set_cc) + gen_op_rsbl_T0_T1_cc(); + else + gen_op_rsbl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + case 0x04: + if (set_cc) + gen_op_addl_T0_T1_cc(); + else + gen_op_addl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + case 0x05: + if (set_cc) + gen_op_adcl_T0_T1_cc(); + else + gen_op_adcl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + case 0x06: + if (set_cc) + gen_op_sbcl_T0_T1_cc(); + else + gen_op_sbcl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + case 0x07: + if (set_cc) + gen_op_rscl_T0_T1_cc(); + else + gen_op_rscl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + case 0x08: + if (set_cc) { + gen_op_andl_T0_T1(); + gen_op_logic_T0_cc(); + } + break; + case 0x09: + if (set_cc) { + gen_op_xorl_T0_T1(); + gen_op_logic_T0_cc(); + } + break; + case 0x0a: + if (set_cc) { + gen_op_subl_T0_T1_cc(); + } + break; + case 0x0b: + if (set_cc) { + gen_op_addl_T0_T1_cc(); + } + break; + case 0x0c: + gen_op_orl_T0_T1(); + gen_movl_reg_T0(s, rd); + if (logic_cc) + gen_op_logic_T0_cc(); + break; + case 0x0d: + if (logic_cc && rd == 15) { + /* MOVS r15, ... is used for exception return. 
*/ + if (IS_USER(s)) + goto illegal_op; + gen_op_movl_T0_T1(); + gen_exception_return(s); + } else { + gen_movl_reg_T1(s, rd); + if (logic_cc) + gen_op_logic_T1_cc(); + } + break; + case 0x0e: + gen_op_bicl_T0_T1(); + gen_movl_reg_T0(s, rd); + if (logic_cc) + gen_op_logic_T0_cc(); + break; + default: + case 0x0f: + gen_op_notl_T1(); + gen_movl_reg_T1(s, rd); + if (logic_cc) + gen_op_logic_T1_cc(); + break; + } + } else { + /* other instructions */ + op1 = (insn >> 24) & 0xf; + switch(op1) { + case 0x0: + case 0x1: + /* multiplies, extra load/stores */ + sh = (insn >> 5) & 3; + if (sh == 0) { + if (op1 == 0x0) { + rd = (insn >> 16) & 0xf; + rn = (insn >> 12) & 0xf; + rs = (insn >> 8) & 0xf; + rm = (insn) & 0xf; + if (((insn >> 22) & 3) == 0) { + /* 32 bit mul */ + gen_movl_T0_reg(s, rs); + gen_movl_T1_reg(s, rm); + gen_op_mul_T0_T1(); + if (insn & (1 << 21)) { + gen_movl_T1_reg(s, rn); + gen_op_addl_T0_T1(); + } + if (insn & (1 << 20)) + gen_op_logic_T0_cc(); + gen_movl_reg_T0(s, rd); + } else { + /* 64 bit mul */ + gen_movl_T0_reg(s, rs); + gen_movl_T1_reg(s, rm); + if (insn & (1 << 22)) + gen_op_imull_T0_T1(); + else + gen_op_mull_T0_T1(); + if (insn & (1 << 21)) /* mult accumulate */ + gen_op_addq_T0_T1(rn, rd); + if (!(insn & (1 << 23))) { /* double accumulate */ + ARCH(6); + gen_op_addq_lo_T0_T1(rn); + gen_op_addq_lo_T0_T1(rd); + } + if (insn & (1 << 20)) + gen_op_logicq_cc(); + gen_movl_reg_T0(s, rn); + gen_movl_reg_T1(s, rd); + } + } else { + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (insn & (1 << 23)) { + /* load/store exclusive */ + goto illegal_op; + } else { + /* SWP instruction */ + rm = (insn) & 0xf; + + gen_movl_T0_reg(s, rm); + gen_movl_T1_reg(s, rn); + if (insn & (1 << 22)) { + gen_ldst(swpb, s); + } else { + gen_ldst(swpl, s); + } + gen_movl_reg_T0(s, rd); + } + } + } else { + int address_offset; + /* Misc load/store */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + gen_movl_T1_reg(s, rn); + if (insn & (1 << 24)) + gen_add_datah_offset(s, insn, 0); + address_offset = 0; + if (insn & (1 << 20)) { + /* load */ + switch(sh) { + case 1: + gen_ldst(lduw, s); + break; + case 2: + gen_ldst(ldsb, s); + break; + default: + case 3: + gen_ldst(ldsw, s); + break; + } + gen_movl_reg_T0(s, rd); + } else if (sh & 2) { + /* doubleword */ + if (sh & 1) { + /* store */ + gen_movl_T0_reg(s, rd); + gen_ldst(stl, s); + gen_op_addl_T1_im(4); + gen_movl_T0_reg(s, rd + 1); + gen_ldst(stl, s); + } else { + /* load */ + gen_ldst(ldl, s); + gen_movl_reg_T0(s, rd); + gen_op_addl_T1_im(4); + gen_ldst(ldl, s); + gen_movl_reg_T0(s, rd + 1); + } + address_offset = -4; + } else { + /* store */ + gen_movl_T0_reg(s, rd); + gen_ldst(stw, s); + } + if (!(insn & (1 << 24))) { + gen_add_datah_offset(s, insn, address_offset); + gen_movl_reg_T1(s, rn); + } else if (insn & (1 << 21)) { + if (address_offset) + gen_op_addl_T1_im(address_offset); + gen_movl_reg_T1(s, rn); + } + } + break; + case 0x4: + case 0x5: + case 0x6: + case 0x7: + /* Check for undefined extension instructions + * per the ARM Bible IE: + * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx + */ + sh = (0xf << 20) | (0xf << 4); + if (op1 == 0x7 && ((insn & sh) == sh)) + { + goto illegal_op; + } + /* load/store byte/word */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + gen_movl_T1_reg(s, rn); + i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000); + if (insn & (1 << 24)) + gen_add_data_offset(s, insn); + if (insn & (1 << 20)) { + /* load */ +#if defined(CONFIG_USER_ONLY) + if (insn & (1 << 22)) + gen_op_ldub_raw(); + else + 
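The long-multiply block above (the "64 bit mul" case) produces a 64-bit product split across RdLo and RdHi, with the accumulate forms adding it to the existing 64-bit pair. A rough standalone sketch of the unsigned multiply-accumulate arithmetic the generated ops implement (variable names are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t rm = 0x12345678, rs = 0x9abcdef0;
    uint32_t rdlo = 1, rdhi = 0;

    uint64_t acc = ((uint64_t)rdhi << 32) | rdlo;   /* existing {RdHi,RdLo} */
    acc += (uint64_t)rm * rs;                       /* UMLAL-style accumulate */
    rdlo = (uint32_t)acc;
    rdhi = (uint32_t)(acc >> 32);
    printf("RdHi=%08x RdLo=%08x\n", rdhi, rdlo);
    return 0;
}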
gen_op_ldl_raw(); +#else + if (insn & (1 << 22)) { + if (i) + gen_op_ldub_user(); + else + gen_op_ldub_kernel(); + } else { + if (i) + gen_op_ldl_user(); + else + gen_op_ldl_kernel(); + } +#endif + if (rd == 15) + gen_bx(s); + else + gen_movl_reg_T0(s, rd); + } else { + /* store */ + gen_movl_T0_reg(s, rd); +#if defined(CONFIG_USER_ONLY) + if (insn & (1 << 22)) + gen_op_stb_raw(); + else + gen_op_stl_raw(); +#else + if (insn & (1 << 22)) { + if (i) + gen_op_stb_user(); + else + gen_op_stb_kernel(); + } else { + if (i) + gen_op_stl_user(); + else + gen_op_stl_kernel(); + } +#endif + } + if (!(insn & (1 << 24))) { + gen_add_data_offset(s, insn); + gen_movl_reg_T1(s, rn); + } else if (insn & (1 << 21)) + gen_movl_reg_T1(s, rn); { + } + break; + case 0x08: + case 0x09: + { + int j, n, user, loaded_base; + /* load/store multiple words */ + /* XXX: store correct base if write back */ + user = 0; + if (insn & (1 << 22)) { + if (IS_USER(s)) + goto illegal_op; /* only usable in supervisor mode */ + + if ((insn & (1 << 15)) == 0) + user = 1; + } + rn = (insn >> 16) & 0xf; + gen_movl_T1_reg(s, rn); + + /* compute total size */ + loaded_base = 0; + n = 0; + for(i=0;i<16;i++) { + if (insn & (1 << i)) + n++; + } + /* XXX: test invalid n == 0 case ? */ + if (insn & (1 << 23)) { + if (insn & (1 << 24)) { + /* pre increment */ + gen_op_addl_T1_im(4); + } else { + /* post increment */ + } + } else { + if (insn & (1 << 24)) { + /* pre decrement */ + gen_op_addl_T1_im(-(n * 4)); + } else { + /* post decrement */ + if (n != 1) + gen_op_addl_T1_im(-((n - 1) * 4)); + } + } + j = 0; + for(i=0;i<16;i++) { + if (insn & (1 << i)) { + if (insn & (1 << 20)) { + /* load */ + gen_ldst(ldl, s); + if (i == 15) { + gen_bx(s); + } else if (user) { + gen_op_movl_user_T0(i); + } else if (i == rn) { + gen_op_movl_T2_T0(); + loaded_base = 1; + } else { + gen_movl_reg_T0(s, i); + } + } else { + /* store */ + if (i == 15) { + /* special case: r15 = PC + 12 */ + val = (long)s->pc + 8; + gen_op_movl_TN_im[0](val); + } else if (user) { + gen_op_movl_T0_user(i); + } else { + gen_movl_T0_reg(s, i); + } + gen_ldst(stl, s); + } + j++; + /* no need to add after the last transfer */ + if (j != n) + gen_op_addl_T1_im(4); + } + } + if (insn & (1 << 21)) { + /* write back */ + if (insn & (1 << 23)) { + if (insn & (1 << 24)) { + /* pre increment */ + } else { + /* post increment */ + gen_op_addl_T1_im(4); + } + } else { + if (insn & (1 << 24)) { + /* pre decrement */ + if (n != 1) + gen_op_addl_T1_im(-((n - 1) * 4)); + } else { + /* post decrement */ + gen_op_addl_T1_im(-(n * 4)); + } + } + gen_movl_reg_T1(s, rn); + } + if (loaded_base) { + gen_op_movl_T0_T2(); + gen_movl_reg_T0(s, rn); + } + if ((insn & (1 << 22)) && !user) { + /* Restore CPSR from SPSR. */ + gen_op_movl_T0_spsr(); + gen_op_movl_cpsr_T0(0xffffffff); + s->is_jmp = DISAS_UPDATE; + } + } + break; + case 0xa: + case 0xb: + { + int32_t offset; + + /* branch (and link) */ + val = (int32_t)s->pc; + if (insn & (1 << 24)) { + gen_op_movl_T0_im(val); + gen_op_movl_reg_TN[0][14](); + } + offset = (((int32_t)insn << 8) >> 8); + val += (offset << 2) + 4; + gen_jmp(s, val); + } + break; + case 0xc: + case 0xd: + case 0xe: + /* Coprocessor. */ + op1 = (insn >> 8) & 0xf; + switch (op1) { + case 10: + case 11: + if (disas_vfp_insn (env, s, insn)) + goto illegal_op; + break; + case 15: + if (disas_cp15_insn (s, insn)) + goto illegal_op; + break; + default: + /* unknown coprocessor. 
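The load/store-multiple case above adjusts the base once up front so the per-register loop can always walk upward in steps of four; the lowest address touched depends only on the P (pre/post) and U (up/down) bits and the register count. A sketch of that starting-address rule (ldm_first_addr is an invented helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t ldm_first_addr(uint32_t rn, int n, int pre, int up)
{
    if (up)
        return pre ? rn + 4 : rn;                      /* IB : IA */
    else
        return pre ? rn - n * 4 : rn - (n - 1) * 4;    /* DB : DA */
}

int main(void)
{
    uint32_t rn = 0x1000;                              /* 4 registers */

    printf("IA %x  IB %x  DA %x  DB %x\n",
           ldm_first_addr(rn, 4, 0, 1), ldm_first_addr(rn, 4, 1, 1),
           ldm_first_addr(rn, 4, 0, 0), ldm_first_addr(rn, 4, 1, 0));
    return 0;
}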
*/ + goto illegal_op; + } + break; + case 0xf: + /* swi */ + gen_op_movl_T0_im((long)s->pc); + gen_op_movl_reg_TN[0][15](); + gen_op_swi(); + s->is_jmp = DISAS_JUMP; + break; + default: + illegal_op: + gen_op_movl_T0_im((long)s->pc - 4); + gen_op_movl_reg_TN[0][15](); + gen_op_undef_insn(); + s->is_jmp = DISAS_JUMP; + break; + } + } +} + +static void disas_thumb_insn(DisasContext *s) +{ + uint32_t val, insn, op, rm, rn, rd, shift, cond; + int32_t offset; + int i; + + insn = lduw_code(s->pc); + s->pc += 2; + + switch (insn >> 12) { + case 0: case 1: + rd = insn & 7; + op = (insn >> 11) & 3; + if (op == 3) { + /* add/subtract */ + rn = (insn >> 3) & 7; + gen_movl_T0_reg(s, rn); + if (insn & (1 << 10)) { + /* immediate */ + gen_op_movl_T1_im((insn >> 6) & 7); + } else { + /* reg */ + rm = (insn >> 6) & 7; + gen_movl_T1_reg(s, rm); + } + if (insn & (1 << 9)) + gen_op_subl_T0_T1_cc(); + else + gen_op_addl_T0_T1_cc(); + gen_movl_reg_T0(s, rd); + } else { + /* shift immediate */ + rm = (insn >> 3) & 7; + shift = (insn >> 6) & 0x1f; + gen_movl_T0_reg(s, rm); + gen_shift_T0_im_thumb[op](shift); + gen_movl_reg_T0(s, rd); + } + break; + case 2: case 3: + /* arithmetic large immediate */ + op = (insn >> 11) & 3; + rd = (insn >> 8) & 0x7; + if (op == 0) { + gen_op_movl_T0_im(insn & 0xff); + } else { + gen_movl_T0_reg(s, rd); + gen_op_movl_T1_im(insn & 0xff); + } + switch (op) { + case 0: /* mov */ + gen_op_logic_T0_cc(); + break; + case 1: /* cmp */ + gen_op_subl_T0_T1_cc(); + break; + case 2: /* add */ + gen_op_addl_T0_T1_cc(); + break; + case 3: /* sub */ + gen_op_subl_T0_T1_cc(); + break; + } + if (op != 1) + gen_movl_reg_T0(s, rd); + break; + case 4: + if (insn & (1 << 11)) { + rd = (insn >> 8) & 7; + /* load pc-relative. Bit 1 of PC is ignored. */ + val = s->pc + 2 + ((insn & 0xff) * 4); + val &= ~(uint32_t)2; + gen_op_movl_T1_im(val); + gen_ldst(ldl, s); + gen_movl_reg_T0(s, rd); + break; + } + if (insn & (1 << 10)) { + /* data processing extended or blx */ + rd = (insn & 7) | ((insn >> 4) & 8); + rm = (insn >> 3) & 0xf; + op = (insn >> 8) & 3; + switch (op) { + case 0: /* add */ + gen_movl_T0_reg(s, rd); + gen_movl_T1_reg(s, rm); + gen_op_addl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + case 1: /* cmp */ + gen_movl_T0_reg(s, rd); + gen_movl_T1_reg(s, rm); + gen_op_subl_T0_T1_cc(); + break; + case 2: /* mov/cpy */ + gen_movl_T0_reg(s, rm); + gen_movl_reg_T0(s, rd); + break; + case 3:/* branch [and link] exchange thumb register */ + if (insn & (1 << 7)) { + val = (uint32_t)s->pc | 1; + gen_op_movl_T1_im(val); + gen_movl_reg_T1(s, 14); + } + gen_movl_T0_reg(s, rm); + gen_bx(s); + break; + } + break; + } + + /* data processing register */ + rd = insn & 7; + rm = (insn >> 3) & 7; + op = (insn >> 6) & 0xf; + if (op == 2 || op == 3 || op == 4 || op == 7) { + /* the shift/rotate ops want the operands backwards */ + val = rm; + rm = rd; + rd = val; + val = 1; + } else { + val = 0; + } + + if (op == 9) /* neg */ + gen_op_movl_T0_im(0); + else if (op != 0xf) /* mvn doesn't read its first operand */ + gen_movl_T0_reg(s, rd); + + gen_movl_T1_reg(s, rm); + switch (op) { + case 0x0: /* and */ + gen_op_andl_T0_T1(); + gen_op_logic_T0_cc(); + break; + case 0x1: /* eor */ + gen_op_xorl_T0_T1(); + gen_op_logic_T0_cc(); + break; + case 0x2: /* lsl */ + gen_op_shll_T1_T0_cc(); + gen_op_logic_T1_cc(); + break; + case 0x3: /* lsr */ + gen_op_shrl_T1_T0_cc(); + gen_op_logic_T1_cc(); + break; + case 0x4: /* asr */ + gen_op_sarl_T1_T0_cc(); + gen_op_logic_T1_cc(); + break; + case 0x5: /* adc */ + 
gen_op_adcl_T0_T1_cc(); + break; + case 0x6: /* sbc */ + gen_op_sbcl_T0_T1_cc(); + break; + case 0x7: /* ror */ + gen_op_rorl_T1_T0_cc(); + gen_op_logic_T1_cc(); + break; + case 0x8: /* tst */ + gen_op_andl_T0_T1(); + gen_op_logic_T0_cc(); + rd = 16; + break; + case 0x9: /* neg */ + gen_op_subl_T0_T1_cc(); + break; + case 0xa: /* cmp */ + gen_op_subl_T0_T1_cc(); + rd = 16; + break; + case 0xb: /* cmn */ + gen_op_addl_T0_T1_cc(); + rd = 16; + break; + case 0xc: /* orr */ + gen_op_orl_T0_T1(); + gen_op_logic_T0_cc(); + break; + case 0xd: /* mul */ + gen_op_mull_T0_T1(); + gen_op_logic_T0_cc(); + break; + case 0xe: /* bic */ + gen_op_bicl_T0_T1(); + gen_op_logic_T0_cc(); + break; + case 0xf: /* mvn */ + gen_op_notl_T1(); + gen_op_logic_T1_cc(); + val = 1; + rm = rd; + break; + } + if (rd != 16) { + if (val) + gen_movl_reg_T1(s, rm); + else + gen_movl_reg_T0(s, rd); + } + break; + + case 5: + /* load/store register offset. */ + rd = insn & 7; + rn = (insn >> 3) & 7; + rm = (insn >> 6) & 7; + op = (insn >> 9) & 7; + gen_movl_T1_reg(s, rn); + gen_movl_T2_reg(s, rm); + gen_op_addl_T1_T2(); + + if (op < 3) /* store */ + gen_movl_T0_reg(s, rd); + + switch (op) { + case 0: /* str */ + gen_ldst(stl, s); + break; + case 1: /* strh */ + gen_ldst(stw, s); + break; + case 2: /* strb */ + gen_ldst(stb, s); + break; + case 3: /* ldrsb */ + gen_ldst(ldsb, s); + break; + case 4: /* ldr */ + gen_ldst(ldl, s); + break; + case 5: /* ldrh */ + gen_ldst(lduw, s); + break; + case 6: /* ldrb */ + gen_ldst(ldub, s); + break; + case 7: /* ldrsh */ + gen_ldst(ldsw, s); + break; + } + if (op >= 3) /* load */ + gen_movl_reg_T0(s, rd); + break; + + case 6: + /* load/store word immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + gen_movl_T1_reg(s, rn); + val = (insn >> 4) & 0x7c; + gen_op_movl_T2_im(val); + gen_op_addl_T1_T2(); + + if (insn & (1 << 11)) { + /* load */ + gen_ldst(ldl, s); + gen_movl_reg_T0(s, rd); + } else { + /* store */ + gen_movl_T0_reg(s, rd); + gen_ldst(stl, s); + } + break; + + case 7: + /* load/store byte immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + gen_movl_T1_reg(s, rn); + val = (insn >> 6) & 0x1f; + gen_op_movl_T2_im(val); + gen_op_addl_T1_T2(); + + if (insn & (1 << 11)) { + /* load */ + gen_ldst(ldub, s); + gen_movl_reg_T0(s, rd); + } else { + /* store */ + gen_movl_T0_reg(s, rd); + gen_ldst(stb, s); + } + break; + + case 8: + /* load/store halfword immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + gen_movl_T1_reg(s, rn); + val = (insn >> 5) & 0x3e; + gen_op_movl_T2_im(val); + gen_op_addl_T1_T2(); + + if (insn & (1 << 11)) { + /* load */ + gen_ldst(lduw, s); + gen_movl_reg_T0(s, rd); + } else { + /* store */ + gen_movl_T0_reg(s, rd); + gen_ldst(stw, s); + } + break; + + case 9: + /* load/store from stack */ + rd = (insn >> 8) & 7; + gen_movl_T1_reg(s, 13); + val = (insn & 0xff) * 4; + gen_op_movl_T2_im(val); + gen_op_addl_T1_T2(); + + if (insn & (1 << 11)) { + /* load */ + gen_ldst(ldl, s); + gen_movl_reg_T0(s, rd); + } else { + /* store */ + gen_movl_T0_reg(s, rd); + gen_ldst(stl, s); + } + break; + + case 10: + /* add to high reg */ + rd = (insn >> 8) & 7; + if (insn & (1 << 11)) { + /* SP */ + gen_movl_T0_reg(s, 13); + } else { + /* PC. bit 1 is ignored. 
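The Thumb immediate-offset loads and stores above scale the 5-bit offset by the access size, and the shift-and-mask expressions do the scaling in one step. A standalone sketch mirroring those expressions (the example encoding is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t insn = 0x6ace;                    /* arbitrary Thumb-format pattern */
    unsigned imm5 = (insn >> 6) & 0x1f;
    unsigned word_off = (insn >> 4) & 0x7c;    /* imm5 * 4, as decoded above */
    unsigned half_off = (insn >> 5) & 0x3e;    /* imm5 * 2 */
    unsigned byte_off = (insn >> 6) & 0x1f;    /* imm5 * 1 */

    printf("imm5=%u -> word %u, half %u, byte %u\n",
           imm5, word_off, half_off, byte_off);
    return 0;
}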
*/ + gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2); + } + val = (insn & 0xff) * 4; + gen_op_movl_T1_im(val); + gen_op_addl_T0_T1(); + gen_movl_reg_T0(s, rd); + break; + + case 11: + /* misc */ + op = (insn >> 8) & 0xf; + switch (op) { + case 0: + /* adjust stack pointer */ + gen_movl_T1_reg(s, 13); + val = (insn & 0x7f) * 4; + if (insn & (1 << 7)) + val = -(int32_t)val; + gen_op_movl_T2_im(val); + gen_op_addl_T1_T2(); + gen_movl_reg_T1(s, 13); + break; + + case 4: case 5: case 0xc: case 0xd: + /* push/pop */ + gen_movl_T1_reg(s, 13); + if (insn & (1 << 8)) + offset = 4; + else + offset = 0; + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) + offset += 4; + } + if ((insn & (1 << 11)) == 0) { + gen_op_movl_T2_im(-offset); + gen_op_addl_T1_T2(); + } + gen_op_movl_T2_im(4); + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* pop */ + gen_ldst(ldl, s); + gen_movl_reg_T0(s, i); + } else { + /* push */ + gen_movl_T0_reg(s, i); + gen_ldst(stl, s); + } + /* advance to the next address. */ + gen_op_addl_T1_T2(); + } + } + if (insn & (1 << 8)) { + if (insn & (1 << 11)) { + /* pop pc */ + gen_ldst(ldl, s); + /* don't set the pc until the rest of the instruction + has completed */ + } else { + /* push lr */ + gen_movl_T0_reg(s, 14); + gen_ldst(stl, s); + } + gen_op_addl_T1_T2(); + } + if ((insn & (1 << 11)) == 0) { + gen_op_movl_T2_im(-offset); + gen_op_addl_T1_T2(); + } + /* write back the new stack pointer */ + gen_movl_reg_T1(s, 13); + /* set the new PC value */ + if ((insn & 0x0900) == 0x0900) + gen_bx(s); + break; + + case 0xe: /* bkpt */ + gen_op_movl_T0_im((long)s->pc - 2); + gen_op_movl_reg_TN[0][15](); + gen_op_bkpt(); + s->is_jmp = DISAS_JUMP; + break; + + default: + goto undef; + } + break; + + case 12: + /* load/store multiple */ + rn = (insn >> 8) & 0x7; + gen_movl_T1_reg(s, rn); + gen_op_movl_T2_im(4); + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* load */ + gen_ldst(ldl, s); + gen_movl_reg_T0(s, i); + } else { + /* store */ + gen_movl_T0_reg(s, i); + gen_ldst(stl, s); + } + /* advance to the next address */ + gen_op_addl_T1_T2(); + } + } + /* Base register writeback. */ + if ((insn & (1 << rn)) == 0) + gen_movl_reg_T1(s, rn); + break; + + case 13: + /* conditional branch or swi */ + cond = (insn >> 8) & 0xf; + if (cond == 0xe) + goto undef; + + if (cond == 0xf) { + /* swi */ + gen_op_movl_T0_im((long)s->pc | 1); + /* Don't set r15. */ + gen_op_movl_reg_TN[0][15](); + gen_op_swi(); + s->is_jmp = DISAS_JUMP; + break; + } + /* generate a conditional jump to next instruction */ + s->condlabel = gen_new_label(); + gen_test_cc[cond ^ 1](s->condlabel); + s->condjmp = 1; + //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc); + //s->is_jmp = DISAS_JUMP_NEXT; + gen_movl_T1_reg(s, 15); + + /* jump to the offset */ + val = (uint32_t)s->pc + 2; + offset = ((int32_t)insn << 24) >> 24; + val += offset << 1; + gen_jmp(s, val); + break; + + case 14: + /* unconditional branch */ + if (insn & (1 << 11)) { + /* Second half of blx. 
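The push/pop handling above first walks the 8-bit register list (plus the extra LR/PC bit) to size the transfer before adjusting the stack pointer. A small sketch of that sizing step (push_bytes is an invented helper):

#include <stdint.h>
#include <stdio.h>

static int push_bytes(uint16_t insn)
{
    int offset = (insn & (1 << 8)) ? 4 : 0;    /* LR on push, PC on pop */

    for (int i = 0; i < 8; i++)
        if (insn & (1 << i))
            offset += 4;
    return offset;
}

int main(void)
{
    printf("%d bytes\n", push_bytes(0x01f0));  /* r4-r7 plus lr -> 20 bytes */
    return 0;
}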
*/ + offset = ((insn & 0x7ff) << 1); + gen_movl_T0_reg(s, 14); + gen_op_movl_T1_im(offset); + gen_op_addl_T0_T1(); + gen_op_movl_T1_im(0xfffffffc); + gen_op_andl_T0_T1(); + + val = (uint32_t)s->pc; + gen_op_movl_T1_im(val | 1); + gen_movl_reg_T1(s, 14); + gen_bx(s); + break; + } + val = (uint32_t)s->pc; + offset = ((int32_t)insn << 21) >> 21; + val += (offset << 1) + 2; + gen_jmp(s, val); + break; + + case 15: + /* branch and link [and switch to arm] */ + if ((s->pc & ~TARGET_PAGE_MASK) == 0) { + /* Instruction spans a page boundary. Implement it as two + 16-bit instructions in case the second half causes an + prefetch abort. */ + offset = ((int32_t)insn << 21) >> 9; + val = s->pc + 2 + offset; + gen_op_movl_T0_im(val); + gen_movl_reg_T0(s, 14); + break; + } + if (insn & (1 << 11)) { + /* Second half of bl. */ + offset = ((insn & 0x7ff) << 1) | 1; + gen_movl_T0_reg(s, 14); + gen_op_movl_T1_im(offset); + gen_op_addl_T0_T1(); + + val = (uint32_t)s->pc; + gen_op_movl_T1_im(val | 1); + gen_movl_reg_T1(s, 14); + gen_bx(s); + break; + } + offset = ((int32_t)insn << 21) >> 10; + insn = lduw_code(s->pc); + offset |= insn & 0x7ff; + + val = (uint32_t)s->pc + 2; + gen_op_movl_T1_im(val | 1); + gen_movl_reg_T1(s, 14); + + val += offset << 1; + if (insn & (1 << 12)) { + /* bl */ + gen_jmp(s, val); + } else { + /* blx */ + val &= ~(uint32_t)2; + gen_op_movl_T0_im(val); + gen_bx(s); + } + } + return; +undef: + gen_op_movl_T0_im((long)s->pc - 2); + gen_op_movl_reg_TN[0][15](); + gen_op_undef_insn(); + s->is_jmp = DISAS_JUMP; +} + +/* generate intermediate code in gen_opc_buf and gen_opparam_buf for + basic block 'tb'. If search_pc is TRUE, also generate PC + information for each intermediate instruction. */ +static inline int gen_intermediate_code_internal(CPUState *env, + TranslationBlock *tb, + int search_pc) +{ + DisasContext dc1, *dc = &dc1; + uint16_t *gen_opc_end; + int j, lj; + target_ulong pc_start; + uint32_t next_page_start; + + /* generate intermediate code */ + pc_start = tb->pc; + + dc->tb = tb; + + gen_opc_ptr = gen_opc_buf; + gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; + gen_opparam_ptr = gen_opparam_buf; + + dc->is_jmp = DISAS_NEXT; + dc->pc = pc_start; + dc->singlestep_enabled = env->singlestep_enabled; + dc->condjmp = 0; + dc->thumb = env->thumb; +#if !defined(CONFIG_USER_ONLY) + dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR; +#endif + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + nb_gen_labels = 0; + lj = -1; + do { + if (env->nb_breakpoints > 0) { + for(j = 0; j < env->nb_breakpoints; j++) { + if (env->breakpoints[j] == dc->pc) { + gen_op_movl_T0_im((long)dc->pc); + gen_op_movl_reg_TN[0][15](); + gen_op_debug(); + dc->is_jmp = DISAS_JUMP; + break; + } + } + } + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + gen_opc_instr_start[lj++] = 0; + } + gen_opc_pc[lj] = dc->pc; + gen_opc_instr_start[lj] = 1; + } + + if (env->thumb) + disas_thumb_insn(dc); + else + disas_arm_insn(env, dc); + + if (dc->condjmp && !dc->is_jmp) { + gen_set_label(dc->condlabel); + dc->condjmp = 0; + } + /* Translation stops when a conditional branch is enoutered. + * Otherwise the subsequent code could get translated several times. + * Also stop translation when a page boundary is reached. This + * ensures prefech aborts occur at the right place. 
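The Thumb BL/BLX pair above splits a halfword-scaled offset across two 16-bit instructions: the first carries the sign-extended high 11 bits, the second the low 11 bits, relative to the first instruction's address plus 4. A standalone sketch of the combined target calculation (thumb_bl_target is an invented name; an arithmetic right shift is assumed, as in the generated code, and the extra bit-1 clearing for BLX is left out):

#include <stdint.h>
#include <stdio.h>

static uint32_t thumb_bl_target(uint32_t first_addr, uint16_t hi, uint16_t lo)
{
    int32_t offset = (int32_t)((uint32_t)hi << 21) >> 10; /* signext(hi[10:0]) << 11 */
    offset |= lo & 0x7ff;                                 /* low 11 bits */
    return first_addr + 4 + ((uint32_t)offset << 1);      /* halfword scaled */
}

int main(void)
{
    /* BL pair 0xf000 / 0xf802: lands 8 bytes after the first halfword */
    printf("target = 0x%x\n", thumb_bl_target(0x8000, 0xf000, 0xf802));
    return 0;
}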
*/ + } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && + !env->singlestep_enabled && + dc->pc < next_page_start); + /* At this stage dc->condjmp will only be set when the skipped + * instruction was a conditional branch, and the PC has already been + * written. */ + if (__builtin_expect(env->singlestep_enabled, 0)) { + /* Make sure the pc is updated, and raise a debug exception. */ + if (dc->condjmp) { + gen_op_debug(); + gen_set_label(dc->condlabel); + } + if (dc->condjmp || !dc->is_jmp) { + gen_op_movl_T0_im((long)dc->pc); + gen_op_movl_reg_TN[0][15](); + dc->condjmp = 0; + } + gen_op_debug(); + } else { + switch(dc->is_jmp) { + case DISAS_NEXT: + gen_goto_tb(dc, 1, dc->pc); + break; + default: + case DISAS_JUMP: + case DISAS_UPDATE: + /* indicate that the hash table must be used to find the next TB */ + gen_op_movl_T0_0(); + gen_op_exit_tb(); + break; + case DISAS_TB_JUMP: + /* nothing more to generate */ + break; + } + if (dc->condjmp) { + gen_set_label(dc->condlabel); + gen_goto_tb(dc, 1, dc->pc); + dc->condjmp = 0; + } + } + *gen_opc_ptr = INDEX_op_end; + +#ifdef DEBUG_DISAS + if (loglevel & CPU_LOG_TB_IN_ASM) { + fprintf(logfile, "----------------\n"); + fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start)); + target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb); + fprintf(logfile, "\n"); + if (loglevel & (CPU_LOG_TB_OP)) { + fprintf(logfile, "OP:\n"); + dump_ops(gen_opc_buf, gen_opparam_buf); + fprintf(logfile, "\n"); + } + } +#endif + if (search_pc) { + j = gen_opc_ptr - gen_opc_buf; + lj++; + while (lj <= j) + gen_opc_instr_start[lj++] = 0; + tb->size = 0; + } else { + tb->size = dc->pc - pc_start; + } + return 0; +} + +int gen_intermediate_code(CPUState *env, TranslationBlock *tb) +{ + return gen_intermediate_code_internal(env, tb, 0); +} + +int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb) +{ + return gen_intermediate_code_internal(env, tb, 1); +} + +static const char *cpu_mode_names[16] = { + "usr", "fiq", "irq", "svc", "???", "???", "???", "abt", + "???", "???", "???", "und", "???", "???", "???", "sys" +}; +void cpu_dump_state(CPUState *env, FILE *f, + int (*cpu_fprintf)(FILE *f, const char *fmt, ...), + int flags) +{ + int i; + union { + uint32_t i; + float s; + } s0, s1; + CPU_DoubleU d; + uint32_t psr; + + for(i=0;i<16;i++) { + cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]); + if ((i % 4) == 3) + cpu_fprintf(f, "\n"); + else + cpu_fprintf(f, " "); + } + psr = cpsr_read(env); + cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n", + psr, + psr & (1 << 31) ? 'N' : '-', + psr & (1 << 30) ? 'Z' : '-', + psr & (1 << 29) ? 'C' : '-', + psr & (1 << 28) ? 'V' : '-', + psr & CPSR_T ? 'T' : 'A', + cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26); + + for (i = 0; i < 16; i++) { + d.d = env->vfp.regs[i]; + s0.i = d.l.lower; + s1.i = d.l.upper; + cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n", + i * 2, (int)s0.i, s0.s, + i * 2 + 1, (int)s0.i, s0.s, + i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower, + d.d); + } + cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]); +} + -- cgit v1.1