Diffstat (limited to 'target-i386')
-rw-r--r--   target-i386/cpu.h                 653
-rw-r--r--   target-i386/exec.h                575
-rw-r--r--   target-i386/helper.c             3540
-rw-r--r--   target-i386/helper2.c            1031
-rw-r--r--   target-i386/op.c                 2444
-rw-r--r--   target-i386/opreg_template.h      190
-rw-r--r--   target-i386/ops_mem.h             156
-rw-r--r--   target-i386/ops_sse.h            1374
-rw-r--r--   target-i386/ops_template.h        597
-rw-r--r--   target-i386/ops_template_mem.h    483
-rw-r--r--   target-i386/translate-copy.c     1323
-rw-r--r--   target-i386/translate.c          6523
12 files changed, 18889 insertions(+), 0 deletions(-)
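Editorial note before the diff body: the cpu.h hunk below deliberately keeps several hidden-flag (hflags) bits at the same positions as their EFLAGS counterparts (HF_TF_SHIFT, HF_IOPL_SHIFT and HF_VM_SHIFT match TF, IOPL and VM), as its own comment says, "to ease oring with eflags". A minimal standalone C sketch of why that invariant helps, with the masks re-declared locally for illustration (not part of the commit):

#include <stdint.h>
#include <stdio.h>

#define TF_MASK    0x00000100   /* EFLAGS.TF, bit 8  */
#define VM_MASK    0x00020000   /* EFLAGS.VM, bit 17 */
#define HF_TF_MASK (1 << 8)     /* same position as TF_MASK */
#define HF_VM_MASK (1 << 17)    /* same position as VM_MASK */

int main(void)
{
    uint32_t hflags = HF_TF_MASK | HF_VM_MASK; /* hidden copies of TF and VM */
    uint32_t eflags = 0x00000002;              /* reserved bit 1 is always set */

    /* Because the shifts line up, merging the hidden state into an
       EFLAGS image is a single OR, with no per-bit shifting. */
    eflags |= hflags & (HF_TF_MASK | HF_VM_MASK);
    printf("eflags = %#010x\n", eflags);       /* prints 0x00020102 */
    return 0;
}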
diff --git a/target-i386/cpu.h b/target-i386/cpu.h new file mode 100644 index 0000000..2f23617 --- /dev/null +++ b/target-i386/cpu.h @@ -0,0 +1,653 @@ +/* + * i386 virtual CPU header + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef CPU_I386_H +#define CPU_I386_H + +#include "config.h" + +#ifdef TARGET_X86_64 +#define TARGET_LONG_BITS 64 +#else +#define TARGET_LONG_BITS 32 +#endif + +/* target supports implicit self modifying code */ +#define TARGET_HAS_SMC +/* support for self modifying code even if the modified instruction is + close to the modifying instruction */ +#define TARGET_HAS_PRECISE_SMC + +#define TARGET_HAS_ICE 1 + +#include "cpu-defs.h" + +#include "softfloat.h" + +#if defined(__i386__) && !defined(CONFIG_SOFTMMU) +#define USE_CODE_COPY +#endif + +#define R_EAX 0 +#define R_ECX 1 +#define R_EDX 2 +#define R_EBX 3 +#define R_ESP 4 +#define R_EBP 5 +#define R_ESI 6 +#define R_EDI 7 + +#define R_AL 0 +#define R_CL 1 +#define R_DL 2 +#define R_BL 3 +#define R_AH 4 +#define R_CH 5 +#define R_DH 6 +#define R_BH 7 + +#define R_ES 0 +#define R_CS 1 +#define R_SS 2 +#define R_DS 3 +#define R_FS 4 +#define R_GS 5 + +/* segment descriptor fields */ +#define DESC_G_MASK (1 << 23) +#define DESC_B_SHIFT 22 +#define DESC_B_MASK (1 << DESC_B_SHIFT) +#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ +#define DESC_L_MASK (1 << DESC_L_SHIFT) +#define DESC_AVL_MASK (1 << 20) +#define DESC_P_MASK (1 << 15) +#define DESC_DPL_SHIFT 13 +#define DESC_S_MASK (1 << 12) +#define DESC_TYPE_SHIFT 8 +#define DESC_A_MASK (1 << 8) + +#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ +#define DESC_C_MASK (1 << 10) /* code: conforming */ +#define DESC_R_MASK (1 << 9) /* code: readable */ + +#define DESC_E_MASK (1 << 10) /* data: expansion direction */ +#define DESC_W_MASK (1 << 9) /* data: writable */ + +#define DESC_TSS_BUSY_MASK (1 << 9) + +/* eflags masks */ +#define CC_C 0x0001 +#define CC_P 0x0004 +#define CC_A 0x0010 +#define CC_Z 0x0040 +#define CC_S 0x0080 +#define CC_O 0x0800 + +#define TF_SHIFT 8 +#define IOPL_SHIFT 12 +#define VM_SHIFT 17 + +#define TF_MASK 0x00000100 +#define IF_MASK 0x00000200 +#define DF_MASK 0x00000400 +#define IOPL_MASK 0x00003000 +#define NT_MASK 0x00004000 +#define RF_MASK 0x00010000 +#define VM_MASK 0x00020000 +#define AC_MASK 0x00040000 +#define VIF_MASK 0x00080000 +#define VIP_MASK 0x00100000 +#define ID_MASK 0x00200000 + +/* hidden flags - used internally by qemu to represent additionnal cpu + states. Only the CPL, INHIBIT_IRQ and HALTED are not redundant. We avoid + using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring + with eflags. 
*/ +/* current cpl */ +#define HF_CPL_SHIFT 0 +/* true if soft mmu is being used */ +#define HF_SOFTMMU_SHIFT 2 +/* true if hardware interrupts must be disabled for next instruction */ +#define HF_INHIBIT_IRQ_SHIFT 3 +/* 16 or 32 segments */ +#define HF_CS32_SHIFT 4 +#define HF_SS32_SHIFT 5 +/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ +#define HF_ADDSEG_SHIFT 6 +/* copy of CR0.PE (protected mode) */ +#define HF_PE_SHIFT 7 +#define HF_TF_SHIFT 8 /* must be same as eflags */ +#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ +#define HF_EM_SHIFT 10 +#define HF_TS_SHIFT 11 +#define HF_IOPL_SHIFT 12 /* must be same as eflags */ +#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ +#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ +#define HF_OSFXSR_SHIFT 16 /* CR4.OSFXSR */ +#define HF_VM_SHIFT 17 /* must be same as eflags */ +#define HF_HALTED_SHIFT 18 /* CPU halted */ + +#define HF_CPL_MASK (3 << HF_CPL_SHIFT) +#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT) +#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) +#define HF_CS32_MASK (1 << HF_CS32_SHIFT) +#define HF_SS32_MASK (1 << HF_SS32_SHIFT) +#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) +#define HF_PE_MASK (1 << HF_PE_SHIFT) +#define HF_TF_MASK (1 << HF_TF_SHIFT) +#define HF_MP_MASK (1 << HF_MP_SHIFT) +#define HF_EM_MASK (1 << HF_EM_SHIFT) +#define HF_TS_MASK (1 << HF_TS_SHIFT) +#define HF_LMA_MASK (1 << HF_LMA_SHIFT) +#define HF_CS64_MASK (1 << HF_CS64_SHIFT) +#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) +#define HF_HALTED_MASK (1 << HF_HALTED_SHIFT) + +#define CR0_PE_MASK (1 << 0) +#define CR0_MP_MASK (1 << 1) +#define CR0_EM_MASK (1 << 2) +#define CR0_TS_MASK (1 << 3) +#define CR0_ET_MASK (1 << 4) +#define CR0_NE_MASK (1 << 5) +#define CR0_WP_MASK (1 << 16) +#define CR0_AM_MASK (1 << 18) +#define CR0_PG_MASK (1 << 31) + +#define CR4_VME_MASK (1 << 0) +#define CR4_PVI_MASK (1 << 1) +#define CR4_TSD_MASK (1 << 2) +#define CR4_DE_MASK (1 << 3) +#define CR4_PSE_MASK (1 << 4) +#define CR4_PAE_MASK (1 << 5) +#define CR4_PGE_MASK (1 << 7) +#define CR4_PCE_MASK (1 << 8) +#define CR4_OSFXSR_MASK (1 << 9) +#define CR4_OSXMMEXCPT_MASK (1 << 10) + +#define PG_PRESENT_BIT 0 +#define PG_RW_BIT 1 +#define PG_USER_BIT 2 +#define PG_PWT_BIT 3 +#define PG_PCD_BIT 4 +#define PG_ACCESSED_BIT 5 +#define PG_DIRTY_BIT 6 +#define PG_PSE_BIT 7 +#define PG_GLOBAL_BIT 8 +#define PG_NX_BIT 63 + +#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) +#define PG_RW_MASK (1 << PG_RW_BIT) +#define PG_USER_MASK (1 << PG_USER_BIT) +#define PG_PWT_MASK (1 << PG_PWT_BIT) +#define PG_PCD_MASK (1 << PG_PCD_BIT) +#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) +#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) +#define PG_PSE_MASK (1 << PG_PSE_BIT) +#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) +#define PG_NX_MASK (1LL << PG_NX_BIT) + +#define PG_ERROR_W_BIT 1 + +#define PG_ERROR_P_MASK 0x01 +#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) +#define PG_ERROR_U_MASK 0x04 +#define PG_ERROR_RSVD_MASK 0x08 +#define PG_ERROR_I_D_MASK 0x10 + +#define MSR_IA32_APICBASE 0x1b +#define MSR_IA32_APICBASE_BSP (1<<8) +#define MSR_IA32_APICBASE_ENABLE (1<<11) +#define MSR_IA32_APICBASE_BASE (0xfffff<<12) + +#define MSR_IA32_SYSENTER_CS 0x174 +#define MSR_IA32_SYSENTER_ESP 0x175 +#define MSR_IA32_SYSENTER_EIP 0x176 + +#define MSR_MCG_CAP 0x179 +#define MSR_MCG_STATUS 0x17a +#define MSR_MCG_CTL 0x17b + +#define MSR_PAT 0x277 + +#define MSR_EFER 0xc0000080 + +#define MSR_EFER_SCE (1 << 0) +#define MSR_EFER_LME (1 << 8) +#define 
MSR_EFER_LMA (1 << 10) +#define MSR_EFER_NXE (1 << 11) +#define MSR_EFER_FFXSR (1 << 14) + +#define MSR_STAR 0xc0000081 +#define MSR_LSTAR 0xc0000082 +#define MSR_CSTAR 0xc0000083 +#define MSR_FMASK 0xc0000084 +#define MSR_FSBASE 0xc0000100 +#define MSR_GSBASE 0xc0000101 +#define MSR_KERNELGSBASE 0xc0000102 + +/* cpuid_features bits */ +#define CPUID_FP87 (1 << 0) +#define CPUID_VME (1 << 1) +#define CPUID_DE (1 << 2) +#define CPUID_PSE (1 << 3) +#define CPUID_TSC (1 << 4) +#define CPUID_MSR (1 << 5) +#define CPUID_PAE (1 << 6) +#define CPUID_MCE (1 << 7) +#define CPUID_CX8 (1 << 8) +#define CPUID_APIC (1 << 9) +#define CPUID_SEP (1 << 11) /* sysenter/sysexit */ +#define CPUID_MTRR (1 << 12) +#define CPUID_PGE (1 << 13) +#define CPUID_MCA (1 << 14) +#define CPUID_CMOV (1 << 15) +#define CPUID_PAT (1 << 16) +#define CPUID_CLFLUSH (1 << 19) +/* ... */ +#define CPUID_MMX (1 << 23) +#define CPUID_FXSR (1 << 24) +#define CPUID_SSE (1 << 25) +#define CPUID_SSE2 (1 << 26) + +#define CPUID_EXT_SSE3 (1 << 0) +#define CPUID_EXT_MONITOR (1 << 3) +#define CPUID_EXT_CX16 (1 << 13) + +#define CPUID_EXT2_SYSCALL (1 << 11) +#define CPUID_EXT2_NX (1 << 20) +#define CPUID_EXT2_FFXSR (1 << 25) +#define CPUID_EXT2_LM (1 << 29) + +#define EXCP00_DIVZ 0 +#define EXCP01_SSTP 1 +#define EXCP02_NMI 2 +#define EXCP03_INT3 3 +#define EXCP04_INTO 4 +#define EXCP05_BOUND 5 +#define EXCP06_ILLOP 6 +#define EXCP07_PREX 7 +#define EXCP08_DBLE 8 +#define EXCP09_XERR 9 +#define EXCP0A_TSS 10 +#define EXCP0B_NOSEG 11 +#define EXCP0C_STACK 12 +#define EXCP0D_GPF 13 +#define EXCP0E_PAGE 14 +#define EXCP10_COPR 16 +#define EXCP11_ALGN 17 +#define EXCP12_MCHK 18 + +enum { + CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ + CC_OP_EFLAGS, /* all cc are explicitely computed, CC_SRC = flags */ + + CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ + CC_OP_MULW, + CC_OP_MULL, + CC_OP_MULQ, + + CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADDW, + CC_OP_ADDL, + CC_OP_ADDQ, + + CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADCW, + CC_OP_ADCL, + CC_OP_ADCQ, + + CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SUBW, + CC_OP_SUBL, + CC_OP_SUBQ, + + CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SBBW, + CC_OP_SBBL, + CC_OP_SBBQ, + + CC_OP_LOGICB, /* modify all flags, CC_DST = res */ + CC_OP_LOGICW, + CC_OP_LOGICL, + CC_OP_LOGICQ, + + CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ + CC_OP_INCW, + CC_OP_INCL, + CC_OP_INCQ, + + CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ + CC_OP_DECW, + CC_OP_DECL, + CC_OP_DECQ, + + CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ + CC_OP_SHLW, + CC_OP_SHLL, + CC_OP_SHLQ, + + CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ + CC_OP_SARW, + CC_OP_SARL, + CC_OP_SARQ, + + CC_OP_NB, +}; + +#ifdef FLOATX80 +#define USE_X86LDOUBLE +#endif + +#ifdef USE_X86LDOUBLE +typedef floatx80 CPU86_LDouble; +#else +typedef float64 CPU86_LDouble; +#endif + +typedef struct SegmentCache { + uint32_t selector; + target_ulong base; + uint32_t limit; + uint32_t flags; +} SegmentCache; + +typedef union { + uint8_t _b[16]; + uint16_t _w[8]; + uint32_t _l[4]; + uint64_t _q[2]; + float32 _s[4]; + float64 _d[2]; +} XMMReg; + +typedef union { + uint8_t _b[8]; + uint16_t _w[2]; + uint32_t _l[1]; + uint64_t q; +} MMXReg; + +#ifdef WORDS_BIGENDIAN +#define XMM_B(n) _b[15 - (n)] +#define XMM_W(n) _w[7 - (n)] +#define XMM_L(n) _l[3 - (n)] +#define 
XMM_S(n) _s[3 - (n)] +#define XMM_Q(n) _q[1 - (n)] +#define XMM_D(n) _d[1 - (n)] + +#define MMX_B(n) _b[7 - (n)] +#define MMX_W(n) _w[3 - (n)] +#define MMX_L(n) _l[1 - (n)] +#else +#define XMM_B(n) _b[n] +#define XMM_W(n) _w[n] +#define XMM_L(n) _l[n] +#define XMM_S(n) _s[n] +#define XMM_Q(n) _q[n] +#define XMM_D(n) _d[n] + +#define MMX_B(n) _b[n] +#define MMX_W(n) _w[n] +#define MMX_L(n) _l[n] +#endif +#define MMX_Q(n) q + +#ifdef TARGET_X86_64 +#define CPU_NB_REGS 16 +#else +#define CPU_NB_REGS 8 +#endif + +typedef struct CPUX86State { +#if TARGET_LONG_BITS > HOST_LONG_BITS + /* temporaries if we cannot store them in host registers */ + target_ulong t0, t1, t2; +#endif + + /* standard registers */ + target_ulong regs[CPU_NB_REGS]; + target_ulong eip; + target_ulong eflags; /* eflags register. During CPU emulation, CC + flags and DF are set to zero because they are + stored elsewhere */ + + /* emulator internal eflags handling */ + target_ulong cc_src; + target_ulong cc_dst; + uint32_t cc_op; + int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ + uint32_t hflags; /* hidden flags, see HF_xxx constants */ + + /* segments */ + SegmentCache segs[6]; /* selector values */ + SegmentCache ldt; + SegmentCache tr; + SegmentCache gdt; /* only base and limit are used */ + SegmentCache idt; /* only base and limit are used */ + + target_ulong cr[5]; /* NOTE: cr1 is unused */ + uint32_t a20_mask; + + /* FPU state */ + unsigned int fpstt; /* top of stack index */ + unsigned int fpus; + unsigned int fpuc; + uint8_t fptags[8]; /* 0 = valid, 1 = empty */ + union { +#ifdef USE_X86LDOUBLE + CPU86_LDouble d __attribute__((aligned(16))); +#else + CPU86_LDouble d; +#endif + MMXReg mmx; + } fpregs[8]; + + /* emulator internal variables */ + float_status fp_status; + CPU86_LDouble ft0; + union { + float f; + double d; + int i32; + int64_t i64; + } fp_convert; + + float_status sse_status; + uint32_t mxcsr; + XMMReg xmm_regs[CPU_NB_REGS]; + XMMReg xmm_t0; + MMXReg mmx_t0; + + /* sysenter registers */ + uint32_t sysenter_cs; + uint32_t sysenter_esp; + uint32_t sysenter_eip; + uint64_t efer; + uint64_t star; +#ifdef TARGET_X86_64 + target_ulong lstar; + target_ulong cstar; + target_ulong fmask; + target_ulong kernelgsbase; +#endif + + uint64_t pat; + + /* temporary data for USE_CODE_COPY mode */ +#ifdef USE_CODE_COPY + uint32_t tmp0; + uint32_t saved_esp; + int native_fp_regs; /* if true, the FPU state is in the native CPU regs */ +#endif + + /* exception/interrupt handling */ + jmp_buf jmp_env; + int exception_index; + int error_code; + int exception_is_int; + target_ulong exception_next_eip; + target_ulong dr[8]; /* debug registers */ + int interrupt_request; + int user_mode_only; /* user mode only simulation */ + + CPU_COMMON + + /* processor features (e.g. 
for CPUID insn) */ + uint32_t cpuid_level; + uint32_t cpuid_vendor1; + uint32_t cpuid_vendor2; + uint32_t cpuid_vendor3; + uint32_t cpuid_version; + uint32_t cpuid_features; + uint32_t cpuid_ext_features; + uint32_t cpuid_xlevel; + uint32_t cpuid_model[12]; + uint32_t cpuid_ext2_features; + +#ifdef USE_KQEMU + int kqemu_enabled; + int last_io_time; +#endif + /* in order to simplify APIC support, we leave this pointer to the + user */ + struct APICState *apic_state; +} CPUX86State; + +CPUX86State *cpu_x86_init(void); +int cpu_x86_exec(CPUX86State *s); +void cpu_x86_close(CPUX86State *s); +int cpu_get_pic_interrupt(CPUX86State *s); +/* MSDOS compatibility mode FPU exception support */ +void cpu_set_ferr(CPUX86State *s); + +/* this function must always be used to load data in the segment + cache: it synchronizes the hflags with the segment cache values */ +static inline void cpu_x86_load_seg_cache(CPUX86State *env, + int seg_reg, unsigned int selector, + uint32_t base, unsigned int limit, + unsigned int flags) +{ + SegmentCache *sc; + unsigned int new_hflags; + + sc = &env->segs[seg_reg]; + sc->selector = selector; + sc->base = base; + sc->limit = limit; + sc->flags = flags; + + /* update the hidden flags */ + { + if (seg_reg == R_CS) { +#ifdef TARGET_X86_64 + if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { + /* long mode */ + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; + env->hflags &= ~(HF_ADDSEG_MASK); + } else +#endif + { + /* legacy / compatibility case */ + new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) + >> (DESC_B_SHIFT - HF_CS32_SHIFT); + env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | + new_hflags; + } + } + new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) + >> (DESC_B_SHIFT - HF_SS32_SHIFT); + if (env->hflags & HF_CS64_MASK) { + /* zero base assumed for DS, ES and SS in long mode */ + } else if (!(env->cr[0] & CR0_PE_MASK) || + (env->eflags & VM_MASK) || + !(env->hflags & HF_CS32_MASK)) { + /* XXX: try to avoid this test. The problem comes from the + fact that is real mode or vm86 mode we only modify the + 'base' and 'selector' fields of the segment cache to go + faster. A solution may be to force addseg to one in + translate-i386.c. */ + new_hflags |= HF_ADDSEG_MASK; + } else { + new_hflags |= ((env->segs[R_DS].base | + env->segs[R_ES].base | + env->segs[R_SS].base) != 0) << + HF_ADDSEG_SHIFT; + } + env->hflags = (env->hflags & + ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; + } +} + +/* wrapper, just in case memory mappings must be changed */ +static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl) +{ +#if HF_CPL_MASK == 3 + s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl; +#else +#error HF_CPL_MASK is hardcoded +#endif +} + +/* used for debug or cpu save/restore */ +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f); +CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper); + +/* the following helpers are only usable in user mode simulation as + they can trigger unexpected exceptions */ +void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); +void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32); +void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32); + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. 
*/ +struct siginfo; +int cpu_x86_signal_handler(int host_signum, struct siginfo *info, + void *puc); +void cpu_x86_set_a20(CPUX86State *env, int a20_state); + +uint64_t cpu_get_tsc(CPUX86State *env); + +void cpu_set_apic_base(CPUX86State *env, uint64_t val); +uint64_t cpu_get_apic_base(CPUX86State *env); +void cpu_set_apic_tpr(CPUX86State *env, uint8_t val); +#ifndef NO_CPU_IO_DEFS +uint8_t cpu_get_apic_tpr(CPUX86State *env); +#endif + +/* will be suppressed */ +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); + +/* used to debug */ +#define X86_DUMP_FPU 0x0001 /* dump FPU state too */ +#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */ + +#ifdef USE_KQEMU +static inline int cpu_get_time_fast(void) +{ + int low, high; + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + return low; +} +#endif + +#define TARGET_PAGE_BITS 12 +#include "cpu-all.h" + +#endif /* CPU_I386_H */ diff --git a/target-i386/exec.h b/target-i386/exec.h new file mode 100644 index 0000000..609a586 --- /dev/null +++ b/target-i386/exec.h @@ -0,0 +1,575 @@ +/* + * i386 execution defines + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include "config.h" +#include "dyngen-exec.h" + +/* XXX: factorize this mess */ +#ifdef TARGET_X86_64 +#define TARGET_LONG_BITS 64 +#else +#define TARGET_LONG_BITS 32 +#endif + +#include "cpu-defs.h" + +/* at least 4 register variables are defined */ +register struct CPUX86State *env asm(AREG0); + +#if TARGET_LONG_BITS > HOST_LONG_BITS + +/* no registers can be used */ +#define T0 (env->t0) +#define T1 (env->t1) +#define T2 (env->t2) + +#else + +/* XXX: use unsigned long instead of target_ulong - better code will + be generated for 64 bit CPUs */ +register target_ulong T0 asm(AREG1); +register target_ulong T1 asm(AREG2); +register target_ulong T2 asm(AREG3); + +/* if more registers are available, we define some registers too */ +#ifdef AREG4 +register target_ulong EAX asm(AREG4); +#define reg_EAX +#endif + +#ifdef AREG5 +register target_ulong ESP asm(AREG5); +#define reg_ESP +#endif + +#ifdef AREG6 +register target_ulong EBP asm(AREG6); +#define reg_EBP +#endif + +#ifdef AREG7 +register target_ulong ECX asm(AREG7); +#define reg_ECX +#endif + +#ifdef AREG8 +register target_ulong EDX asm(AREG8); +#define reg_EDX +#endif + +#ifdef AREG9 +register target_ulong EBX asm(AREG9); +#define reg_EBX +#endif + +#ifdef AREG10 +register target_ulong ESI asm(AREG10); +#define reg_ESI +#endif + +#ifdef AREG11 +register target_ulong EDI asm(AREG11); +#define reg_EDI +#endif + +#endif /* ! 
(TARGET_LONG_BITS > HOST_LONG_BITS) */ + +#define A0 T2 + +extern FILE *logfile; +extern int loglevel; + +#ifndef reg_EAX +#define EAX (env->regs[R_EAX]) +#endif +#ifndef reg_ECX +#define ECX (env->regs[R_ECX]) +#endif +#ifndef reg_EDX +#define EDX (env->regs[R_EDX]) +#endif +#ifndef reg_EBX +#define EBX (env->regs[R_EBX]) +#endif +#ifndef reg_ESP +#define ESP (env->regs[R_ESP]) +#endif +#ifndef reg_EBP +#define EBP (env->regs[R_EBP]) +#endif +#ifndef reg_ESI +#define ESI (env->regs[R_ESI]) +#endif +#ifndef reg_EDI +#define EDI (env->regs[R_EDI]) +#endif +#define EIP (env->eip) +#define DF (env->df) + +#define CC_SRC (env->cc_src) +#define CC_DST (env->cc_dst) +#define CC_OP (env->cc_op) + +/* float macros */ +#define FT0 (env->ft0) +#define ST0 (env->fpregs[env->fpstt].d) +#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) +#define ST1 ST(1) + +#ifdef USE_FP_CONVERT +#define FP_CONVERT (env->fp_convert) +#endif + +#include "cpu.h" +#include "exec-all.h" + +typedef struct CCTable { + int (*compute_all)(void); /* return all the flags */ + int (*compute_c)(void); /* return the C flag */ +} CCTable; + +extern CCTable cc_table[]; + +void load_seg(int seg_reg, int selector); +void helper_ljmp_protected_T0_T1(int next_eip); +void helper_lcall_real_T0_T1(int shift, int next_eip); +void helper_lcall_protected_T0_T1(int shift, int next_eip); +void helper_iret_real(int shift); +void helper_iret_protected(int shift, int next_eip); +void helper_lret_protected(int shift, int addend); +void helper_lldt_T0(void); +void helper_ltr_T0(void); +void helper_movl_crN_T0(int reg); +void helper_movl_drN_T0(int reg); +void helper_invlpg(target_ulong addr); +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); +void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); +void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr); +int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, + int is_write, int is_user, int is_softmmu); +void tlb_fill(target_ulong addr, int is_write, int is_user, + void *retaddr); +void __hidden cpu_lock(void); +void __hidden cpu_unlock(void); +void do_interrupt(int intno, int is_int, int error_code, + target_ulong next_eip, int is_hw); +void do_interrupt_user(int intno, int is_int, int error_code, + target_ulong next_eip); +void raise_interrupt(int intno, int is_int, int error_code, + int next_eip_addend); +void raise_exception_err(int exception_index, int error_code); +void raise_exception(int exception_index); +void __hidden cpu_loop_exit(void); + +void OPPROTO op_movl_eflags_T0(void); +void OPPROTO op_movl_T0_eflags(void); +void helper_divl_EAX_T0(void); +void helper_idivl_EAX_T0(void); +void helper_mulq_EAX_T0(void); +void helper_imulq_EAX_T0(void); +void helper_imulq_T0_T1(void); +void helper_divq_EAX_T0(void); +void helper_idivq_EAX_T0(void); +void helper_bswapq_T0(void); +void helper_cmpxchg8b(void); +void helper_cpuid(void); +void helper_enter_level(int level, int data32); +void helper_enter64_level(int level, int data64); +void helper_sysenter(void); +void helper_sysexit(void); +void helper_syscall(int next_eip_addend); +void helper_sysret(int dflag); +void helper_rdtsc(void); +void helper_rdmsr(void); +void helper_wrmsr(void); +void helper_lsl(void); +void helper_lar(void); +void helper_verr(void); +void helper_verw(void); + +void check_iob_T0(void); +void check_iow_T0(void); +void check_iol_T0(void); +void check_iob_DX(void); +void check_iow_DX(void); +void check_iol_DX(void); + +#if 
!defined(CONFIG_USER_ONLY) + +#include "softmmu_exec.h" + +static inline double ldfq(target_ulong ptr) +{ + union { + double d; + uint64_t i; + } u; + u.i = ldq(ptr); + return u.d; +} + +static inline void stfq(target_ulong ptr, double v) +{ + union { + double d; + uint64_t i; + } u; + u.d = v; + stq(ptr, u.i); +} + +static inline float ldfl(target_ulong ptr) +{ + union { + float f; + uint32_t i; + } u; + u.i = ldl(ptr); + return u.f; +} + +static inline void stfl(target_ulong ptr, float v) +{ + union { + float f; + uint32_t i; + } u; + u.f = v; + stl(ptr, u.i); +} + +#endif /* !defined(CONFIG_USER_ONLY) */ + +#ifdef USE_X86LDOUBLE +/* use long double functions */ +#define floatx_to_int32 floatx80_to_int32 +#define floatx_to_int64 floatx80_to_int64 +#define floatx_to_int32_round_to_zero floatx80_to_int32_round_to_zero +#define floatx_to_int64_round_to_zero floatx80_to_int64_round_to_zero +#define floatx_abs floatx80_abs +#define floatx_chs floatx80_chs +#define floatx_round_to_int floatx80_round_to_int +#define floatx_compare floatx80_compare +#define floatx_compare_quiet floatx80_compare_quiet +#define sin sinl +#define cos cosl +#define sqrt sqrtl +#define pow powl +#define log logl +#define tan tanl +#define atan2 atan2l +#define floor floorl +#define ceil ceill +#define ldexp ldexpl +#else +#define floatx_to_int32 float64_to_int32 +#define floatx_to_int64 float64_to_int64 +#define floatx_to_int32_round_to_zero float64_to_int32_round_to_zero +#define floatx_to_int64_round_to_zero float64_to_int64_round_to_zero +#define floatx_abs float64_abs +#define floatx_chs float64_chs +#define floatx_round_to_int float64_round_to_int +#define floatx_compare float64_compare +#define floatx_compare_quiet float64_compare_quiet +#endif + +extern CPU86_LDouble sin(CPU86_LDouble x); +extern CPU86_LDouble cos(CPU86_LDouble x); +extern CPU86_LDouble sqrt(CPU86_LDouble x); +extern CPU86_LDouble pow(CPU86_LDouble, CPU86_LDouble); +extern CPU86_LDouble log(CPU86_LDouble x); +extern CPU86_LDouble tan(CPU86_LDouble x); +extern CPU86_LDouble atan2(CPU86_LDouble, CPU86_LDouble); +extern CPU86_LDouble floor(CPU86_LDouble x); +extern CPU86_LDouble ceil(CPU86_LDouble x); + +#define RC_MASK 0xc00 +#define RC_NEAR 0x000 +#define RC_DOWN 0x400 +#define RC_UP 0x800 +#define RC_CHOP 0xc00 + +#define MAXTAN 9223372036854775808.0 + +#ifdef USE_X86LDOUBLE + +/* only for x86 */ +typedef union { + long double d; + struct { + unsigned long long lower; + unsigned short upper; + } l; +} CPU86_LDoubleU; + +/* the following deal with x86 long double-precision numbers */ +#define MAXEXPD 0x7fff +#define EXPBIAS 16383 +#define EXPD(fp) (fp.l.upper & 0x7fff) +#define SIGND(fp) ((fp.l.upper) & 0x8000) +#define MANTD(fp) (fp.l.lower) +#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS + +#else + +/* NOTE: arm is horrible as double 32 bit words are stored in big endian ! 
*/ +typedef union { + double d; +#if !defined(WORDS_BIGENDIAN) && !defined(__arm__) + struct { + uint32_t lower; + int32_t upper; + } l; +#else + struct { + int32_t upper; + uint32_t lower; + } l; +#endif +#ifndef __arm__ + int64_t ll; +#endif +} CPU86_LDoubleU; + +/* the following deal with IEEE double-precision numbers */ +#define MAXEXPD 0x7ff +#define EXPBIAS 1023 +#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF) +#define SIGND(fp) ((fp.l.upper) & 0x80000000) +#ifdef __arm__ +#define MANTD(fp) (fp.l.lower | ((uint64_t)(fp.l.upper & ((1 << 20) - 1)) << 32)) +#else +#define MANTD(fp) (fp.ll & ((1LL << 52) - 1)) +#endif +#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7ff << 20)) | (EXPBIAS << 20) +#endif + +static inline void fpush(void) +{ + env->fpstt = (env->fpstt - 1) & 7; + env->fptags[env->fpstt] = 0; /* validate stack entry */ +} + +static inline void fpop(void) +{ + env->fptags[env->fpstt] = 1; /* invvalidate stack entry */ + env->fpstt = (env->fpstt + 1) & 7; +} + +#ifndef USE_X86LDOUBLE +static inline CPU86_LDouble helper_fldt(target_ulong ptr) +{ + CPU86_LDoubleU temp; + int upper, e; + uint64_t ll; + + /* mantissa */ + upper = lduw(ptr + 8); + /* XXX: handle overflow ? */ + e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */ + e |= (upper >> 4) & 0x800; /* sign */ + ll = (ldq(ptr) >> 11) & ((1LL << 52) - 1); +#ifdef __arm__ + temp.l.upper = (e << 20) | (ll >> 32); + temp.l.lower = ll; +#else + temp.ll = ll | ((uint64_t)e << 52); +#endif + return temp.d; +} + +static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) +{ + CPU86_LDoubleU temp; + int e; + + temp.d = f; + /* mantissa */ + stq(ptr, (MANTD(temp) << 11) | (1LL << 63)); + /* exponent + sign */ + e = EXPD(temp) - EXPBIAS + 16383; + e |= SIGND(temp) >> 16; + stw(ptr + 8, e); +} +#else + +/* XXX: same endianness assumed */ + +#ifdef CONFIG_USER_ONLY + +static inline CPU86_LDouble helper_fldt(target_ulong ptr) +{ + return *(CPU86_LDouble *)ptr; +} + +static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) +{ + *(CPU86_LDouble *)ptr = f; +} + +#else + +/* we use memory access macros */ + +static inline CPU86_LDouble helper_fldt(target_ulong ptr) +{ + CPU86_LDoubleU temp; + + temp.l.lower = ldq(ptr); + temp.l.upper = lduw(ptr + 8); + return temp.d; +} + +static inline void helper_fstt(CPU86_LDouble f, target_ulong ptr) +{ + CPU86_LDoubleU temp; + + temp.d = f; + stq(ptr, temp.l.lower); + stw(ptr + 8, temp.l.upper); +} + +#endif /* !CONFIG_USER_ONLY */ + +#endif /* USE_X86LDOUBLE */ + +#define FPUS_IE (1 << 0) +#define FPUS_DE (1 << 1) +#define FPUS_ZE (1 << 2) +#define FPUS_OE (1 << 3) +#define FPUS_UE (1 << 4) +#define FPUS_PE (1 << 5) +#define FPUS_SF (1 << 6) +#define FPUS_SE (1 << 7) +#define FPUS_B (1 << 15) + +#define FPUC_EM 0x3f + +extern const CPU86_LDouble f15rk[7]; + +void helper_fldt_ST0_A0(void); +void helper_fstt_ST0_A0(void); +void fpu_raise_exception(void); +CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b); +void helper_fbld_ST0_A0(void); +void helper_fbst_ST0_A0(void); +void helper_f2xm1(void); +void helper_fyl2x(void); +void helper_fptan(void); +void helper_fpatan(void); +void helper_fxtract(void); +void helper_fprem1(void); +void helper_fprem(void); +void helper_fyl2xp1(void); +void helper_fsqrt(void); +void helper_fsincos(void); +void helper_frndint(void); +void helper_fscale(void); +void helper_fsin(void); +void helper_fcos(void); +void helper_fxam_ST0(void); +void helper_fstenv(target_ulong ptr, int data32); +void helper_fldenv(target_ulong ptr, int data32); 
+void helper_fsave(target_ulong ptr, int data32); +void helper_frstor(target_ulong ptr, int data32); +void helper_fxsave(target_ulong ptr, int data64); +void helper_fxrstor(target_ulong ptr, int data64); +void restore_native_fp_state(CPUState *env); +void save_native_fp_state(CPUState *env); +float approx_rsqrt(float a); +float approx_rcp(float a); +void update_fp_status(void); +void helper_hlt(void); +void helper_monitor(void); +void helper_mwait(void); + +extern const uint8_t parity_table[256]; +extern const uint8_t rclw_table[32]; +extern const uint8_t rclb_table[32]; + +static inline uint32_t compute_eflags(void) +{ + return env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); +} + +/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */ +static inline void load_eflags(int eflags, int update_mask) +{ + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + DF = 1 - (2 * ((eflags >> 10) & 1)); + env->eflags = (env->eflags & ~update_mask) | + (eflags & update_mask); +} + +static inline void env_to_regs(void) +{ +#ifdef reg_EAX + EAX = env->regs[R_EAX]; +#endif +#ifdef reg_ECX + ECX = env->regs[R_ECX]; +#endif +#ifdef reg_EDX + EDX = env->regs[R_EDX]; +#endif +#ifdef reg_EBX + EBX = env->regs[R_EBX]; +#endif +#ifdef reg_ESP + ESP = env->regs[R_ESP]; +#endif +#ifdef reg_EBP + EBP = env->regs[R_EBP]; +#endif +#ifdef reg_ESI + ESI = env->regs[R_ESI]; +#endif +#ifdef reg_EDI + EDI = env->regs[R_EDI]; +#endif +} + +static inline void regs_to_env(void) +{ +#ifdef reg_EAX + env->regs[R_EAX] = EAX; +#endif +#ifdef reg_ECX + env->regs[R_ECX] = ECX; +#endif +#ifdef reg_EDX + env->regs[R_EDX] = EDX; +#endif +#ifdef reg_EBX + env->regs[R_EBX] = EBX; +#endif +#ifdef reg_ESP + env->regs[R_ESP] = ESP; +#endif +#ifdef reg_EBP + env->regs[R_EBP] = EBP; +#endif +#ifdef reg_ESI + env->regs[R_ESI] = ESI; +#endif +#ifdef reg_EDI + env->regs[R_EDI] = EDI; +#endif +} diff --git a/target-i386/helper.c b/target-i386/helper.c new file mode 100644 index 0000000..70e9fae --- /dev/null +++ b/target-i386/helper.c @@ -0,0 +1,3540 @@ +/* + * i386 helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include "exec.h" + +//#define DEBUG_PCALL + +#if 0 +#define raise_exception_err(a, b)\ +do {\ + if (logfile)\ + fprintf(logfile, "raise_exception line=%d\n", __LINE__);\ + (raise_exception_err)(a, b);\ +} while (0) +#endif + +const uint8_t parity_table[256] = { + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, +}; + +/* modulo 17 table */ +const uint8_t rclw_table[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9,10,11,12,13,14,15, + 16, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9,10,11,12,13,14, +}; + +/* modulo 9 table */ +const uint8_t rclb_table[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 0, 1, 2, 3, 4, 5, + 6, 7, 8, 0, 1, 2, 3, 4, +}; + +const CPU86_LDouble f15rk[7] = +{ + 0.00000000000000000000L, + 1.00000000000000000000L, + 3.14159265358979323851L, /*pi*/ + 0.30102999566398119523L, /*lg2*/ + 0.69314718055994530943L, /*ln2*/ + 1.44269504088896340739L, /*l2e*/ + 3.32192809488736234781L, /*l2t*/ +}; + +/* thread support */ + +spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED; + +void cpu_lock(void) +{ + spin_lock(&global_cpu_lock); +} + +void cpu_unlock(void) +{ + spin_unlock(&global_cpu_lock); +} + +void cpu_loop_exit(void) +{ + /* NOTE: the register at this point must be saved by hand because + longjmp restore them */ + regs_to_env(); + longjmp(env->jmp_env, 1); +} + +/* return non zero if error */ +static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, + int selector) +{ + SegmentCache *dt; + int index; + target_ulong ptr; + + if (selector & 0x4) + dt = &env->ldt; + else + dt = &env->gdt; + index = selector & ~7; + if ((index + 7) > dt->limit) + return -1; + ptr = dt->base + index; + *e1_ptr = ldl_kernel(ptr); + *e2_ptr = ldl_kernel(ptr + 4); + return 0; +} + +static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) +{ + unsigned int limit; + limit = (e1 & 0xffff) | (e2 & 0x000f0000); + if (e2 & DESC_G_MASK) + limit = (limit << 12) | 0xfff; + return limit; +} + +static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) +{ + return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); +} + +static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) +{ + sc->base = 
get_seg_base(e1, e2); + sc->limit = get_seg_limit(e1, e2); + sc->flags = e2; +} + +/* init the segment cache in vm86 mode. */ +static inline void load_seg_vm(int seg, int selector) +{ + selector &= 0xffff; + cpu_x86_load_seg_cache(env, seg, selector, + (selector << 4), 0xffff, 0); +} + +static inline void get_ss_esp_from_tss(uint32_t *ss_ptr, + uint32_t *esp_ptr, int dpl) +{ + int type, index, shift; + +#if 0 + { + int i; + printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); + for(i=0;i<env->tr.limit;i++) { + printf("%02x ", env->tr.base[i]); + if ((i & 7) == 7) printf("\n"); + } + printf("\n"); + } +#endif + + if (!(env->tr.flags & DESC_P_MASK)) + cpu_abort(env, "invalid tss"); + type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + if ((type & 7) != 1) + cpu_abort(env, "invalid tss type"); + shift = type >> 3; + index = (dpl * 4 + 2) << shift; + if (index + (4 << shift) - 1 > env->tr.limit) + raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); + if (shift == 0) { + *esp_ptr = lduw_kernel(env->tr.base + index); + *ss_ptr = lduw_kernel(env->tr.base + index + 2); + } else { + *esp_ptr = ldl_kernel(env->tr.base + index); + *ss_ptr = lduw_kernel(env->tr.base + index + 4); + } +} + +/* XXX: merge with load_seg() */ +static void tss_load_seg(int seg_reg, int selector) +{ + uint32_t e1, e2; + int rpl, dpl, cpl; + + if ((selector & 0xfffc) != 0) { + if (load_segment(&e1, &e2, selector) != 0) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + if (!(e2 & DESC_S_MASK)) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (seg_reg == R_CS) { + if (!(e2 & DESC_CS_MASK)) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + /* XXX: is it correct ? 
*/ + if (dpl != rpl) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + if ((e2 & DESC_C_MASK) && dpl > rpl) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + } else if (seg_reg == R_SS) { + /* SS must be writable data */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + if (dpl != cpl || dpl != rpl) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + } else { + /* not readable code */ + if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + /* if data or non conforming code, checks the rights */ + if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { + if (dpl < cpl || dpl < rpl) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); + cpu_x86_load_seg_cache(env, seg_reg, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + } else { + if (seg_reg == R_SS || seg_reg == R_CS) + raise_exception_err(EXCP0A_TSS, selector & 0xfffc); + } +} + +#define SWITCH_TSS_JMP 0 +#define SWITCH_TSS_IRET 1 +#define SWITCH_TSS_CALL 2 + +/* XXX: restore CPU state in registers (PowerPC case) */ +static void switch_tss(int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip) +{ + int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; + target_ulong tss_base; + uint32_t new_regs[8], new_segs[6]; + uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; + uint32_t old_eflags, eflags_mask; + SegmentCache *dt; + int index; + target_ulong ptr; + + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; +#ifdef DEBUG_PCALL + if (loglevel & CPU_LOG_PCALL) + fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source); +#endif + + /* if task gate, we read the TSS segment and we load it */ + if (type == 5) { + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); + tss_selector = e1 >> 16; + if (tss_selector & 4) + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); + if (load_segment(&e1, &e2, tss_selector) != 0) + raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); + if (e2 & DESC_S_MASK) + raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + if ((type & 7) != 1) + raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); + } + + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); + + if (type & 8) + tss_limit_max = 103; + else + tss_limit_max = 43; + tss_limit = get_seg_limit(e1, e2); + tss_base = get_seg_base(e1, e2); + if ((tss_selector & 4) != 0 || + tss_limit < tss_limit_max) + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); + old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + if (old_type & 8) + old_tss_limit_max = 103; + else + old_tss_limit_max = 43; + + /* read all the registers from the new TSS */ + if (type & 8) { + /* 32 bit */ + new_cr3 = ldl_kernel(tss_base + 0x1c); + new_eip = ldl_kernel(tss_base + 0x20); + new_eflags = ldl_kernel(tss_base + 0x24); + for(i = 0; i < 8; i++) + new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4)); + for(i = 0; i < 6; i++) + new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4)); + new_ldt = lduw_kernel(tss_base + 0x60); + new_trap = ldl_kernel(tss_base + 0x64); + } else { + /* 16 bit */ + new_cr3 = 0; + new_eip = lduw_kernel(tss_base + 0x0e); + new_eflags = lduw_kernel(tss_base + 0x10); + for(i = 0; i < 8; i++) + new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000; + for(i 
= 0; i < 4; i++) + new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4)); + new_ldt = lduw_kernel(tss_base + 0x2a); + new_segs[R_FS] = 0; + new_segs[R_GS] = 0; + new_trap = 0; + } + + /* NOTE: we must avoid memory exceptions during the task switch, + so we make dummy accesses before */ + /* XXX: it can still fail in some cases, so a bigger hack is + necessary to valid the TLB after having done the accesses */ + + v1 = ldub_kernel(env->tr.base); + v2 = ldub_kernel(env->tr.base + old_tss_limit_max); + stb_kernel(env->tr.base, v1); + stb_kernel(env->tr.base + old_tss_limit_max, v2); + + /* clear busy bit (it is restartable) */ + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { + target_ulong ptr; + uint32_t e2; + ptr = env->gdt.base + (env->tr.selector & ~7); + e2 = ldl_kernel(ptr + 4); + e2 &= ~DESC_TSS_BUSY_MASK; + stl_kernel(ptr + 4, e2); + } + old_eflags = compute_eflags(); + if (source == SWITCH_TSS_IRET) + old_eflags &= ~NT_MASK; + + /* save the current state in the old TSS */ + if (type & 8) { + /* 32 bit */ + stl_kernel(env->tr.base + 0x20, next_eip); + stl_kernel(env->tr.base + 0x24, old_eflags); + stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX); + stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX); + stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX); + stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX); + stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP); + stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP); + stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI); + stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI); + for(i = 0; i < 6; i++) + stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector); + } else { + /* 16 bit */ + stw_kernel(env->tr.base + 0x0e, next_eip); + stw_kernel(env->tr.base + 0x10, old_eflags); + stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX); + stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX); + stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX); + stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX); + stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP); + stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP); + stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI); + stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI); + for(i = 0; i < 4; i++) + stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector); + } + + /* now if an exception occurs, it will occurs in the next task + context */ + + if (source == SWITCH_TSS_CALL) { + stw_kernel(tss_base, env->tr.selector); + new_eflags |= NT_MASK; + } + + /* set busy bit */ + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { + target_ulong ptr; + uint32_t e2; + ptr = env->gdt.base + (tss_selector & ~7); + e2 = ldl_kernel(ptr + 4); + e2 |= DESC_TSS_BUSY_MASK; + stl_kernel(ptr + 4, e2); + } + + /* set the new CPU state */ + /* from this point, any exception which occurs can give problems */ + env->cr[0] |= CR0_TS_MASK; + env->hflags |= HF_TS_MASK; + env->tr.selector = tss_selector; + env->tr.base = tss_base; + env->tr.limit = tss_limit; + env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; + + if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { + cpu_x86_update_cr3(env, new_cr3); + } + + /* load all registers without an exception, then reload them with + possible exception */ + env->eip = new_eip; + eflags_mask = TF_MASK | AC_MASK | ID_MASK | + IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; + if (!(type & 8)) + eflags_mask &= 0xffff; + load_eflags(new_eflags, eflags_mask); + /* XXX: what to do in 16 bit case ? 
*/ + EAX = new_regs[0]; + ECX = new_regs[1]; + EDX = new_regs[2]; + EBX = new_regs[3]; + ESP = new_regs[4]; + EBP = new_regs[5]; + ESI = new_regs[6]; + EDI = new_regs[7]; + if (new_eflags & VM_MASK) { + for(i = 0; i < 6; i++) + load_seg_vm(i, new_segs[i]); + /* in vm86, CPL is always 3 */ + cpu_x86_set_cpl(env, 3); + } else { + /* CPL is set the RPL of CS */ + cpu_x86_set_cpl(env, new_segs[R_CS] & 3); + /* first just selectors as the rest may trigger exceptions */ + for(i = 0; i < 6; i++) + cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); + } + + env->ldt.selector = new_ldt & ~4; + env->ldt.base = 0; + env->ldt.limit = 0; + env->ldt.flags = 0; + + /* load the LDT */ + if (new_ldt & 4) + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); + + if ((new_ldt & 0xfffc) != 0) { + dt = &env->gdt; + index = new_ldt & ~7; + if ((index + 7) > dt->limit) + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); + load_seg_cache_raw_dt(&env->ldt, e1, e2); + } + + /* load the segments */ + if (!(new_eflags & VM_MASK)) { + tss_load_seg(R_CS, new_segs[R_CS]); + tss_load_seg(R_SS, new_segs[R_SS]); + tss_load_seg(R_ES, new_segs[R_ES]); + tss_load_seg(R_DS, new_segs[R_DS]); + tss_load_seg(R_FS, new_segs[R_FS]); + tss_load_seg(R_GS, new_segs[R_GS]); + } + + /* check that EIP is in the CS segment limits */ + if (new_eip > env->segs[R_CS].limit) { + /* XXX: different exception if CALL ? */ + raise_exception_err(EXCP0D_GPF, 0); + } +} + +/* check if Port I/O is allowed in TSS */ +static inline void check_io(int addr, int size) +{ + int io_offset, val, mask; + + /* TSS must be a valid 32 bit one */ + if (!(env->tr.flags & DESC_P_MASK) || + ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 || + env->tr.limit < 103) + goto fail; + io_offset = lduw_kernel(env->tr.base + 0x66); + io_offset += (addr >> 3); + /* Note: the check needs two bytes */ + if ((io_offset + 1) > env->tr.limit) + goto fail; + val = lduw_kernel(env->tr.base + io_offset); + val >>= (addr & 7); + mask = (1 << size) - 1; + /* all bits must be zero to allow the I/O */ + if ((val & mask) != 0) { + fail: + raise_exception_err(EXCP0D_GPF, 0); + } +} + +void check_iob_T0(void) +{ + check_io(T0, 1); +} + +void check_iow_T0(void) +{ + check_io(T0, 2); +} + +void check_iol_T0(void) +{ + check_io(T0, 4); +} + +void check_iob_DX(void) +{ + check_io(EDX & 0xffff, 1); +} + +void check_iow_DX(void) +{ + check_io(EDX & 0xffff, 2); +} + +void check_iol_DX(void) +{ + check_io(EDX & 0xffff, 4); +} + +static inline unsigned int get_sp_mask(unsigned int e2) +{ + if (e2 & DESC_B_MASK) + return 0xffffffff; + else + return 0xffff; +} + +/* XXX: add a is_user flag to have proper security support */ +#define PUSHW(ssp, sp, sp_mask, val)\ +{\ + sp -= 2;\ + stw_kernel((ssp) + (sp & (sp_mask)), (val));\ +} + +#define PUSHL(ssp, sp, sp_mask, val)\ +{\ + sp -= 4;\ + stl_kernel((ssp) + (sp & (sp_mask)), (val));\ +} + +#define POPW(ssp, sp, sp_mask, val)\ +{\ + val = lduw_kernel((ssp) + (sp & (sp_mask)));\ + sp += 2;\ +} + +#define POPL(ssp, sp, sp_mask, val)\ +{\ + val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\ + sp += 4;\ +} + +/* protected mode interrupt */ +static void do_interrupt_protected(int intno, int is_int, int error_code, + unsigned int next_eip, int is_hw) +{ + SegmentCache *dt; + 
target_ulong ptr, ssp; + int type, dpl, selector, ss_dpl, cpl, sp_mask; + int has_error_code, new_stack, shift; + uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2; + uint32_t old_eip; + + has_error_code = 0; + if (!is_int && !is_hw) { + switch(intno) { + case 8: + case 10: + case 11: + case 12: + case 13: + case 14: + case 17: + has_error_code = 1; + break; + } + } + if (is_int) + old_eip = next_eip; + else + old_eip = env->eip; + + dt = &env->idt; + if (intno * 8 + 7 > dt->limit) + raise_exception_err(EXCP0D_GPF, intno * 8 + 2); + ptr = dt->base + intno * 8; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + switch(type) { + case 5: /* task gate */ + /* must do that check here to return the correct error code */ + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); + switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); + if (has_error_code) { + int mask, type; + /* push the error code */ + type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + shift = type >> 3; + if (env->segs[R_SS].flags & DESC_B_MASK) + mask = 0xffffffff; + else + mask = 0xffff; + esp = (ESP - (2 << shift)) & mask; + ssp = env->segs[R_SS].base + esp; + if (shift) + stl_kernel(ssp, error_code); + else + stw_kernel(ssp, error_code); + ESP = (esp & mask) | (ESP & ~mask); + } + return; + case 6: /* 286 interrupt gate */ + case 7: /* 286 trap gate */ + case 14: /* 386 interrupt gate */ + case 15: /* 386 trap gate */ + break; + default: + raise_exception_err(EXCP0D_GPF, intno * 8 + 2); + break; + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privledge if software int */ + if (is_int && dpl < cpl) + raise_exception_err(EXCP0D_GPF, intno * 8 + 2); + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); + selector = e1 >> 16; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); + if ((selector & 0xfffc) == 0) + raise_exception_err(EXCP0D_GPF, 0); + + if (load_segment(&e1, &e2, selector) != 0) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); + if (!(e2 & DESC_C_MASK) && dpl < cpl) { + /* to inner priviledge */ + get_ss_esp_from_tss(&ss, &esp, dpl); + if ((ss & 0xfffc) == 0) + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); + if ((ss & 3) != dpl) + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); + if (load_segment(&ss_e1, &ss_e2, ss) != 0) + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (ss_dpl != dpl) + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); + if (!(ss_e2 & DESC_P_MASK)) + raise_exception_err(EXCP0A_TSS, ss & 0xfffc); + new_stack = 1; + sp_mask = get_sp_mask(ss_e2); + ssp = get_seg_base(ss_e1, ss_e2); + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + /* to same priviledge */ + if (env->eflags & VM_MASK) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + esp = ESP; + dpl = cpl; + } else { + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; /* avoid warning */ + 
sp_mask = 0; /* avoid warning */ + ssp = 0; /* avoid warning */ + esp = 0; /* avoid warning */ + } + + shift = type >> 3; + +#if 0 + /* XXX: check that enough room is available */ + push_size = 6 + (new_stack << 2) + (has_error_code << 1); + if (env->eflags & VM_MASK) + push_size += 8; + push_size <<= shift; +#endif + if (shift == 1) { + if (new_stack) { + if (env->eflags & VM_MASK) { + PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); + } + PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); + PUSHL(ssp, esp, sp_mask, ESP); + } + PUSHL(ssp, esp, sp_mask, compute_eflags()); + PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, esp, sp_mask, old_eip); + if (has_error_code) { + PUSHL(ssp, esp, sp_mask, error_code); + } + } else { + if (new_stack) { + if (env->eflags & VM_MASK) { + PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); + } + PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); + PUSHW(ssp, esp, sp_mask, ESP); + } + PUSHW(ssp, esp, sp_mask, compute_eflags()); + PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, esp, sp_mask, old_eip); + if (has_error_code) { + PUSHW(ssp, esp, sp_mask, error_code); + } + } + + if (new_stack) { + if (env->eflags & VM_MASK) { + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); + } + ss = (ss & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, + ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); + } + ESP = (ESP & ~sp_mask) | (esp & sp_mask); + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + cpu_x86_set_cpl(env, dpl); + env->eip = offset; + + /* interrupt gate clear IF mask */ + if ((type & 1) == 0) { + env->eflags &= ~IF_MASK; + } + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); +} + +#ifdef TARGET_X86_64 + +#define PUSHQ(sp, val)\ +{\ + sp -= 8;\ + stq_kernel(sp, (val));\ +} + +#define POPQ(sp, val)\ +{\ + val = ldq_kernel(sp);\ + sp += 8;\ +} + +static inline target_ulong get_rsp_from_tss(int level) +{ + int index; + +#if 0 + printf("TR: base=" TARGET_FMT_lx " limit=%x\n", + env->tr.base, env->tr.limit); +#endif + + if (!(env->tr.flags & DESC_P_MASK)) + cpu_abort(env, "invalid tss"); + index = 8 * level + 4; + if ((index + 7) > env->tr.limit) + raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc); + return ldq_kernel(env->tr.base + index); +} + +/* 64 bit interrupt */ +static void do_interrupt64(int intno, int is_int, int error_code, + target_ulong next_eip, int is_hw) +{ + SegmentCache *dt; + target_ulong ptr; + int type, dpl, selector, cpl, ist; + int has_error_code, new_stack; + uint32_t e1, e2, e3, ss; + target_ulong old_eip, esp, offset; + + has_error_code = 0; + if (!is_int && !is_hw) { + switch(intno) { + case 8: + case 10: + case 11: + case 12: + case 13: + case 14: + case 17: + has_error_code = 1; + break; + } + } + if (is_int) + old_eip = next_eip; + else + old_eip = env->eip; + + dt = &env->idt; + if (intno * 16 + 15 > dt->limit) + raise_exception_err(EXCP0D_GPF, intno * 16 + 2); + ptr = dt->base + intno * 16; + e1 = ldl_kernel(ptr); + e2 = 
ldl_kernel(ptr + 4); + e3 = ldl_kernel(ptr + 8); + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + switch(type) { + case 14: /* 386 interrupt gate */ + case 15: /* 386 trap gate */ + break; + default: + raise_exception_err(EXCP0D_GPF, intno * 16 + 2); + break; + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privledge if software int */ + if (is_int && dpl < cpl) + raise_exception_err(EXCP0D_GPF, intno * 16 + 2); + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2); + selector = e1 >> 16; + offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); + ist = e2 & 7; + if ((selector & 0xfffc) == 0) + raise_exception_err(EXCP0D_GPF, 0); + + if (load_segment(&e1, &e2, selector) != 0) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); + if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { + /* to inner priviledge */ + if (ist != 0) + esp = get_rsp_from_tss(ist + 3); + else + esp = get_rsp_from_tss(dpl); + esp &= ~0xfLL; /* align stack */ + ss = 0; + new_stack = 1; + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + /* to same priviledge */ + if (env->eflags & VM_MASK) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; + if (ist != 0) + esp = get_rsp_from_tss(ist + 3); + else + esp = ESP; + esp &= ~0xfLL; /* align stack */ + dpl = cpl; + } else { + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; /* avoid warning */ + esp = 0; /* avoid warning */ + } + + PUSHQ(esp, env->segs[R_SS].selector); + PUSHQ(esp, ESP); + PUSHQ(esp, compute_eflags()); + PUSHQ(esp, env->segs[R_CS].selector); + PUSHQ(esp, old_eip); + if (has_error_code) { + PUSHQ(esp, error_code); + } + + if (new_stack) { + ss = 0 | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); + } + ESP = esp; + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + cpu_x86_set_cpl(env, dpl); + env->eip = offset; + + /* interrupt gate clear IF mask */ + if ((type & 1) == 0) { + env->eflags &= ~IF_MASK; + } + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); +} +#endif + +void helper_syscall(int next_eip_addend) +{ + int selector; + + if (!(env->efer & MSR_EFER_SCE)) { + raise_exception_err(EXCP06_ILLOP, 0); + } + selector = (env->star >> 32) & 0xffff; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + int code64; + + ECX = env->eip + next_eip_addend; + env->regs[11] = compute_eflags(); + + code64 = env->hflags & HF_CS64_MASK; + + cpu_x86_set_cpl(env, 0); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + env->eflags &= ~env->fmask; + if (code64) + env->eip = env->lstar; + else + env->eip = env->cstar; + } else +#endif + { + ECX = (uint32_t)(env->eip + next_eip_addend); + + cpu_x86_set_cpl(env, 
+void helper_syscall(int next_eip_addend)
+{
+    int selector;
+
+    if (!(env->efer & MSR_EFER_SCE)) {
+        raise_exception_err(EXCP06_ILLOP, 0);
+    }
+    selector = (env->star >> 32) & 0xffff;
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_LMA_MASK) {
+        int code64;
+
+        ECX = env->eip + next_eip_addend;
+        env->regs[11] = compute_eflags();
+
+        code64 = env->hflags & HF_CS64_MASK;
+
+        cpu_x86_set_cpl(env, 0);
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_W_MASK | DESC_A_MASK);
+        env->eflags &= ~env->fmask;
+        if (code64)
+            env->eip = env->lstar;
+        else
+            env->eip = env->cstar;
+    } else
+#endif
+    {
+        ECX = (uint32_t)(env->eip + next_eip_addend);
+
+        cpu_x86_set_cpl(env, 0);
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK |
+                               DESC_W_MASK | DESC_A_MASK);
+        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
+        env->eip = (uint32_t)env->star;
+    }
+}
+
+void helper_sysret(int dflag)
+{
+    int cpl, selector;
+
+    if (!(env->efer & MSR_EFER_SCE)) {
+        raise_exception_err(EXCP06_ILLOP, 0);
+    }
+    cpl = env->hflags & HF_CPL_MASK;
+    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
+        raise_exception_err(EXCP0D_GPF, 0);
+    }
+    selector = (env->star >> 48) & 0xffff;
+#ifdef TARGET_X86_64
+    if (env->hflags & HF_LMA_MASK) {
+        if (dflag == 2) {
+            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+                                   0, 0xffffffff,
+                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+                                   DESC_L_MASK);
+            env->eip = ECX;
+        } else {
+            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+                                   0, 0xffffffff,
+                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+            env->eip = (uint32_t)ECX;
+        }
+        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_W_MASK | DESC_A_MASK);
+        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
+                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
+        cpu_x86_set_cpl(env, 3);
+    } else
+#endif
+    {
+        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+        env->eip = (uint32_t)ECX;
+        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
+                               0, 0xffffffff,
+                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+                               DESC_W_MASK | DESC_A_MASK);
+        env->eflags |= IF_MASK;
+        cpu_x86_set_cpl(env, 3);
+    }
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        if (env->hflags & HF_LMA_MASK)
+            CC_OP = CC_OP_EFLAGS;
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
+#endif
+}
+
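For orientation, a small standalone sketch (editorial, not part of the patch) of how the STAR MSR used above packs the SYSCALL/SYSRET code selectors; the SS selector is always derived as CS + 8:

#include <stdint.h>

static unsigned star_syscall_cs(uint64_t star)   /* STAR[47:32] */
{
    return (star >> 32) & 0xffff;
}

static unsigned star_sysret_cs(uint64_t star, int to_64bit) /* STAR[63:48] */
{
    /* a 64-bit SYSRET targets the selector 16 bytes further down the
       GDT, matching the (selector + 16) | 3 load in helper_sysret() */
    return ((star >> 48) & 0xffff) + (to_64bit ? 16 : 0);
}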
+/* real mode interrupt */
+static void do_interrupt_real(int intno, int is_int, int error_code,
+                              unsigned int next_eip)
+{
+    SegmentCache *dt;
+    target_ulong ptr, ssp;
+    int selector;
+    uint32_t offset, esp;
+    uint32_t old_cs, old_eip;
+
+    /* real mode (simpler!) */
+    dt = &env->idt;
+    if (intno * 4 + 3 > dt->limit)
+        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+    ptr = dt->base + intno * 4;
+    offset = lduw_kernel(ptr);
+    selector = lduw_kernel(ptr + 2);
+    esp = ESP;
+    ssp = env->segs[R_SS].base;
+    if (is_int)
+        old_eip = next_eip;
+    else
+        old_eip = env->eip;
+    old_cs = env->segs[R_CS].selector;
+    /* XXX: use SS segment size ? */
+    PUSHW(ssp, esp, 0xffff, compute_eflags());
+    PUSHW(ssp, esp, 0xffff, old_cs);
+    PUSHW(ssp, esp, 0xffff, old_eip);
+
+    /* update processor state */
+    ESP = (ESP & ~0xffff) | (esp & 0xffff);
+    env->eip = offset;
+    env->segs[R_CS].selector = selector;
+    env->segs[R_CS].base = (selector << 4);
+    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
+
+/* fake user mode interrupt */
+void do_interrupt_user(int intno, int is_int, int error_code,
+                       target_ulong next_eip)
+{
+    SegmentCache *dt;
+    target_ulong ptr;
+    int dpl, cpl;
+    uint32_t e2;
+
+    dt = &env->idt;
+    ptr = dt->base + (intno * 8);
+    e2 = ldl_kernel(ptr + 4);
+
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
+    /* check privilege if software int */
+    if (is_int && dpl < cpl)
+        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+
+    /* Since we emulate only user space, we cannot do more than
+       exiting the emulation with the suitable exception and error
+       code */
+    if (is_int)
+        EIP = next_eip;
+}
+
+/*
+ * Begin execution of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the EIP value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE.
+ */
+void do_interrupt(int intno, int is_int, int error_code,
+                  target_ulong next_eip, int is_hw)
+{
+    if (loglevel & CPU_LOG_INT) {
+        if ((env->cr[0] & CR0_PE_MASK)) {
+            static int count;
+            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
+                    count, intno, error_code, is_int,
+                    env->hflags & HF_CPL_MASK,
+                    env->segs[R_CS].selector, EIP,
+                    (int)env->segs[R_CS].base + EIP,
+                    env->segs[R_SS].selector, ESP);
+            if (intno == 0x0e) {
+                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
+            } else {
+                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
+            }
+            fprintf(logfile, "\n");
+            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+#if 0
+            {
+                int i;
+                uint8_t *ptr;
+                fprintf(logfile, " code=");
+                ptr = env->segs[R_CS].base + env->eip;
+                for(i = 0; i < 16; i++) {
+                    fprintf(logfile, " %02x", ldub(ptr + i));
+                }
+                fprintf(logfile, "\n");
+            }
+#endif
+            count++;
+        }
+    }
+    if (env->cr[0] & CR0_PE_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK) {
+            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
+        } else
+#endif
+        {
+            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+        }
+    } else {
+        do_interrupt_real(intno, is_int, error_code, next_eip);
+    }
+}
+
+/*
+ * Signal an interrupt. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip is the
+ * EIP value AFTER the interrupt instruction. It is only relevant if
+ * is_int is TRUE.
+ */ +void raise_interrupt(int intno, int is_int, int error_code, + int next_eip_addend) +{ + env->exception_index = intno; + env->error_code = error_code; + env->exception_is_int = is_int; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit(); +} + +/* same as raise_exception_err, but do not restore global registers */ +static void raise_exception_err_norestore(int exception_index, int error_code) +{ + env->exception_index = exception_index; + env->error_code = error_code; + env->exception_is_int = 0; + env->exception_next_eip = 0; + longjmp(env->jmp_env, 1); +} + +/* shortcuts to generate exceptions */ + +void (raise_exception_err)(int exception_index, int error_code) +{ + raise_interrupt(exception_index, 0, error_code, 0); +} + +void raise_exception(int exception_index) +{ + raise_interrupt(exception_index, 0, 0, 0); +} + +#ifdef BUGGY_GCC_DIV64 +/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we + call it from another function */ +uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den) +{ + *q_ptr = num / den; + return num % den; +} + +int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den) +{ + *q_ptr = num / den; + return num % den; +} +#endif + +void helper_divl_EAX_T0(void) +{ + unsigned int den, r; + uint64_t num, q; + + num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); + den = T0; + if (den == 0) { + raise_exception(EXCP00_DIVZ); + } +#ifdef BUGGY_GCC_DIV64 + r = div32(&q, num, den); +#else + q = (num / den); + r = (num % den); +#endif + if (q > 0xffffffff) + raise_exception(EXCP00_DIVZ); + EAX = (uint32_t)q; + EDX = (uint32_t)r; +} + +void helper_idivl_EAX_T0(void) +{ + int den, r; + int64_t num, q; + + num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); + den = T0; + if (den == 0) { + raise_exception(EXCP00_DIVZ); + } +#ifdef BUGGY_GCC_DIV64 + r = idiv32(&q, num, den); +#else + q = (num / den); + r = (num % den); +#endif + if (q != (int32_t)q) + raise_exception(EXCP00_DIVZ); + EAX = (uint32_t)q; + EDX = (uint32_t)r; +} + +void helper_cmpxchg8b(void) +{ + uint64_t d; + int eflags; + + eflags = cc_table[CC_OP].compute_all(); + d = ldq(A0); + if (d == (((uint64_t)EDX << 32) | EAX)) { + stq(A0, ((uint64_t)ECX << 32) | EBX); + eflags |= CC_Z; + } else { + EDX = d >> 32; + EAX = d; + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} + +void helper_cpuid(void) +{ + uint32_t index; + index = (uint32_t)EAX; + + /* test if maximum index reached */ + if (index & 0x80000000) { + if (index > env->cpuid_xlevel) + index = env->cpuid_level; + } else { + if (index > env->cpuid_level) + index = env->cpuid_level; + } + + switch(index) { + case 0: + EAX = env->cpuid_level; + EBX = env->cpuid_vendor1; + EDX = env->cpuid_vendor2; + ECX = env->cpuid_vendor3; + break; + case 1: + EAX = env->cpuid_version; + EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. 
*/ + ECX = env->cpuid_ext_features; + EDX = env->cpuid_features; + break; + case 2: + /* cache info: needed for Pentium Pro compatibility */ + EAX = 0x410601; + EBX = 0; + ECX = 0; + EDX = 0; + break; + case 0x80000000: + EAX = env->cpuid_xlevel; + EBX = env->cpuid_vendor1; + EDX = env->cpuid_vendor2; + ECX = env->cpuid_vendor3; + break; + case 0x80000001: + EAX = env->cpuid_features; + EBX = 0; + ECX = 0; + EDX = env->cpuid_ext2_features; + break; + case 0x80000002: + case 0x80000003: + case 0x80000004: + EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0]; + EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1]; + ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2]; + EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3]; + break; + case 0x80000005: + /* cache info (L1 cache) */ + EAX = 0x01ff01ff; + EBX = 0x01ff01ff; + ECX = 0x40020140; + EDX = 0x40020140; + break; + case 0x80000006: + /* cache info (L2 cache) */ + EAX = 0; + EBX = 0x42004200; + ECX = 0x02008140; + EDX = 0; + break; + case 0x80000008: + /* virtual & phys address size in low 2 bytes. */ + EAX = 0x00003028; + EBX = 0; + ECX = 0; + EDX = 0; + break; + default: + /* reserved values: zero */ + EAX = 0; + EBX = 0; + ECX = 0; + EDX = 0; + break; + } +} + +void helper_enter_level(int level, int data32) +{ + target_ulong ssp; + uint32_t esp_mask, esp, ebp; + + esp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + ebp = EBP; + esp = ESP; + if (data32) { + /* 32 bit */ + esp -= 4; + while (--level) { + esp -= 4; + ebp -= 4; + stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask))); + } + esp -= 4; + stl(ssp + (esp & esp_mask), T1); + } else { + /* 16 bit */ + esp -= 2; + while (--level) { + esp -= 2; + ebp -= 2; + stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask))); + } + esp -= 2; + stw(ssp + (esp & esp_mask), T1); + } +} + +#ifdef TARGET_X86_64 +void helper_enter64_level(int level, int data64) +{ + target_ulong esp, ebp; + ebp = EBP; + esp = ESP; + + if (data64) { + /* 64 bit */ + esp -= 8; + while (--level) { + esp -= 8; + ebp -= 8; + stq(esp, ldq(ebp)); + } + esp -= 8; + stq(esp, T1); + } else { + /* 16 bit */ + esp -= 2; + while (--level) { + esp -= 2; + ebp -= 2; + stw(esp, lduw(ebp)); + } + esp -= 2; + stw(esp, T1); + } +} +#endif + +void helper_lldt_T0(void) +{ + int selector; + SegmentCache *dt; + uint32_t e1, e2; + int index, entry_limit; + target_ulong ptr; + + selector = T0 & 0xffff; + if ((selector & 0xfffc) == 0) { + /* XXX: NULL selector case: invalid LDT */ + env->ldt.base = 0; + env->ldt.limit = 0; + } else { + if (selector & 0x4) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + dt = &env->gdt; + index = selector & ~7; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) + entry_limit = 15; + else +#endif + entry_limit = 7; + if ((index + entry_limit) > dt->limit) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t e3; + e3 = ldl_kernel(ptr + 8); + load_seg_cache_raw_dt(&env->ldt, e1, e2); + env->ldt.base |= (target_ulong)e3 << 32; + } else +#endif + { + load_seg_cache_raw_dt(&env->ldt, e1, e2); + } + } + env->ldt.selector = selector; +} + +void helper_ltr_T0(void) +{ + int selector; + SegmentCache *dt; + uint32_t e1, e2; + int 
index, type, entry_limit; + target_ulong ptr; + + selector = T0 & 0xffff; + if ((selector & 0xfffc) == 0) { + /* NULL selector case: invalid TR */ + env->tr.base = 0; + env->tr.limit = 0; + env->tr.flags = 0; + } else { + if (selector & 0x4) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + dt = &env->gdt; + index = selector & ~7; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) + entry_limit = 15; + else +#endif + entry_limit = 7; + if ((index + entry_limit) > dt->limit) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + if ((e2 & DESC_S_MASK) || + (type != 1 && type != 9)) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t e3; + e3 = ldl_kernel(ptr + 8); + load_seg_cache_raw_dt(&env->tr, e1, e2); + env->tr.base |= (target_ulong)e3 << 32; + } else +#endif + { + load_seg_cache_raw_dt(&env->tr, e1, e2); + } + e2 |= DESC_TSS_BUSY_MASK; + stl_kernel(ptr + 4, e2); + } + env->tr.selector = selector; +} + +/* only works if protected mode and not VM86. seg_reg must be != R_CS */ +void load_seg(int seg_reg, int selector) +{ + uint32_t e1, e2; + int cpl, dpl, rpl; + SegmentCache *dt; + int index; + target_ulong ptr; + + selector &= 0xffff; + cpl = env->hflags & HF_CPL_MASK; + if ((selector & 0xfffc) == 0) { + /* null selector case */ + if (seg_reg == R_SS +#ifdef TARGET_X86_64 + && (!(env->hflags & HF_CS64_MASK) || cpl == 3) +#endif + ) + raise_exception_err(EXCP0D_GPF, 0); + cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); + } else { + + if (selector & 0x4) + dt = &env->ldt; + else + dt = &env->gdt; + index = selector & ~7; + if ((index + 7) > dt->limit) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + ptr = dt->base + index; + e1 = ldl_kernel(ptr); + e2 = ldl_kernel(ptr + 4); + + if (!(e2 & DESC_S_MASK)) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (seg_reg == R_SS) { + /* must be writable segment */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (rpl != cpl || dpl != cpl) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + } else { + /* must be readable segment */ + if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { + /* if not conforming code, test rights */ + if (dpl < cpl || dpl < rpl) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + } + } + + if (!(e2 & DESC_P_MASK)) { + if (seg_reg == R_SS) + raise_exception_err(EXCP0C_STACK, selector & 0xfffc); + else + raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); + } + + /* set the access bit if not already set */ + if (!(e2 & DESC_A_MASK)) { + e2 |= DESC_A_MASK; + stl_kernel(ptr + 4, e2); + } + + cpu_x86_load_seg_cache(env, seg_reg, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); +#if 0 + fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", + selector, (unsigned long)sc->base, sc->limit, sc->flags); +#endif + } +} + +/* protected mode jump */ +void helper_ljmp_protected_T0_T1(int next_eip_addend) +{ + int new_cs, gate_cs, type; + uint32_t e1, e2, cpl, dpl, rpl, limit; + target_ulong new_eip, next_eip; + + new_cs = T0; + new_eip = T1; + if ((new_cs 
& 0xfffc) == 0) + raise_exception_err(EXCP0D_GPF, 0); + if (load_segment(&e1, &e2, new_cs) != 0) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if (!(e2 & DESC_CS_MASK)) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + /* conforming code segment */ + if (dpl > cpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + } else { + /* non conforming code segment */ + rpl = new_cs & 3; + if (rpl > cpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + if (dpl != cpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); + limit = get_seg_limit(e1, e2); + if (new_eip > limit && + !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + EIP = new_eip; + } else { + /* jump to call or task gate */ + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + rpl = new_cs & 3; + cpl = env->hflags & HF_CPL_MASK; + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch(type) { + case 1: /* 286 TSS */ + case 9: /* 386 TSS */ + case 5: /* task gate */ + if (dpl < cpl || dpl < rpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + next_eip = env->eip + next_eip_addend; + switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); + CC_OP = CC_OP_EFLAGS; + break; + case 4: /* 286 call gate */ + case 12: /* 386 call gate */ + if ((dpl < cpl) || (dpl < rpl)) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); + gate_cs = e1 >> 16; + new_eip = (e1 & 0xffff); + if (type == 12) + new_eip |= (e2 & 0xffff0000); + if (load_segment(&e1, &e2, gate_cs) != 0) + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + /* must be code segment */ + if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != + (DESC_S_MASK | DESC_CS_MASK))) + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); + if (((e2 & DESC_C_MASK) && (dpl > cpl)) || + (!(e2 & DESC_C_MASK) && (dpl != cpl))) + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc); + limit = get_seg_limit(e1, e2); + if (new_eip > limit) + raise_exception_err(EXCP0D_GPF, 0); + cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + EIP = new_eip; + break; + default: + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + break; + } + } +} + +/* real mode call */ +void helper_lcall_real_T0_T1(int shift, int next_eip) +{ + int new_cs, new_eip; + uint32_t esp, esp_mask; + target_ulong ssp; + + new_cs = T0; + new_eip = T1; + esp = ESP; + esp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + if (shift) { + PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector); + PUSHL(ssp, esp, esp_mask, next_eip); + } else { + PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector); + PUSHW(ssp, esp, esp_mask, next_eip); + } + + ESP = (ESP & ~esp_mask) | (esp & esp_mask); + env->eip = new_eip; + env->segs[R_CS].selector = new_cs; + env->segs[R_CS].base = (new_cs << 4); +} + +/* protected mode call */ +void helper_lcall_protected_T0_T1(int shift, int next_eip_addend) +{ + int new_cs, new_stack, i; + uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; + uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask; + uint32_t val, 
limit, old_sp_mask; + target_ulong ssp, old_ssp, next_eip, new_eip; + + new_cs = T0; + new_eip = T1; + next_eip = env->eip + next_eip_addend; +#ifdef DEBUG_PCALL + if (loglevel & CPU_LOG_PCALL) { + fprintf(logfile, "lcall %04x:%08x s=%d\n", + new_cs, (uint32_t)new_eip, shift); + cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP); + } +#endif + if ((new_cs & 0xfffc) == 0) + raise_exception_err(EXCP0D_GPF, 0); + if (load_segment(&e1, &e2, new_cs) != 0) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + cpl = env->hflags & HF_CPL_MASK; +#ifdef DEBUG_PCALL + if (loglevel & CPU_LOG_PCALL) { + fprintf(logfile, "desc=%08x:%08x\n", e1, e2); + } +#endif + if (e2 & DESC_S_MASK) { + if (!(e2 & DESC_CS_MASK)) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + /* conforming code segment */ + if (dpl > cpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + } else { + /* non conforming code segment */ + rpl = new_cs & 3; + if (rpl > cpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + if (dpl != cpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); + +#ifdef TARGET_X86_64 + /* XXX: check 16/32 bit cases in long mode */ + if (shift == 2) { + target_ulong rsp; + /* 64 bit case */ + rsp = ESP; + PUSHQ(rsp, env->segs[R_CS].selector); + PUSHQ(rsp, next_eip); + /* from this point, not restartable */ + ESP = rsp; + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), e2); + EIP = new_eip; + } else +#endif + { + sp = ESP; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + if (shift) { + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, sp, sp_mask, next_eip); + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, sp, sp_mask, next_eip); + } + + limit = get_seg_limit(e1, e2); + if (new_eip > limit) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + /* from this point, not restartable */ + ESP = (ESP & ~sp_mask) | (sp & sp_mask); + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + EIP = new_eip; + } + } else { + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + rpl = new_cs & 3; + switch(type) { + case 1: /* available 286 TSS */ + case 9: /* available 386 TSS */ + case 5: /* task gate */ + if (dpl < cpl || dpl < rpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); + CC_OP = CC_OP_EFLAGS; + return; + case 4: /* 286 call gate */ + case 12: /* 386 call gate */ + break; + default: + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + break; + } + shift = type >> 3; + + if (dpl < cpl || dpl < rpl) + raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) + raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); + selector = e1 >> 16; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); + param_count = e2 & 0x1f; + if ((selector & 0xfffc) == 0) + raise_exception_err(EXCP0D_GPF, 0); + + if (load_segment(&e1, &e2, selector) != 0) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) + raise_exception_err(EXCP0D_GPF, selector & 0xfffc); + if (!(e2 & DESC_P_MASK)) + 
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+
+        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+            /* to inner privilege */
+            get_ss_esp_from_tss(&ss, &sp, dpl);
+#ifdef DEBUG_PCALL
+            if (loglevel & CPU_LOG_PCALL)
+                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
+                        ss, sp, param_count, ESP);
+#endif
+            if ((ss & 0xfffc) == 0)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if ((ss & 3) != dpl)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+            if (ss_dpl != dpl)
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (!(ss_e2 & DESC_S_MASK) ||
+                (ss_e2 & DESC_CS_MASK) ||
+                !(ss_e2 & DESC_W_MASK))
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+            if (!(ss_e2 & DESC_P_MASK))
+                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+
+            //            push_size = ((param_count * 2) + 8) << shift;
+
+            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
+            old_ssp = env->segs[R_SS].base;
+
+            sp_mask = get_sp_mask(ss_e2);
+            ssp = get_seg_base(ss_e1, ss_e2);
+            if (shift) {
+                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
+                PUSHL(ssp, sp, sp_mask, ESP);
+                for(i = param_count - 1; i >= 0; i--) {
+                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
+                    PUSHL(ssp, sp, sp_mask, val);
+                }
+            } else {
+                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
+                PUSHW(ssp, sp, sp_mask, ESP);
+                for(i = param_count - 1; i >= 0; i--) {
+                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
+                    PUSHW(ssp, sp, sp_mask, val);
+                }
+            }
+            new_stack = 1;
+        } else {
+            /* to same privilege */
+            sp = ESP;
+            sp_mask = get_sp_mask(env->segs[R_SS].flags);
+            ssp = env->segs[R_SS].base;
+            //            push_size = (4 << shift);
+            new_stack = 0;
+        }
+
+        if (shift) {
+            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
+            PUSHL(ssp, sp, sp_mask, next_eip);
+        } else {
+            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
+            PUSHW(ssp, sp, sp_mask, next_eip);
+        }
+
+        /* from this point, not restartable */
+
+        if (new_stack) {
+            ss = (ss & ~3) | dpl;
+            cpu_x86_load_seg_cache(env, R_SS, ss,
+                                   ssp,
+                                   get_seg_limit(ss_e1, ss_e2),
+                                   ss_e2);
+        }
+
+        selector = (selector & ~3) | dpl;
+        cpu_x86_load_seg_cache(env, R_CS, selector,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+        cpu_x86_set_cpl(env, dpl);
+        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+        EIP = offset;
+    }
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
+#endif
+}
+
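The parameter-copy loop above is the subtle part of a call through a gate. As an editorial aside (not part of the patch; names are hypothetical), the gate fields it consumes decode like this for the 386 call-gate format used in helper_lcall_protected_T0_T1():

#include <stdint.h>

/* A 386 call gate packs the target selector in e1[31:16], the target
   offset split across e1[15:0] and e2[31:16], and a 5-bit count of
   caller stack words to copy to the new stack in e2[4:0]. */
static unsigned call_gate_param_count(uint32_t e2)
{
    return e2 & 0x1f;
}

static uint32_t call_gate_offset(uint32_t e1, uint32_t e2)
{
    return (e2 & 0xffff0000) | (e1 & 0x0000ffff);
}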
+/* real and vm86 mode iret */
+void helper_iret_real(int shift)
+{
+    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
+    target_ulong ssp;
+    int eflags_mask;
+
+    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
+    sp = ESP;
+    ssp = env->segs[R_SS].base;
+    if (shift == 1) {
+        /* 32 bits */
+        POPL(ssp, sp, sp_mask, new_eip);
+        POPL(ssp, sp, sp_mask, new_cs);
+        new_cs &= 0xffff;
+        POPL(ssp, sp, sp_mask, new_eflags);
+    } else {
+        /* 16 bits */
+        POPW(ssp, sp, sp_mask, new_eip);
+        POPW(ssp, sp, sp_mask, new_cs);
+        POPW(ssp, sp, sp_mask, new_eflags);
+    }
+    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+    load_seg_vm(R_CS, new_cs);
+    env->eip = new_eip;
+    if (env->eflags & VM_MASK)
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
+    else
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
+    if (shift == 0)
+        eflags_mask &= 0xffff;
+    load_eflags(new_eflags, eflags_mask);
+}
+
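The sp_mask idiom used above and throughout helper_ret_protected() below comes from get_sp_mask(), defined earlier in this file. A rough editorial sketch of the idea (assumed behavior, self-contained constant mirroring DESC_B_MASK from cpu.h):

#include <stdint.h>

#define SKETCH_DESC_B_MASK (1 << 22) /* mirrors DESC_B_MASK in cpu.h */

static uint32_t sp_mask_sketch(uint32_t ss_flags)
{
    /* 16-bit stack segment (B bit clear): only SP wraps, so stack
       arithmetic is masked with 0xffff; 32-bit stack segment: all of
       ESP is significant. */
    return (ss_flags & SKETCH_DESC_B_MASK) ? 0xffffffffu : 0xffffu;
}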
+static inline void validate_seg(int seg_reg, int cpl)
+{
+    int dpl;
+    uint32_t e2;
+
+    /* XXX: on x86_64, we do not want to nullify FS and GS because
+       they may still contain a valid base. I would be interested to
+       know how a real x86_64 CPU behaves */
+    if ((seg_reg == R_FS || seg_reg == R_GS) &&
+        (env->segs[seg_reg].selector & 0xfffc) == 0)
+        return;
+
+    e2 = env->segs[seg_reg].flags;
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+        /* data or non conforming code segment */
+        if (dpl < cpl) {
+            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
+        }
+    }
+}
+
+/* protected mode iret */
+static inline void helper_ret_protected(int shift, int is_iret, int addend)
+{
+    uint32_t new_cs, new_eflags, new_ss;
+    uint32_t new_es, new_ds, new_fs, new_gs;
+    uint32_t e1, e2, ss_e1, ss_e2;
+    int cpl, dpl, rpl, eflags_mask, iopl;
+    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
+
+#ifdef TARGET_X86_64
+    if (shift == 2)
+        sp_mask = -1;
+    else
+#endif
+        sp_mask = get_sp_mask(env->segs[R_SS].flags);
+    sp = ESP;
+    ssp = env->segs[R_SS].base;
+    new_eflags = 0; /* avoid warning */
+#ifdef TARGET_X86_64
+    if (shift == 2) {
+        POPQ(sp, new_eip);
+        POPQ(sp, new_cs);
+        new_cs &= 0xffff;
+        if (is_iret) {
+            POPQ(sp, new_eflags);
+        }
+    } else
+#endif
+    if (shift == 1) {
+        /* 32 bits */
+        POPL(ssp, sp, sp_mask, new_eip);
+        POPL(ssp, sp, sp_mask, new_cs);
+        new_cs &= 0xffff;
+        if (is_iret) {
+            POPL(ssp, sp, sp_mask, new_eflags);
+            if (new_eflags & VM_MASK)
+                goto return_to_vm86;
+        }
+    } else {
+        /* 16 bits */
+        POPW(ssp, sp, sp_mask, new_eip);
+        POPW(ssp, sp, sp_mask, new_cs);
+        if (is_iret)
+            POPW(ssp, sp, sp_mask, new_eflags);
+    }
+#ifdef DEBUG_PCALL
+    if (loglevel & CPU_LOG_PCALL) {
+        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
+                new_cs, new_eip, shift, addend);
+        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+    }
+#endif
+    if ((new_cs & 0xfffc) == 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    if (load_segment(&e1, &e2, new_cs) != 0)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    if (!(e2 & DESC_S_MASK) ||
+        !(e2 & DESC_CS_MASK))
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    cpl = env->hflags & HF_CPL_MASK;
+    rpl = new_cs & 3;
+    if (rpl < cpl)
+        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    if (e2 & DESC_C_MASK) {
+        if (dpl > rpl)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    } else {
+        if (dpl != rpl)
+            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+    }
+    if (!(e2 & DESC_P_MASK))
+        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+
+    sp += addend;
+    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
+                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
+        /* return to same privilege level */
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+    } else {
+        /* return to different privilege level */
+#ifdef TARGET_X86_64
+        if (shift == 2) {
+            POPQ(sp, new_esp);
+            POPQ(sp, new_ss);
+            new_ss &= 0xffff;
+        } else
+#endif
+        if (shift == 1) {
+            /* 32 bits */
+            POPL(ssp, sp, sp_mask, new_esp);
+            POPL(ssp, sp, sp_mask, new_ss);
+            new_ss &= 0xffff;
+        } else {
+            /* 16 bits */
+            POPW(ssp, sp, sp_mask, new_esp);
+            POPW(ssp, sp, sp_mask, new_ss);
+        }
+#ifdef DEBUG_PCALL
+        if (loglevel & CPU_LOG_PCALL) {
+            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
+                    new_ss, new_esp);
+        }
+#endif
+        if ((new_ss & 0xfffc) == 0) {
+#ifdef TARGET_X86_64
+            /* NULL ss is allowed in long mode if cpl != 3 */
+            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
+                cpu_x86_load_seg_cache(env, R_SS, new_ss,
+                                       0, 0xffffffff,
+                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
+                                       DESC_W_MASK | DESC_A_MASK);
+            } else
+#endif
+            {
+                raise_exception_err(EXCP0D_GPF, 0);
+            }
+        } else {
+            if ((new_ss & 3) != rpl)
+                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
+                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            if (!(ss_e2 & DESC_S_MASK) ||
+                (ss_e2 & DESC_CS_MASK) ||
+                !(ss_e2 & DESC_W_MASK))
+                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+            if (dpl != rpl)
+                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+            if (!(ss_e2 & DESC_P_MASK))
+                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
+            cpu_x86_load_seg_cache(env, R_SS, new_ss,
+                                   get_seg_base(ss_e1, ss_e2),
+                                   get_seg_limit(ss_e1, ss_e2),
+                                   ss_e2);
+        }
+
+        cpu_x86_load_seg_cache(env, R_CS, new_cs,
+                               get_seg_base(e1, e2),
+                               get_seg_limit(e1, e2),
+                               e2);
+        cpu_x86_set_cpl(env, rpl);
+        sp = new_esp;
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_CS64_MASK)
+            sp_mask = -1;
+        else
+#endif
+            sp_mask = get_sp_mask(ss_e2);
+
+        /* validate data segments */
+        validate_seg(R_ES, rpl);
+        validate_seg(R_DS, rpl);
+        validate_seg(R_FS, rpl);
+        validate_seg(R_GS, rpl);
+
+        sp += addend;
+    }
+    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+    env->eip = new_eip;
+    if (is_iret) {
+        /* NOTE: 'cpl' is the _old_ CPL */
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
+        if (cpl == 0)
+            eflags_mask |= IOPL_MASK;
+        iopl = (env->eflags >> IOPL_SHIFT) & 3;
+        if (cpl <= iopl)
+            eflags_mask |= IF_MASK;
+        if (shift == 0)
+            eflags_mask &= 0xffff;
+        load_eflags(new_eflags, eflags_mask);
+    }
+    return;
+
+ return_to_vm86:
+    POPL(ssp, sp, sp_mask, new_esp);
+    POPL(ssp, sp, sp_mask, new_ss);
+    POPL(ssp, sp, sp_mask, new_es);
+    POPL(ssp, sp, sp_mask, new_ds);
+    POPL(ssp, sp, sp_mask, new_fs);
+    POPL(ssp, sp, sp_mask, new_gs);
+
+    /* modify processor state */
+    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
+                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
+    load_seg_vm(R_CS, new_cs & 0xffff);
+    cpu_x86_set_cpl(env, 3);
+    load_seg_vm(R_SS, new_ss & 0xffff);
+    load_seg_vm(R_ES, new_es & 0xffff);
+    load_seg_vm(R_DS, new_ds & 0xffff);
+    load_seg_vm(R_FS, new_fs & 0xffff);
+    load_seg_vm(R_GS, new_gs & 0xffff);
+
+    env->eip = new_eip & 0xffff;
+    ESP = new_esp;
+}
+
+void helper_iret_protected(int shift, int next_eip)
+{
+    int tss_selector, type;
+    uint32_t e1, e2;
+
+    /* specific case for TSS */
+    if (env->eflags & NT_MASK) {
+#ifdef TARGET_X86_64
+        if (env->hflags & HF_LMA_MASK)
+            raise_exception_err(EXCP0D_GPF, 0);
+#endif
+        tss_selector = lduw_kernel(env->tr.base + 0);
+        if (tss_selector & 4)
+            raise_exception_err(EXCP0A_TSS, tss_selector & 
0xfffc); + if (load_segment(&e1, &e2, tss_selector) != 0) + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); + type = (e2 >> DESC_TYPE_SHIFT) & 0x17; + /* NOTE: we check both segment and busy TSS */ + if (type != 3) + raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); + switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip); + } else { + helper_ret_protected(shift, 1, 0); + } +#ifdef USE_KQEMU + if (kqemu_is_ok(env)) { + CC_OP = CC_OP_EFLAGS; + env->exception_index = -1; + cpu_loop_exit(); + } +#endif +} + +void helper_lret_protected(int shift, int addend) +{ + helper_ret_protected(shift, 0, addend); +#ifdef USE_KQEMU + if (kqemu_is_ok(env)) { + env->exception_index = -1; + cpu_loop_exit(); + } +#endif +} + +void helper_sysenter(void) +{ + if (env->sysenter_cs == 0) { + raise_exception_err(EXCP0D_GPF, 0); + } + env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); + cpu_x86_set_cpl(env, 0); + cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + ESP = env->sysenter_esp; + EIP = env->sysenter_eip; +} + +void helper_sysexit(void) +{ + int cpl; + + cpl = env->hflags & HF_CPL_MASK; + if (env->sysenter_cs == 0 || cpl != 0) { + raise_exception_err(EXCP0D_GPF, 0); + } + cpu_x86_set_cpl(env, 3); + cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + ESP = ECX; + EIP = EDX; +#ifdef USE_KQEMU + if (kqemu_is_ok(env)) { + env->exception_index = -1; + cpu_loop_exit(); + } +#endif +} + +void helper_movl_crN_T0(int reg) +{ +#if !defined(CONFIG_USER_ONLY) + switch(reg) { + case 0: + cpu_x86_update_cr0(env, T0); + break; + case 3: + cpu_x86_update_cr3(env, T0); + break; + case 4: + cpu_x86_update_cr4(env, T0); + break; + case 8: + cpu_set_apic_tpr(env, T0); + break; + default: + env->cr[reg] = T0; + break; + } +#endif +} + +/* XXX: do more */ +void helper_movl_drN_T0(int reg) +{ + env->dr[reg] = T0; +} + +void helper_invlpg(target_ulong addr) +{ + cpu_x86_flush_tlb(env, addr); +} + +void helper_rdtsc(void) +{ + uint64_t val; + + if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { + raise_exception(EXCP0D_GPF); + } + val = cpu_get_tsc(env); + EAX = (uint32_t)(val); + EDX = (uint32_t)(val >> 32); +} + +#if defined(CONFIG_USER_ONLY) +void helper_wrmsr(void) +{ +} + +void helper_rdmsr(void) +{ +} +#else +void helper_wrmsr(void) +{ + uint64_t val; + + val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32); + + switch((uint32_t)ECX) { + case MSR_IA32_SYSENTER_CS: + env->sysenter_cs = val & 0xffff; + break; + case MSR_IA32_SYSENTER_ESP: + env->sysenter_esp = val; + break; + case MSR_IA32_SYSENTER_EIP: + env->sysenter_eip = val; + break; + case MSR_IA32_APICBASE: + cpu_set_apic_base(env, val); + break; + case MSR_EFER: + { + uint64_t update_mask; + update_mask = 0; + if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) + update_mask |= MSR_EFER_SCE; + if (env->cpuid_ext2_features & CPUID_EXT2_LM) + update_mask |= 
MSR_EFER_LME; + if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) + update_mask |= MSR_EFER_FFXSR; + if (env->cpuid_ext2_features & CPUID_EXT2_NX) + update_mask |= MSR_EFER_NXE; + env->efer = (env->efer & ~update_mask) | + (val & update_mask); + } + break; + case MSR_STAR: + env->star = val; + break; + case MSR_PAT: + env->pat = val; + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + env->lstar = val; + break; + case MSR_CSTAR: + env->cstar = val; + break; + case MSR_FMASK: + env->fmask = val; + break; + case MSR_FSBASE: + env->segs[R_FS].base = val; + break; + case MSR_GSBASE: + env->segs[R_GS].base = val; + break; + case MSR_KERNELGSBASE: + env->kernelgsbase = val; + break; +#endif + default: + /* XXX: exception ? */ + break; + } +} + +void helper_rdmsr(void) +{ + uint64_t val; + switch((uint32_t)ECX) { + case MSR_IA32_SYSENTER_CS: + val = env->sysenter_cs; + break; + case MSR_IA32_SYSENTER_ESP: + val = env->sysenter_esp; + break; + case MSR_IA32_SYSENTER_EIP: + val = env->sysenter_eip; + break; + case MSR_IA32_APICBASE: + val = cpu_get_apic_base(env); + break; + case MSR_EFER: + val = env->efer; + break; + case MSR_STAR: + val = env->star; + break; + case MSR_PAT: + val = env->pat; + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + val = env->lstar; + break; + case MSR_CSTAR: + val = env->cstar; + break; + case MSR_FMASK: + val = env->fmask; + break; + case MSR_FSBASE: + val = env->segs[R_FS].base; + break; + case MSR_GSBASE: + val = env->segs[R_GS].base; + break; + case MSR_KERNELGSBASE: + val = env->kernelgsbase; + break; +#endif + default: + /* XXX: exception ? */ + val = 0; + break; + } + EAX = (uint32_t)(val); + EDX = (uint32_t)(val >> 32); +} +#endif + +void helper_lsl(void) +{ + unsigned int selector, limit; + uint32_t e1, e2, eflags; + int rpl, dpl, cpl, type; + + eflags = cc_table[CC_OP].compute_all(); + selector = T0 & 0xffff; + if (load_segment(&e1, &e2, selector) != 0) + goto fail; + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { + /* conforming */ + } else { + if (dpl < cpl || dpl < rpl) + goto fail; + } + } else { + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch(type) { + case 1: + case 2: + case 3: + case 9: + case 11: + break; + default: + goto fail; + } + if (dpl < cpl || dpl < rpl) { + fail: + CC_SRC = eflags & ~CC_Z; + return; + } + } + limit = get_seg_limit(e1, e2); + T1 = limit; + CC_SRC = eflags | CC_Z; +} + +void helper_lar(void) +{ + unsigned int selector; + uint32_t e1, e2, eflags; + int rpl, dpl, cpl, type; + + eflags = cc_table[CC_OP].compute_all(); + selector = T0 & 0xffff; + if ((selector & 0xfffc) == 0) + goto fail; + if (load_segment(&e1, &e2, selector) != 0) + goto fail; + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) { + /* conforming */ + } else { + if (dpl < cpl || dpl < rpl) + goto fail; + } + } else { + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch(type) { + case 1: + case 2: + case 3: + case 4: + case 5: + case 9: + case 11: + case 12: + break; + default: + goto fail; + } + if (dpl < cpl || dpl < rpl) { + fail: + CC_SRC = eflags & ~CC_Z; + return; + } + } + T1 = e2 & 0x00f0ff00; + CC_SRC = eflags | CC_Z; +} + +void helper_verr(void) +{ + unsigned int selector; + uint32_t e1, e2, eflags; + int rpl, dpl, cpl; + + eflags = cc_table[CC_OP].compute_all(); + selector = T0 & 0xffff; + if ((selector & 0xfffc) == 0) + 
goto fail; + if (load_segment(&e1, &e2, selector) != 0) + goto fail; + if (!(e2 & DESC_S_MASK)) + goto fail; + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_CS_MASK) { + if (!(e2 & DESC_R_MASK)) + goto fail; + if (!(e2 & DESC_C_MASK)) { + if (dpl < cpl || dpl < rpl) + goto fail; + } + } else { + if (dpl < cpl || dpl < rpl) { + fail: + CC_SRC = eflags & ~CC_Z; + return; + } + } + CC_SRC = eflags | CC_Z; +} + +void helper_verw(void) +{ + unsigned int selector; + uint32_t e1, e2, eflags; + int rpl, dpl, cpl; + + eflags = cc_table[CC_OP].compute_all(); + selector = T0 & 0xffff; + if ((selector & 0xfffc) == 0) + goto fail; + if (load_segment(&e1, &e2, selector) != 0) + goto fail; + if (!(e2 & DESC_S_MASK)) + goto fail; + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_CS_MASK) { + goto fail; + } else { + if (dpl < cpl || dpl < rpl) + goto fail; + if (!(e2 & DESC_W_MASK)) { + fail: + CC_SRC = eflags & ~CC_Z; + return; + } + } + CC_SRC = eflags | CC_Z; +} + +/* FPU helpers */ + +void helper_fldt_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = helper_fldt(A0); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fstt_ST0_A0(void) +{ + helper_fstt(ST0, A0); +} + +void fpu_set_exception(int mask) +{ + env->fpus |= mask; + if (env->fpus & (~env->fpuc & FPUC_EM)) + env->fpus |= FPUS_SE | FPUS_B; +} + +CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b) +{ + if (b == 0.0) + fpu_set_exception(FPUS_ZE); + return a / b; +} + +void fpu_raise_exception(void) +{ + if (env->cr[0] & CR0_NE_MASK) { + raise_exception(EXCP10_COPR); + } +#if !defined(CONFIG_USER_ONLY) + else { + cpu_set_ferr(env); + } +#endif +} + +/* BCD ops */ + +void helper_fbld_ST0_A0(void) +{ + CPU86_LDouble tmp; + uint64_t val; + unsigned int v; + int i; + + val = 0; + for(i = 8; i >= 0; i--) { + v = ldub(A0 + i); + val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); + } + tmp = val; + if (ldub(A0 + 9) & 0x80) + tmp = -tmp; + fpush(); + ST0 = tmp; +} + +void helper_fbst_ST0_A0(void) +{ + int v; + target_ulong mem_ref, mem_end; + int64_t val; + + val = floatx_to_int64(ST0, &env->fp_status); + mem_ref = A0; + mem_end = mem_ref + 9; + if (val < 0) { + stb(mem_end, 0x80); + val = -val; + } else { + stb(mem_end, 0x00); + } + while (mem_ref < mem_end) { + if (val == 0) + break; + v = val % 100; + val = val / 100; + v = ((v / 10) << 4) | (v % 10); + stb(mem_ref++, v); + } + while (mem_ref < mem_end) { + stb(mem_ref++, 0); + } +} + +void helper_f2xm1(void) +{ + ST0 = pow(2.0,ST0) - 1.0; +} + +void helper_fyl2x(void) +{ + CPU86_LDouble fptemp; + + fptemp = ST0; + if (fptemp>0.0){ + fptemp = log(fptemp)/log(2.0); /* log2(ST) */ + ST1 *= fptemp; + fpop(); + } else { + env->fpus &= (~0x4700); + env->fpus |= 0x400; + } +} + +void helper_fptan(void) +{ + CPU86_LDouble fptemp; + + fptemp = ST0; + if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = tan(fptemp); + fpush(); + ST0 = 1.0; + env->fpus &= (~0x400); /* C2 <-- 0 */ + /* the above code is for |arg| < 2**52 only */ + } +} + +void helper_fpatan(void) +{ + CPU86_LDouble fptemp, fpsrcop; + + fpsrcop = ST1; + fptemp = ST0; + ST1 = atan2(fpsrcop,fptemp); + fpop(); +} + +void helper_fxtract(void) +{ + CPU86_LDoubleU temp; + unsigned int expdif; + + temp.d = ST0; + expdif = EXPD(temp) - EXPBIAS; + /*DP exponent bias*/ + ST0 = expdif; + fpush(); + 
BIASEXPONENT(temp); + ST0 = temp.d; +} + +void helper_fprem1(void) +{ + CPU86_LDouble dblq, fpsrcop, fptemp; + CPU86_LDoubleU fpsrcop1, fptemp1; + int expdif; + int q; + + fpsrcop = ST0; + fptemp = ST1; + fpsrcop1.d = fpsrcop; + fptemp1.d = fptemp; + expdif = EXPD(fpsrcop1) - EXPD(fptemp1); + if (expdif < 53) { + dblq = fpsrcop / fptemp; + dblq = (dblq < 0.0)? ceil(dblq): floor(dblq); + ST0 = fpsrcop - fptemp*dblq; + q = (int)dblq; /* cutting off top bits is assumed here */ + env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ + /* (C0,C1,C3) <-- (q2,q1,q0) */ + env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */ + env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */ + env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */ + } else { + env->fpus |= 0x400; /* C2 <-- 1 */ + fptemp = pow(2.0, expdif-50); + fpsrcop = (ST0 / ST1) / fptemp; + /* fpsrcop = integer obtained by rounding to the nearest */ + fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)? + floor(fpsrcop): ceil(fpsrcop); + ST0 -= (ST1 * fpsrcop * fptemp); + } +} + +void helper_fprem(void) +{ + CPU86_LDouble dblq, fpsrcop, fptemp; + CPU86_LDoubleU fpsrcop1, fptemp1; + int expdif; + int q; + + fpsrcop = ST0; + fptemp = ST1; + fpsrcop1.d = fpsrcop; + fptemp1.d = fptemp; + expdif = EXPD(fpsrcop1) - EXPD(fptemp1); + if ( expdif < 53 ) { + dblq = fpsrcop / fptemp; + dblq = (dblq < 0.0)? ceil(dblq): floor(dblq); + ST0 = fpsrcop - fptemp*dblq; + q = (int)dblq; /* cutting off top bits is assumed here */ + env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */ + /* (C0,C1,C3) <-- (q2,q1,q0) */ + env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */ + env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */ + env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */ + } else { + env->fpus |= 0x400; /* C2 <-- 1 */ + fptemp = pow(2.0, expdif-50); + fpsrcop = (ST0 / ST1) / fptemp; + /* fpsrcop = integer obtained by chopping */ + fpsrcop = (fpsrcop < 0.0)? 
+                  -(floor(fabs(fpsrcop))): floor(fpsrcop);
+        ST0 -= (ST1 * fpsrcop * fptemp);
+    }
+}
+
+void helper_fyl2xp1(void)
+{
+    CPU86_LDouble fptemp;
+
+    fptemp = ST0;
+    if ((fptemp+1.0)>0.0) {
+        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
+        ST1 *= fptemp;
+        fpop();
+    } else {
+        env->fpus &= (~0x4700);
+        env->fpus |= 0x400;
+    }
+}
+
+void helper_fsqrt(void)
+{
+    CPU86_LDouble fptemp;
+
+    fptemp = ST0;
+    if (fptemp<0.0) {
+        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
+        env->fpus |= 0x400;
+    }
+    ST0 = sqrt(fptemp);
+}
+
+void helper_fsincos(void)
+{
+    CPU86_LDouble fptemp;
+
+    fptemp = ST0;
+    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        ST0 = sin(fptemp);
+        fpush();
+        ST0 = cos(fptemp);
+        env->fpus &= (~0x400);  /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**63 only */
+    }
+}
+
+void helper_frndint(void)
+{
+    ST0 = floatx_round_to_int(ST0, &env->fp_status);
+}
+
+void helper_fscale(void)
+{
+    ST0 = ldexp(ST0, (int)(ST1));
+}
+
+void helper_fsin(void)
+{
+    CPU86_LDouble fptemp;
+
+    fptemp = ST0;
+    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        ST0 = sin(fptemp);
+        env->fpus &= (~0x400);  /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**53 only */
+    }
+}
+
+void helper_fcos(void)
+{
+    CPU86_LDouble fptemp;
+
+    fptemp = ST0;
+    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+        env->fpus |= 0x400;
+    } else {
+        ST0 = cos(fptemp);
+        env->fpus &= (~0x400);  /* C2 <-- 0 */
+        /* the above code is for |arg| < 2**63 only */
+    }
+}
+
+void helper_fxam_ST0(void)
+{
+    CPU86_LDoubleU temp;
+    int expdif;
+
+    temp.d = ST0;
+
+    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
+    if (SIGND(temp))
+        env->fpus |= 0x200; /* C1 <-- 1 */
+
+    /* XXX: test fptags too */
+    expdif = EXPD(temp);
+    if (expdif == MAXEXPD) {
+#ifdef USE_X86LDOUBLE
+        if (MANTD(temp) == 0x8000000000000000ULL)
+#else
+        if (MANTD(temp) == 0)
+#endif
+            env->fpus |= 0x500 /*Infinity*/;
+        else
+            env->fpus |= 0x100 /*NaN*/;
+    } else if (expdif == 0) {
+        if (MANTD(temp) == 0)
+            env->fpus |= 0x4000 /*Zero*/;
+        else
+            env->fpus |= 0x4400 /*Denormal*/;
+    } else {
+        env->fpus |= 0x400;
+    }
+}
+
+void helper_fstenv(target_ulong ptr, int data32)
+{
+    int fpus, fptag, exp, i;
+    uint64_t mant;
+    CPU86_LDoubleU tmp;
+
+    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+    fptag = 0;
+    for (i=7; i>=0; i--) {
+        fptag <<= 2;
+        if (env->fptags[i]) {
+            fptag |= 3;
+        } else {
+            tmp.d = env->fpregs[i].d;
+            exp = EXPD(tmp);
+            mant = MANTD(tmp);
+            if (exp == 0 && mant == 0) {
+                /* zero */
+                fptag |= 1;
+            } else if (exp == 0 || exp == MAXEXPD
+#ifdef USE_X86LDOUBLE
+                       || (mant & (1LL << 63)) == 0
+#endif
+                       ) {
+                /* NaNs, infinity, denormal */
+                fptag |= 2;
+            }
+        }
+    }
+    if (data32) {
+        /* 32 bit */
+        stl(ptr, env->fpuc);
+        stl(ptr + 4, fpus);
+        stl(ptr + 8, fptag);
+        stl(ptr + 12, 0); /* fpip */
+        stl(ptr + 16, 0); /* fpcs */
+        stl(ptr + 20, 0); /* fpoo */
+        stl(ptr + 24, 0); /* fpos */
+    } else {
+        /* 16 bit */
+        stw(ptr, env->fpuc);
+        stw(ptr + 2, fpus);
+        stw(ptr + 4, fptag);
+        stw(ptr + 6, 0);
+        stw(ptr + 8, 0);
+        stw(ptr + 10, 0);
+        stw(ptr + 12, 0);
+    }
+}
+
+void helper_fldenv(target_ulong ptr, int data32)
+{
+    int i, fpus, fptag;
+
+    if (data32) {
+        env->fpuc = lduw(ptr);
+        fpus = lduw(ptr + 4);
+        fptag = lduw(ptr + 8);
+    }
+    else {
+        env->fpuc = lduw(ptr);
+        fpus = lduw(ptr + 2);
+        fptag = lduw(ptr + 4);
+    }
+    env->fpstt = (fpus >> 11) & 7;
+    env->fpus = fpus & ~0x3800;
+    for(i = 0;i < 8; i++) {
+        env->fptags[i] = ((fptag & 3) == 3);
+        fptag >>= 2;
+    }
+}
+
+void 
helper_fsave(target_ulong ptr, int data32) +{ + CPU86_LDouble tmp; + int i; + + helper_fstenv(ptr, data32); + + ptr += (14 << data32); + for(i = 0;i < 8; i++) { + tmp = ST(i); + helper_fstt(tmp, ptr); + ptr += 10; + } + + /* fninit */ + env->fpus = 0; + env->fpstt = 0; + env->fpuc = 0x37f; + env->fptags[0] = 1; + env->fptags[1] = 1; + env->fptags[2] = 1; + env->fptags[3] = 1; + env->fptags[4] = 1; + env->fptags[5] = 1; + env->fptags[6] = 1; + env->fptags[7] = 1; +} + +void helper_frstor(target_ulong ptr, int data32) +{ + CPU86_LDouble tmp; + int i; + + helper_fldenv(ptr, data32); + ptr += (14 << data32); + + for(i = 0;i < 8; i++) { + tmp = helper_fldt(ptr); + ST(i) = tmp; + ptr += 10; + } +} + +void helper_fxsave(target_ulong ptr, int data64) +{ + int fpus, fptag, i, nb_xmm_regs; + CPU86_LDouble tmp; + target_ulong addr; + + fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; + fptag = 0; + for(i = 0; i < 8; i++) { + fptag |= (env->fptags[i] << i); + } + stw(ptr, env->fpuc); + stw(ptr + 2, fpus); + stw(ptr + 4, fptag ^ 0xff); + + addr = ptr + 0x20; + for(i = 0;i < 8; i++) { + tmp = ST(i); + helper_fstt(tmp, addr); + addr += 16; + } + + if (env->cr[4] & CR4_OSFXSR_MASK) { + /* XXX: finish it */ + stl(ptr + 0x18, env->mxcsr); /* mxcsr */ + stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ + nb_xmm_regs = 8 << data64; + addr = ptr + 0xa0; + for(i = 0; i < nb_xmm_regs; i++) { + stq(addr, env->xmm_regs[i].XMM_Q(0)); + stq(addr + 8, env->xmm_regs[i].XMM_Q(1)); + addr += 16; + } + } +} + +void helper_fxrstor(target_ulong ptr, int data64) +{ + int i, fpus, fptag, nb_xmm_regs; + CPU86_LDouble tmp; + target_ulong addr; + + env->fpuc = lduw(ptr); + fpus = lduw(ptr + 2); + fptag = lduw(ptr + 4); + env->fpstt = (fpus >> 11) & 7; + env->fpus = fpus & ~0x3800; + fptag ^= 0xff; + for(i = 0;i < 8; i++) { + env->fptags[i] = ((fptag >> i) & 1); + } + + addr = ptr + 0x20; + for(i = 0;i < 8; i++) { + tmp = helper_fldt(addr); + ST(i) = tmp; + addr += 16; + } + + if (env->cr[4] & CR4_OSFXSR_MASK) { + /* XXX: finish it */ + env->mxcsr = ldl(ptr + 0x18); + //ldl(ptr + 0x1c); + nb_xmm_regs = 8 << data64; + addr = ptr + 0xa0; + for(i = 0; i < nb_xmm_regs; i++) { + env->xmm_regs[i].XMM_Q(0) = ldq(addr); + env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8); + addr += 16; + } + } +} + +#ifndef USE_X86LDOUBLE + +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f) +{ + CPU86_LDoubleU temp; + int e; + + temp.d = f; + /* mantissa */ + *pmant = (MANTD(temp) << 11) | (1LL << 63); + /* exponent + sign */ + e = EXPD(temp) - EXPBIAS + 16383; + e |= SIGND(temp) >> 16; + *pexp = e; +} + +CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) +{ + CPU86_LDoubleU temp; + int e; + uint64_t ll; + + /* XXX: handle overflow ? 
*/ + e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */ + e |= (upper >> 4) & 0x800; /* sign */ + ll = (mant >> 11) & ((1LL << 52) - 1); +#ifdef __arm__ + temp.l.upper = (e << 20) | (ll >> 32); + temp.l.lower = ll; +#else + temp.ll = ll | ((uint64_t)e << 52); +#endif + return temp.d; +} + +#else + +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f) +{ + CPU86_LDoubleU temp; + + temp.d = f; + *pmant = temp.l.lower; + *pexp = temp.l.upper; +} + +CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper) +{ + CPU86_LDoubleU temp; + + temp.l.upper = upper; + temp.l.lower = mant; + return temp.d; +} +#endif + +#ifdef TARGET_X86_64 + +//#define DEBUG_MULDIV + +static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) +{ + *plow += a; + /* carry test */ + if (*plow < a) + (*phigh)++; + *phigh += b; +} + +static void neg128(uint64_t *plow, uint64_t *phigh) +{ + *plow = ~ *plow; + *phigh = ~ *phigh; + add128(plow, phigh, 1, 0); +} + +static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) +{ + uint32_t a0, a1, b0, b1; + uint64_t v; + + a0 = a; + a1 = a >> 32; + + b0 = b; + b1 = b >> 32; + + v = (uint64_t)a0 * (uint64_t)b0; + *plow = v; + *phigh = 0; + + v = (uint64_t)a0 * (uint64_t)b1; + add128(plow, phigh, v << 32, v >> 32); + + v = (uint64_t)a1 * (uint64_t)b0; + add128(plow, phigh, v << 32, v >> 32); + + v = (uint64_t)a1 * (uint64_t)b1; + *phigh += v; +#ifdef DEBUG_MULDIV + printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n", + a, b, *phigh, *plow); +#endif +} + +static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) +{ + int sa, sb; + sa = (a < 0); + if (sa) + a = -a; + sb = (b < 0); + if (sb) + b = -b; + mul64(plow, phigh, a, b); + if (sa ^ sb) { + neg128(plow, phigh); + } +} + +/* return TRUE if overflow */ +static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b) +{ + uint64_t q, r, a1, a0; + int i, qb, ab; + + a0 = *plow; + a1 = *phigh; + if (a1 == 0) { + q = a0 / b; + r = a0 % b; + *plow = q; + *phigh = r; + } else { + if (a1 >= b) + return 1; + /* XXX: use a better algorithm */ + for(i = 0; i < 64; i++) { + ab = a1 >> 63; + a1 = (a1 << 1) | (a0 >> 63); + if (ab || a1 >= b) { + a1 -= b; + qb = 1; + } else { + qb = 0; + } + a0 = (a0 << 1) | qb; + } +#if defined(DEBUG_MULDIV) + printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n", + *phigh, *plow, b, a0, a1); +#endif + *plow = a0; + *phigh = a1; + } + return 0; +} + +/* return TRUE if overflow */ +static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) +{ + int sa, sb; + sa = ((int64_t)*phigh < 0); + if (sa) + neg128(plow, phigh); + sb = (b < 0); + if (sb) + b = -b; + if (div64(plow, phigh, b) != 0) + return 1; + if (sa ^ sb) { + if (*plow > (1ULL << 63)) + return 1; + *plow = - *plow; + } else { + if (*plow >= (1ULL << 63)) + return 1; + } + if (sa) + *phigh = - *phigh; + return 0; +} + +void helper_mulq_EAX_T0(void) +{ + uint64_t r0, r1; + + mul64(&r0, &r1, EAX, T0); + EAX = r0; + EDX = r1; + CC_DST = r0; + CC_SRC = r1; +} + +void helper_imulq_EAX_T0(void) +{ + uint64_t r0, r1; + + imul64(&r0, &r1, EAX, T0); + EAX = r0; + EDX = r1; + CC_DST = r0; + CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); +} + +void helper_imulq_T0_T1(void) +{ + uint64_t r0, r1; + + imul64(&r0, &r1, T0, T1); + T0 = r0; + CC_DST = r0; + CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); +} + +void helper_divq_EAX_T0(void) +{ + uint64_t r0, r1; + if (T0 == 0) { + raise_exception(EXCP00_DIVZ); + } + r0 = 
EAX; + r1 = EDX; + if (div64(&r0, &r1, T0)) + raise_exception(EXCP00_DIVZ); + EAX = r0; + EDX = r1; +} + +void helper_idivq_EAX_T0(void) +{ + uint64_t r0, r1; + if (T0 == 0) { + raise_exception(EXCP00_DIVZ); + } + r0 = EAX; + r1 = EDX; + if (idiv64(&r0, &r1, T0)) + raise_exception(EXCP00_DIVZ); + EAX = r0; + EDX = r1; +} + +void helper_bswapq_T0(void) +{ + T0 = bswap64(T0); +} +#endif + +void helper_hlt(void) +{ + env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ + env->hflags |= HF_HALTED_MASK; + env->exception_index = EXCP_HLT; + cpu_loop_exit(); +} + +void helper_monitor(void) +{ + if (ECX != 0) + raise_exception(EXCP0D_GPF); + /* XXX: store address ? */ +} + +void helper_mwait(void) +{ + if (ECX != 0) + raise_exception(EXCP0D_GPF); + /* XXX: not complete but not completely erroneous */ + if (env->cpu_index != 0 || env->next_cpu != NULL) { + /* more than one CPU: do not sleep because another CPU may + wake this one */ + } else { + helper_hlt(); + } +} + +float approx_rsqrt(float a) +{ + return 1.0 / sqrt(a); +} + +float approx_rcp(float a) +{ + return 1.0 / a; +} + +void update_fp_status(void) +{ + int rnd_type; + + /* set rounding mode */ + switch(env->fpuc & RC_MASK) { + default: + case RC_NEAR: + rnd_type = float_round_nearest_even; + break; + case RC_DOWN: + rnd_type = float_round_down; + break; + case RC_UP: + rnd_type = float_round_up; + break; + case RC_CHOP: + rnd_type = float_round_to_zero; + break; + } + set_float_rounding_mode(rnd_type, &env->fp_status); +#ifdef FLOATX80 + switch((env->fpuc >> 8) & 3) { + case 0: + rnd_type = 32; + break; + case 2: + rnd_type = 64; + break; + case 3: + default: + rnd_type = 80; + break; + } + set_floatx80_rounding_precision(rnd_type, &env->fp_status); +#endif +} + +#if !defined(CONFIG_USER_ONLY) + +#define MMUSUFFIX _mmu +#define GETPC() (__builtin_return_address(0)) + +#define SHIFT 0 +#include "softmmu_template.h" + +#define SHIFT 1 +#include "softmmu_template.h" + +#define SHIFT 2 +#include "softmmu_template.h" + +#define SHIFT 3 +#include "softmmu_template.h" + +#endif + +/* try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr) +{ + TranslationBlock *tb; + int ret; + unsigned long pc; + CPUX86State *saved_env; + + /* XXX: hack to restore env in all cases, even if not called from + generated code */ + saved_env = env; + env = cpu_single_env; + + ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1); + if (ret) { + if (retaddr) { + /* now we have a real cpu fault */ + pc = (unsigned long)retaddr; + tb = tb_find_pc(pc); + if (tb) { + /* the PC is inside the translated code. 
It means that we have + a virtual CPU fault */ + cpu_restore_state(tb, env, pc, NULL); + } + } + if (retaddr) + raise_exception_err(env->exception_index, env->error_code); + else + raise_exception_err_norestore(env->exception_index, env->error_code); + } + env = saved_env; +} diff --git a/target-i386/helper2.c b/target-i386/helper2.c new file mode 100644 index 0000000..19af159 --- /dev/null +++ b/target-i386/helper2.c @@ -0,0 +1,1031 @@ +/* + * i386 helpers (without register variable usage) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <signal.h> +#include <assert.h> + +#include "cpu.h" +#include "exec-all.h" + +//#define DEBUG_MMU + +#ifdef USE_CODE_COPY +#include <asm/ldt.h> +#include <linux/unistd.h> +#include <linux/version.h> + +int modify_ldt(int func, void *ptr, unsigned long bytecount) +{ + return syscall(__NR_modify_ldt, func, ptr, bytecount); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66) +#define modify_ldt_ldt_s user_desc +#endif +#endif /* USE_CODE_COPY */ + +CPUX86State *cpu_x86_init(void) +{ + CPUX86State *env; + static int inited; + + env = qemu_mallocz(sizeof(CPUX86State)); + if (!env) + return NULL; + cpu_exec_init(env); + + /* init various static tables */ + if (!inited) { + inited = 1; + optimize_flags_init(); + } +#ifdef USE_CODE_COPY + /* testing code for code copy case */ + { + struct modify_ldt_ldt_s ldt; + + ldt.entry_number = 1; + ldt.base_addr = (unsigned long)env; + ldt.limit = (sizeof(CPUState) + 0xfff) >> 12; + ldt.seg_32bit = 1; + ldt.contents = MODIFY_LDT_CONTENTS_DATA; + ldt.read_exec_only = 0; + ldt.limit_in_pages = 1; + ldt.seg_not_present = 0; + ldt.useable = 1; + modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */ + + asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7)); + } +#endif + { + int family, model, stepping; +#ifdef TARGET_X86_64 + env->cpuid_vendor1 = 0x68747541; /* "Auth" */ + env->cpuid_vendor2 = 0x69746e65; /* "enti" */ + env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */ + family = 6; + model = 2; + stepping = 3; +#else + env->cpuid_vendor1 = 0x756e6547; /* "Genu" */ + env->cpuid_vendor2 = 0x49656e69; /* "ineI" */ + env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */ +#if 0 + /* pentium 75-200 */ + family = 5; + model = 2; + stepping = 11; +#else + /* pentium pro */ + family = 6; + model = 3; + stepping = 3; +#endif +#endif + env->cpuid_level = 2; + env->cpuid_version = (family << 8) | (model << 4) | stepping; + env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE | + CPUID_TSC | CPUID_MSR | CPUID_MCE | + CPUID_CX8 | CPUID_PGE | CPUID_CMOV | + CPUID_PAT); + env->pat = 0x0007040600070406ULL; + env->cpuid_ext_features = CPUID_EXT_SSE3; + env->cpuid_features |= CPUID_FXSR | CPUID_MMX | 
CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP; + env->cpuid_features |= CPUID_APIC; + env->cpuid_xlevel = 0; + { + const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION; + int c, len, i; + len = strlen(model_id); + for(i = 0; i < 48; i++) { + if (i >= len) + c = '\0'; + else + c = model_id[i]; + env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); + } + } +#ifdef TARGET_X86_64 + /* currently not enabled for std i386 because not fully tested */ + env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF); + env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX; + env->cpuid_xlevel = 0x80000008; + + /* these features are needed for Win64 and aren't fully implemented */ + env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA; +#endif + } + cpu_reset(env); +#ifdef USE_KQEMU + kqemu_init(env); +#endif + return env; +} + +/* NOTE: must be called outside the CPU execute loop */ +void cpu_reset(CPUX86State *env) +{ + int i; + + memset(env, 0, offsetof(CPUX86State, breakpoints)); + + tlb_flush(env, 1); + + /* init to reset state */ + +#ifdef CONFIG_SOFTMMU + env->hflags |= HF_SOFTMMU_MASK; +#endif + + cpu_x86_update_cr0(env, 0x60000010); + env->a20_mask = 0xffffffff; + + env->idt.limit = 0xffff; + env->gdt.limit = 0xffff; + env->ldt.limit = 0xffff; + env->ldt.flags = DESC_P_MASK; + env->tr.limit = 0xffff; + env->tr.flags = DESC_P_MASK; + + cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0); + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0); + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0); + + env->eip = 0xfff0; + env->regs[R_EDX] = 0x600; /* indicate P6 processor */ + + env->eflags = 0x2; + + /* FPU init */ + for(i = 0;i < 8; i++) + env->fptags[i] = 1; + env->fpuc = 0x37f; + + env->mxcsr = 0x1f80; +} + +void cpu_x86_close(CPUX86State *env) +{ + free(env); +} + +/***********************************************************/ +/* x86 debug */ + +static const char *cc_op_str[] = { + "DYNAMIC", + "EFLAGS", + + "MULB", + "MULW", + "MULL", + "MULQ", + + "ADDB", + "ADDW", + "ADDL", + "ADDQ", + + "ADCB", + "ADCW", + "ADCL", + "ADCQ", + + "SUBB", + "SUBW", + "SUBL", + "SUBQ", + + "SBBB", + "SBBW", + "SBBL", + "SBBQ", + + "LOGICB", + "LOGICW", + "LOGICL", + "LOGICQ", + + "INCB", + "INCW", + "INCL", + "INCQ", + + "DECB", + "DECW", + "DECL", + "DECQ", + + "SHLB", + "SHLW", + "SHLL", + "SHLQ", + + "SARB", + "SARW", + "SARL", + "SARQ", +}; + +void cpu_dump_state(CPUState *env, FILE *f, + int (*cpu_fprintf)(FILE *f, const char *fmt, ...), + int flags) +{ + int eflags, i, nb; + char cc_op_name[32]; + static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; + + eflags = env->eflags; +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + cpu_fprintf(f, + "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" + "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" + "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" + "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" + "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n", + env->regs[R_EAX], + env->regs[R_EBX], + env->regs[R_ECX], + env->regs[R_EDX], + env->regs[R_ESI], + env->regs[R_EDI], + env->regs[R_EBP], + env->regs[R_ESP], + env->regs[8], + env->regs[9], + env->regs[10], + env->regs[11], + 
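/*
 * Illustrative aside, not from this patch: the model_id packing loop in
 * cpu_x86_init() above stores four ASCII bytes per 32-bit word, least
 * significant byte first, which is the layout CPUID leaves
 * 0x80000002..0x80000004 hand back to the guest. For example the first
 * four characters "QEMU" become
 *
 *     cpuid_model[0] = 'Q' | ('E' << 8) | ('M' << 16) | ('U' << 24)
 *                    = 0x554d4551
 *
 * so a guest dumping the raw words byte by byte recovers the string.
 */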
env->regs[12], + env->regs[13], + env->regs[14], + env->regs[15], + env->eip, eflags, + eflags & DF_MASK ? 'D' : '-', + eflags & CC_O ? 'O' : '-', + eflags & CC_S ? 'S' : '-', + eflags & CC_Z ? 'Z' : '-', + eflags & CC_A ? 'A' : '-', + eflags & CC_P ? 'P' : '-', + eflags & CC_C ? 'C' : '-', + env->hflags & HF_CPL_MASK, + (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, + (env->a20_mask >> 20) & 1, + (env->hflags >> HF_HALTED_SHIFT) & 1); + } else +#endif + { + cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" + "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" + "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n", + (uint32_t)env->regs[R_EAX], + (uint32_t)env->regs[R_EBX], + (uint32_t)env->regs[R_ECX], + (uint32_t)env->regs[R_EDX], + (uint32_t)env->regs[R_ESI], + (uint32_t)env->regs[R_EDI], + (uint32_t)env->regs[R_EBP], + (uint32_t)env->regs[R_ESP], + (uint32_t)env->eip, eflags, + eflags & DF_MASK ? 'D' : '-', + eflags & CC_O ? 'O' : '-', + eflags & CC_S ? 'S' : '-', + eflags & CC_Z ? 'Z' : '-', + eflags & CC_A ? 'A' : '-', + eflags & CC_P ? 'P' : '-', + eflags & CC_C ? 'C' : '-', + env->hflags & HF_CPL_MASK, + (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, + (env->a20_mask >> 20) & 1, + (env->hflags >> HF_HALTED_SHIFT) & 1); + } + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + for(i = 0; i < 6; i++) { + SegmentCache *sc = &env->segs[i]; + cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n", + seg_name[i], + sc->selector, + sc->base, + sc->limit, + sc->flags); + } + cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n", + env->ldt.selector, + env->ldt.base, + env->ldt.limit, + env->ldt.flags); + cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n", + env->tr.selector, + env->tr.base, + env->tr.limit, + env->tr.flags); + cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", + env->gdt.base, env->gdt.limit); + cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", + env->idt.base, env->idt.limit); + cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", + (uint32_t)env->cr[0], + env->cr[2], + env->cr[3], + (uint32_t)env->cr[4]); + } else +#endif + { + for(i = 0; i < 6; i++) { + SegmentCache *sc = &env->segs[i]; + cpu_fprintf(f, "%s =%04x %08x %08x %08x\n", + seg_name[i], + sc->selector, + (uint32_t)sc->base, + sc->limit, + sc->flags); + } + cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n", + env->ldt.selector, + (uint32_t)env->ldt.base, + env->ldt.limit, + env->ldt.flags); + cpu_fprintf(f, "TR =%04x %08x %08x %08x\n", + env->tr.selector, + (uint32_t)env->tr.base, + env->tr.limit, + env->tr.flags); + cpu_fprintf(f, "GDT= %08x %08x\n", + (uint32_t)env->gdt.base, env->gdt.limit); + cpu_fprintf(f, "IDT= %08x %08x\n", + (uint32_t)env->idt.base, env->idt.limit); + cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n", + (uint32_t)env->cr[0], + (uint32_t)env->cr[2], + (uint32_t)env->cr[3], + (uint32_t)env->cr[4]); + } + if (flags & X86_DUMP_CCOP) { + if ((unsigned)env->cc_op < CC_OP_NB) + snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); + else + snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", + env->cc_src, env->cc_dst, + cc_op_name); + } else +#endif + { + cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", + (uint32_t)env->cc_src, (uint32_t)env->cc_dst, + cc_op_name); + } + } + if (flags & X86_DUMP_FPU) { + int fptag; + fptag = 0; + for(i = 0; i < 8; i++) { + fptag |= ((!env->fptags[i]) << i); + } + cpu_fprintf(f, "FCW=%04x FSW=%04x 
[ST=%d] FTW=%02x MXCSR=%08x\n", + env->fpuc, + (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, + env->fpstt, + fptag, + env->mxcsr); + for(i=0;i<8;i++) { +#if defined(USE_X86LDOUBLE) + union { + long double d; + struct { + uint64_t lower; + uint16_t upper; + } l; + } tmp; + tmp.d = env->fpregs[i].d; + cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", + i, tmp.l.lower, tmp.l.upper); +#else + cpu_fprintf(f, "FPR%d=%016" PRIx64, + i, env->fpregs[i].mmx.q); +#endif + if ((i & 1) == 1) + cpu_fprintf(f, "\n"); + else + cpu_fprintf(f, " "); + } + if (env->hflags & HF_CS64_MASK) + nb = 16; + else + nb = 8; + for(i=0;i<nb;i++) { + cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x", + i, + env->xmm_regs[i].XMM_L(3), + env->xmm_regs[i].XMM_L(2), + env->xmm_regs[i].XMM_L(1), + env->xmm_regs[i].XMM_L(0)); + if ((i & 1) == 1) + cpu_fprintf(f, "\n"); + else + cpu_fprintf(f, " "); + } + } +} + +/***********************************************************/ +/* x86 mmu */ +/* XXX: add PGE support */ + +void cpu_x86_set_a20(CPUX86State *env, int a20_state) +{ + a20_state = (a20_state != 0); + if (a20_state != ((env->a20_mask >> 20) & 1)) { +#if defined(DEBUG_MMU) + printf("A20 update: a20=%d\n", a20_state); +#endif + /* if the cpu is currently executing code, we must unlink it and + all the potentially executing TB */ + cpu_interrupt(env, CPU_INTERRUPT_EXITTB); + + /* when a20 is changed, all the MMU mappings are invalid, so + we must flush everything */ + tlb_flush(env, 1); + env->a20_mask = 0xffefffff | (a20_state << 20); + } +} + +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) +{ + int pe_state; + +#if defined(DEBUG_MMU) + printf("CR0 update: CR0=0x%08x\n", new_cr0); +#endif + if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != + (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { + tlb_flush(env, 1); + } + +#ifdef TARGET_X86_64 + if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && + (env->efer & MSR_EFER_LME)) { + /* enter in long mode */ + /* XXX: generate an exception */ + if (!(env->cr[4] & CR4_PAE_MASK)) + return; + env->efer |= MSR_EFER_LMA; + env->hflags |= HF_LMA_MASK; + } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && + (env->efer & MSR_EFER_LMA)) { + /* exit long mode */ + env->efer &= ~MSR_EFER_LMA; + env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); + env->eip &= 0xffffffff; + } +#endif + env->cr[0] = new_cr0 | CR0_ET_MASK; + + /* update PE flag in hidden flags */ + pe_state = (env->cr[0] & CR0_PE_MASK); + env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); + /* ensure that ADDSEG is always set in real mode */ + env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); + /* update FPU flags */ + env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | + ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); +} + +/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in + the PDPT */ +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) +{ + env->cr[3] = new_cr3; + if (env->cr[0] & CR0_PG_MASK) { +#if defined(DEBUG_MMU) + printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); +#endif + tlb_flush(env, 0); + } +} + +void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) +{ +#if defined(DEBUG_MMU) + printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]); +#endif + if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) != + (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) { + tlb_flush(env, 1); + } + /* SSE handling */ + if (!(env->cpuid_features & CPUID_SSE)) 
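/*
 * Comment added for illustration, not part of the patch: CR4.OSFXSR is
 * only meaningful when SSE is advertised, so it is forced clear below
 * otherwise; the surviving bit is then mirrored into hflags so
 * generated code can test one cached flag instead of re-deriving it
 * from CR4 on every SSE instruction, roughly:
 *
 *     hflags = (hflags & ~HF_OSFXSR_MASK)
 *            | (new_cr4 & CR4_OSFXSR_MASK ? HF_OSFXSR_MASK : 0);
 */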
+ new_cr4 &= ~CR4_OSFXSR_MASK; + if (new_cr4 & CR4_OSFXSR_MASK) + env->hflags |= HF_OSFXSR_MASK; + else + env->hflags &= ~HF_OSFXSR_MASK; + + env->cr[4] = new_cr4; +} + +/* XXX: also flush 4MB pages */ +void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr) +{ + tlb_flush_page(env, addr); +} + +#if defined(CONFIG_USER_ONLY) + +int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, + int is_write, int is_user, int is_softmmu) +{ + /* user mode only emulation */ + is_write &= 1; + env->cr[2] = addr; + env->error_code = (is_write << PG_ERROR_W_BIT); + env->error_code |= PG_ERROR_U_MASK; + env->exception_index = EXCP0E_PAGE; + return 1; +} + +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +{ + return addr; +} + +#else + +#define PHYS_ADDR_MASK 0xfffff000 + +/* return value: + -1 = cannot handle fault + 0 = nothing more to do + 1 = generate PF fault + 2 = soft MMU activation required for this block +*/ +int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, + int is_write1, int is_user, int is_softmmu) +{ + uint64_t ptep, pte; + uint32_t pdpe_addr, pde_addr, pte_addr; + int error_code, is_dirty, prot, page_size, ret, is_write; + unsigned long paddr, page_offset; + target_ulong vaddr, virt_addr; + +#if defined(DEBUG_MMU) + printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", + addr, is_write1, is_user, env->eip); +#endif + is_write = is_write1 & 1; + + if (!(env->cr[0] & CR0_PG_MASK)) { + pte = addr; + virt_addr = addr & TARGET_PAGE_MASK; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + page_size = 4096; + goto do_mapping; + } + + if (env->cr[4] & CR4_PAE_MASK) { + uint64_t pde, pdpe; + + /* XXX: we only use 32 bit physical addresses */ +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t pml4e_addr; + uint64_t pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = (int64_t)addr >> 47; + if (sext != 0 && sext != -1) { + env->error_code = 0; + env->exception_index = EXCP0D_GPF; + return 1; + } + + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & + env->a20_mask; + pml4e = ldq_phys(pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { + error_code = PG_ERROR_RSVD_MASK; + goto do_fault; + } + if (!(pml4e & PG_ACCESSED_MASK)) { + pml4e |= PG_ACCESSED_MASK; + stl_phys_notdirty(pml4e_addr, pml4e); + } + ptep = pml4e ^ PG_NX_MASK; + pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & + env->a20_mask; + pdpe = ldq_phys(pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { + error_code = PG_ERROR_RSVD_MASK; + goto do_fault; + } + ptep &= pdpe ^ PG_NX_MASK; + if (!(pdpe & PG_ACCESSED_MASK)) { + pdpe |= PG_ACCESSED_MASK; + stl_phys_notdirty(pdpe_addr, pdpe); + } + } else +#endif + { + /* XXX: load them when cr3 is loaded ? 
*/ + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & + env->a20_mask; + pdpe = ldq_phys(pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & + env->a20_mask; + pde = ldq_phys(pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { + error_code = PG_ERROR_RSVD_MASK; + goto do_fault; + } + ptep &= pde ^ PG_NX_MASK; + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + ptep ^= PG_NX_MASK; + if ((ptep & PG_NX_MASK) && is_write1 == 2) + goto do_fault_protect; + if (is_user) { + if (!(ptep & PG_USER_MASK)) + goto do_fault_protect; + if (is_write && !(ptep & PG_RW_MASK)) + goto do_fault_protect; + } else { + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(ptep & PG_RW_MASK)) + goto do_fault_protect; + } + is_dirty = is_write && !(pde & PG_DIRTY_MASK); + if (!(pde & PG_ACCESSED_MASK) || is_dirty) { + pde |= PG_ACCESSED_MASK; + if (is_dirty) + pde |= PG_DIRTY_MASK; + stl_phys_notdirty(pde_addr, pde); + } + /* align to page_size */ + pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); + virt_addr = addr & ~(page_size - 1); + } else { + /* 4 KB page */ + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + stl_phys_notdirty(pde_addr, pde); + } + pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & + env->a20_mask; + pte = ldq_phys(pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { + error_code = PG_ERROR_RSVD_MASK; + goto do_fault; + } + /* combine pde and pte nx, user and rw protections */ + ptep &= pte ^ PG_NX_MASK; + ptep ^= PG_NX_MASK; + if ((ptep & PG_NX_MASK) && is_write1 == 2) + goto do_fault_protect; + if (is_user) { + if (!(ptep & PG_USER_MASK)) + goto do_fault_protect; + if (is_write && !(ptep & PG_RW_MASK)) + goto do_fault_protect; + } else { + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(ptep & PG_RW_MASK)) + goto do_fault_protect; + } + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) + pte |= PG_DIRTY_MASK; + stl_phys_notdirty(pte_addr, pte); + } + page_size = 4096; + virt_addr = addr & ~0xfff; + pte = pte & (PHYS_ADDR_MASK | 0xfff); + } + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & + env->a20_mask; + pde = ldl_phys(pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + /* if PSE bit is set, then we use a 4MB page */ + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + page_size = 4096 * 1024; + if (is_user) { + if (!(pde & PG_USER_MASK)) + goto do_fault_protect; + if (is_write && !(pde & PG_RW_MASK)) + goto do_fault_protect; + } else { + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(pde & PG_RW_MASK)) + goto do_fault_protect; + } + is_dirty = is_write && !(pde & PG_DIRTY_MASK); + if (!(pde & PG_ACCESSED_MASK) || is_dirty) { + pde |= PG_ACCESSED_MASK; + if (is_dirty) + pde |= PG_DIRTY_MASK; + stl_phys_notdirty(pde_addr, pde); + } + + pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ + ptep = pte; + virt_addr = addr & ~(page_size - 1); + } else { + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + stl_phys_notdirty(pde_addr, pde); + } + + /* page 
directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & + env->a20_mask; + pte = ldl_phys(pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + error_code = 0; + goto do_fault; + } + /* combine pde and pte user and rw protections */ + ptep = pte & pde; + if (is_user) { + if (!(ptep & PG_USER_MASK)) + goto do_fault_protect; + if (is_write && !(ptep & PG_RW_MASK)) + goto do_fault_protect; + } else { + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(ptep & PG_RW_MASK)) + goto do_fault_protect; + } + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) + pte |= PG_DIRTY_MASK; + stl_phys_notdirty(pte_addr, pte); + } + page_size = 4096; + virt_addr = addr & ~0xfff; + } + } + /* the page can be put in the TLB */ + prot = PAGE_READ; + if (!(ptep & PG_NX_MASK)) + prot |= PAGE_EXEC; + if (pte & PG_DIRTY_MASK) { + /* only set write access if already dirty... otherwise wait + for dirty access */ + if (is_user) { + if (ptep & PG_RW_MASK) + prot |= PAGE_WRITE; + } else { + if (!(env->cr[0] & CR0_WP_MASK) || + (ptep & PG_RW_MASK)) + prot |= PAGE_WRITE; + } + } + do_mapping: + pte = pte & env->a20_mask; + + /* Even if 4MB pages, we map only one 4KB page in the cache to + avoid filling it too fast */ + page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); + paddr = (pte & TARGET_PAGE_MASK) + page_offset; + vaddr = virt_addr + page_offset; + + ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu); + return ret; + do_fault_protect: + error_code = PG_ERROR_P_MASK; + do_fault: + env->cr[2] = addr; + error_code |= (is_write << PG_ERROR_W_BIT); + if (is_user) + error_code |= PG_ERROR_U_MASK; + if (is_write1 == 2 && + (env->efer & MSR_EFER_NXE) && + (env->cr[4] & CR4_PAE_MASK)) + error_code |= PG_ERROR_I_D_MASK; + env->error_code = error_code; + env->exception_index = EXCP0E_PAGE; + return 1; +} + +target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +{ + uint32_t pde_addr, pte_addr; + uint32_t pde, pte, paddr, page_offset, page_size; + + if (env->cr[4] & CR4_PAE_MASK) { + uint32_t pdpe_addr, pde_addr, pte_addr; + uint32_t pdpe; + + /* XXX: we only use 32 bit physical addresses */ +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t pml4e_addr, pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = (int64_t)addr >> 47; + if (sext != 0 && sext != -1) + return -1; + + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & + env->a20_mask; + pml4e = ldl_phys(pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) + return -1; + + pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) & + env->a20_mask; + pdpe = ldl_phys(pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) + return -1; + } else +#endif + { + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) & + env->a20_mask; + pdpe = ldl_phys(pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) + return -1; + } + + pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) & + env->a20_mask; + pde = ldl_phys(pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + return -1; + } + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ + } else { + /* 4 KB page */ + pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) & + env->a20_mask; + page_size = 4096; + pte = ldl_phys(pte_addr); + } + } else { + if (!(env->cr[0] & CR0_PG_MASK)) { + pte = addr; + page_size = 4096; + } else { + /* page directory entry */ 
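/*
 * Worked example, added as an illustrative comment: with legacy
 * two-level paging a 32-bit linear address splits into
 *
 *     directory index = addr >> 22            (10 bits)
 *     table index     = (addr >> 12) & 0x3ff  (10 bits)
 *     page offset     = addr & 0xfff          (12 bits)
 *
 * The expression below, ((addr >> 20) & ~3), is the directory index
 * pre-scaled by the 4-byte entry size, i.e. (addr >> 22) << 2, and the
 * later ((addr >> 10) & 0xffc) is likewise ((addr >> 12) & 0x3ff) << 2.
 */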
+ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask; + pde = ldl_phys(pde_addr); + if (!(pde & PG_PRESENT_MASK)) + return -1; + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + pte = pde & ~0x003ff000; /* align to 4MB */ + page_size = 4096 * 1024; + } else { + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; + pte = ldl_phys(pte_addr); + if (!(pte & PG_PRESENT_MASK)) + return -1; + page_size = 4096; + } + } + pte = pte & env->a20_mask; + } + + page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); + paddr = (pte & TARGET_PAGE_MASK) + page_offset; + return paddr; +} +#endif /* !CONFIG_USER_ONLY */ + +#if defined(USE_CODE_COPY) +struct fpstate { + uint16_t fpuc; + uint16_t dummy1; + uint16_t fpus; + uint16_t dummy2; + uint16_t fptag; + uint16_t dummy3; + + uint32_t fpip; + uint32_t fpcs; + uint32_t fpoo; + uint32_t fpos; + uint8_t fpregs1[8 * 10]; +}; + +void restore_native_fp_state(CPUState *env) +{ + int fptag, i, j; + struct fpstate fp1, *fp = &fp1; + + fp->fpuc = env->fpuc; + fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; + fptag = 0; + for (i=7; i>=0; i--) { + fptag <<= 2; + if (env->fptags[i]) { + fptag |= 3; + } else { + /* the FPU automatically computes it */ + } + } + fp->fptag = fptag; + j = env->fpstt; + for(i = 0;i < 8; i++) { + memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10); + j = (j + 1) & 7; + } + asm volatile ("frstor %0" : "=m" (*fp)); + env->native_fp_regs = 1; +} + +void save_native_fp_state(CPUState *env) +{ + int fptag, i, j; + uint16_t fpuc; + struct fpstate fp1, *fp = &fp1; + + asm volatile ("fsave %0" : : "m" (*fp)); + env->fpuc = fp->fpuc; + env->fpstt = (fp->fpus >> 11) & 7; + env->fpus = fp->fpus & ~0x3800; + fptag = fp->fptag; + for(i = 0;i < 8; i++) { + env->fptags[i] = ((fptag & 3) == 3); + fptag >>= 2; + } + j = env->fpstt; + for(i = 0;i < 8; i++) { + memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10); + j = (j + 1) & 7; + } + /* we must restore the default rounding state */ + /* XXX: we do not restore the exception state */ + fpuc = 0x037f | (env->fpuc & (3 << 10)); + asm volatile("fldcw %0" : : "m" (fpuc)); + env->native_fp_regs = 0; +} +#endif diff --git a/target-i386/op.c b/target-i386/op.c new file mode 100644 index 0000000..7a3aa77 --- /dev/null +++ b/target-i386/op.c @@ -0,0 +1,2444 @@ +/* + * i386 micro operations + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define ASM_SOFTMMU +#include "exec.h" + +/* n must be a constant to be efficient */ +static inline target_long lshift(target_long x, int n) +{ + if (n >= 0) + return x << n; + else + return x >> (-n); +} + +/* we define the various pieces of code used by the JIT */ + +#define REG EAX +#define REGNAME _EAX +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG ECX +#define REGNAME _ECX +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG EDX +#define REGNAME _EDX +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG EBX +#define REGNAME _EBX +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG ESP +#define REGNAME _ESP +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG EBP +#define REGNAME _EBP +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG ESI +#define REGNAME _ESI +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG EDI +#define REGNAME _EDI +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#ifdef TARGET_X86_64 + +#define REG (env->regs[8]) +#define REGNAME _R8 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[9]) +#define REGNAME _R9 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[10]) +#define REGNAME _R10 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[11]) +#define REGNAME _R11 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[12]) +#define REGNAME _R12 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[13]) +#define REGNAME _R13 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[14]) +#define REGNAME _R14 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#define REG (env->regs[15]) +#define REGNAME _R15 +#include "opreg_template.h" +#undef REG +#undef REGNAME + +#endif + +/* operations with flags */ + +/* update flags with T0 and T1 (add/sub case) */ +void OPPROTO op_update2_cc(void) +{ + CC_SRC = T1; + CC_DST = T0; +} + +/* update flags with T0 (logic operation case) */ +void OPPROTO op_update1_cc(void) +{ + CC_DST = T0; +} + +void OPPROTO op_update_neg_cc(void) +{ + CC_SRC = -T0; + CC_DST = T0; +} + +void OPPROTO op_cmpl_T0_T1_cc(void) +{ + CC_SRC = T1; + CC_DST = T0 - T1; +} + +void OPPROTO op_update_inc_cc(void) +{ + CC_SRC = cc_table[CC_OP].compute_c(); + CC_DST = T0; +} + +void OPPROTO op_testl_T0_T1_cc(void) +{ + CC_DST = T0 & T1; +} + +/* operations without flags */ + +void OPPROTO op_addl_T0_T1(void) +{ + T0 += T1; +} + +void OPPROTO op_orl_T0_T1(void) +{ + T0 |= T1; +} + +void OPPROTO op_andl_T0_T1(void) +{ + T0 &= T1; +} + +void OPPROTO op_subl_T0_T1(void) +{ + T0 -= T1; +} + +void OPPROTO op_xorl_T0_T1(void) +{ + T0 ^= T1; +} + +void OPPROTO op_negl_T0(void) +{ + T0 = -T0; +} + +void OPPROTO op_incl_T0(void) +{ + T0++; +} + +void OPPROTO op_decl_T0(void) +{ + T0--; +} + +void OPPROTO op_notl_T0(void) +{ + T0 = ~T0; +} + +void OPPROTO op_bswapl_T0(void) +{ + T0 = bswap32(T0); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_bswapq_T0(void) +{ + helper_bswapq_T0(); +} +#endif + +/* multiply/divide */ + +/* XXX: add eflags optimizations */ +/* XXX: add non P4 style flags */ + +void OPPROTO 
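/*
 * Illustrative note, not part of the patch: lshift() above folds both
 * shift directions into one helper so templated code can pass a signed
 * constant:
 *
 *     lshift(x, 3)  == x << 3
 *     lshift(x, -3) == x >> 3   (arithmetic, target_long being signed)
 *
 * The multiply ops that follow stash the upper half of the product (or
 * an overflow predicate, for the signed forms) in CC_SRC, from which
 * CF and OF are later derived lazily.
 */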
op_mulb_AL_T0(void) +{ + unsigned int res; + res = (uint8_t)EAX * (uint8_t)T0; + EAX = (EAX & ~0xffff) | res; + CC_DST = res; + CC_SRC = (res & 0xff00); +} + +void OPPROTO op_imulb_AL_T0(void) +{ + int res; + res = (int8_t)EAX * (int8_t)T0; + EAX = (EAX & ~0xffff) | (res & 0xffff); + CC_DST = res; + CC_SRC = (res != (int8_t)res); +} + +void OPPROTO op_mulw_AX_T0(void) +{ + unsigned int res; + res = (uint16_t)EAX * (uint16_t)T0; + EAX = (EAX & ~0xffff) | (res & 0xffff); + EDX = (EDX & ~0xffff) | ((res >> 16) & 0xffff); + CC_DST = res; + CC_SRC = res >> 16; +} + +void OPPROTO op_imulw_AX_T0(void) +{ + int res; + res = (int16_t)EAX * (int16_t)T0; + EAX = (EAX & ~0xffff) | (res & 0xffff); + EDX = (EDX & ~0xffff) | ((res >> 16) & 0xffff); + CC_DST = res; + CC_SRC = (res != (int16_t)res); +} + +void OPPROTO op_mull_EAX_T0(void) +{ + uint64_t res; + res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0); + EAX = (uint32_t)res; + EDX = (uint32_t)(res >> 32); + CC_DST = (uint32_t)res; + CC_SRC = (uint32_t)(res >> 32); +} + +void OPPROTO op_imull_EAX_T0(void) +{ + int64_t res; + res = (int64_t)((int32_t)EAX) * (int64_t)((int32_t)T0); + EAX = (uint32_t)(res); + EDX = (uint32_t)(res >> 32); + CC_DST = res; + CC_SRC = (res != (int32_t)res); +} + +void OPPROTO op_imulw_T0_T1(void) +{ + int res; + res = (int16_t)T0 * (int16_t)T1; + T0 = res; + CC_DST = res; + CC_SRC = (res != (int16_t)res); +} + +void OPPROTO op_imull_T0_T1(void) +{ + int64_t res; + res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1); + T0 = res; + CC_DST = res; + CC_SRC = (res != (int32_t)res); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_mulq_EAX_T0(void) +{ + helper_mulq_EAX_T0(); +} + +void OPPROTO op_imulq_EAX_T0(void) +{ + helper_imulq_EAX_T0(); +} + +void OPPROTO op_imulq_T0_T1(void) +{ + helper_imulq_T0_T1(); +} +#endif + +/* division, flags are undefined */ + +void OPPROTO op_divb_AL_T0(void) +{ + unsigned int num, den, q, r; + + num = (EAX & 0xffff); + den = (T0 & 0xff); + if (den == 0) { + raise_exception(EXCP00_DIVZ); + } + q = (num / den); + if (q > 0xff) + raise_exception(EXCP00_DIVZ); + q &= 0xff; + r = (num % den) & 0xff; + EAX = (EAX & ~0xffff) | (r << 8) | q; +} + +void OPPROTO op_idivb_AL_T0(void) +{ + int num, den, q, r; + + num = (int16_t)EAX; + den = (int8_t)T0; + if (den == 0) { + raise_exception(EXCP00_DIVZ); + } + q = (num / den); + if (q != (int8_t)q) + raise_exception(EXCP00_DIVZ); + q &= 0xff; + r = (num % den) & 0xff; + EAX = (EAX & ~0xffff) | (r << 8) | q; +} + +void OPPROTO op_divw_AX_T0(void) +{ + unsigned int num, den, q, r; + + num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); + den = (T0 & 0xffff); + if (den == 0) { + raise_exception(EXCP00_DIVZ); + } + q = (num / den); + if (q > 0xffff) + raise_exception(EXCP00_DIVZ); + q &= 0xffff; + r = (num % den) & 0xffff; + EAX = (EAX & ~0xffff) | q; + EDX = (EDX & ~0xffff) | r; +} + +void OPPROTO op_idivw_AX_T0(void) +{ + int num, den, q, r; + + num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); + den = (int16_t)T0; + if (den == 0) { + raise_exception(EXCP00_DIVZ); + } + q = (num / den); + if (q != (int16_t)q) + raise_exception(EXCP00_DIVZ); + q &= 0xffff; + r = (num % den) & 0xffff; + EAX = (EAX & ~0xffff) | q; + EDX = (EDX & ~0xffff) | r; +} + +void OPPROTO op_divl_EAX_T0(void) +{ + helper_divl_EAX_T0(); +} + +void OPPROTO op_idivl_EAX_T0(void) +{ + helper_idivl_EAX_T0(); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_divq_EAX_T0(void) +{ + helper_divq_EAX_T0(); +} + +void OPPROTO op_idivq_EAX_T0(void) +{ + helper_idivq_EAX_T0(); +} +#endif + +/* constant load & 
misc op */ + +/* XXX: consistent names */ +void OPPROTO op_movl_T0_imu(void) +{ + T0 = (uint32_t)PARAM1; +} + +void OPPROTO op_movl_T0_im(void) +{ + T0 = (int32_t)PARAM1; +} + +void OPPROTO op_addl_T0_im(void) +{ + T0 += PARAM1; +} + +void OPPROTO op_andl_T0_ffff(void) +{ + T0 = T0 & 0xffff; +} + +void OPPROTO op_andl_T0_im(void) +{ + T0 = T0 & PARAM1; +} + +void OPPROTO op_movl_T0_T1(void) +{ + T0 = T1; +} + +void OPPROTO op_movl_T1_imu(void) +{ + T1 = (uint32_t)PARAM1; +} + +void OPPROTO op_movl_T1_im(void) +{ + T1 = (int32_t)PARAM1; +} + +void OPPROTO op_addl_T1_im(void) +{ + T1 += PARAM1; +} + +void OPPROTO op_movl_T1_A0(void) +{ + T1 = A0; +} + +void OPPROTO op_movl_A0_im(void) +{ + A0 = (uint32_t)PARAM1; +} + +void OPPROTO op_addl_A0_im(void) +{ + A0 = (uint32_t)(A0 + PARAM1); +} + +void OPPROTO op_movl_A0_seg(void) +{ + A0 = (uint32_t)*(target_ulong *)((char *)env + PARAM1); +} + +void OPPROTO op_addl_A0_seg(void) +{ + A0 = (uint32_t)(A0 + *(target_ulong *)((char *)env + PARAM1)); +} + +void OPPROTO op_addl_A0_AL(void) +{ + A0 = (uint32_t)(A0 + (EAX & 0xff)); +} + +#ifdef WORDS_BIGENDIAN +typedef union UREG64 { + struct { uint16_t v3, v2, v1, v0; } w; + struct { uint32_t v1, v0; } l; + uint64_t q; +} UREG64; +#else +typedef union UREG64 { + struct { uint16_t v0, v1, v2, v3; } w; + struct { uint32_t v0, v1; } l; + uint64_t q; +} UREG64; +#endif + +#ifdef TARGET_X86_64 + +#define PARAMQ1 \ +({\ + UREG64 __p;\ + __p.l.v1 = PARAM1;\ + __p.l.v0 = PARAM2;\ + __p.q;\ +}) + +void OPPROTO op_movq_T0_im64(void) +{ + T0 = PARAMQ1; +} + +void OPPROTO op_movq_T1_im64(void) +{ + T1 = PARAMQ1; +} + +void OPPROTO op_movq_A0_im(void) +{ + A0 = (int32_t)PARAM1; +} + +void OPPROTO op_movq_A0_im64(void) +{ + A0 = PARAMQ1; +} + +void OPPROTO op_addq_A0_im(void) +{ + A0 = (A0 + (int32_t)PARAM1); +} + +void OPPROTO op_addq_A0_im64(void) +{ + A0 = (A0 + PARAMQ1); +} + +void OPPROTO op_movq_A0_seg(void) +{ + A0 = *(target_ulong *)((char *)env + PARAM1); +} + +void OPPROTO op_addq_A0_seg(void) +{ + A0 += *(target_ulong *)((char *)env + PARAM1); +} + +void OPPROTO op_addq_A0_AL(void) +{ + A0 = (A0 + (EAX & 0xff)); +} + +#endif + +void OPPROTO op_andl_A0_ffff(void) +{ + A0 = A0 & 0xffff; +} + +/* memory access */ + +#define MEMSUFFIX _raw +#include "ops_mem.h" + +#if !defined(CONFIG_USER_ONLY) +#define MEMSUFFIX _kernel +#include "ops_mem.h" + +#define MEMSUFFIX _user +#include "ops_mem.h" +#endif + +/* indirect jump */ + +void OPPROTO op_jmp_T0(void) +{ + EIP = T0; +} + +void OPPROTO op_movl_eip_im(void) +{ + EIP = (uint32_t)PARAM1; +} + +#ifdef TARGET_X86_64 +void OPPROTO op_movq_eip_im(void) +{ + EIP = (int32_t)PARAM1; +} + +void OPPROTO op_movq_eip_im64(void) +{ + EIP = PARAMQ1; +} +#endif + +void OPPROTO op_hlt(void) +{ + helper_hlt(); +} + +void OPPROTO op_monitor(void) +{ + helper_monitor(); +} + +void OPPROTO op_mwait(void) +{ + helper_mwait(); +} + +void OPPROTO op_debug(void) +{ + env->exception_index = EXCP_DEBUG; + cpu_loop_exit(); +} + +void OPPROTO op_raise_interrupt(void) +{ + int intno, next_eip_addend; + intno = PARAM1; + next_eip_addend = PARAM2; + raise_interrupt(intno, 1, 0, next_eip_addend); +} + +void OPPROTO op_raise_exception(void) +{ + int exception_index; + exception_index = PARAM1; + raise_exception(exception_index); +} + +void OPPROTO op_into(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + if (eflags & CC_O) { + raise_interrupt(EXCP04_INTO, 1, 0, PARAM1); + } + FORCE_RET(); +} + +void OPPROTO op_cli(void) +{ + env->eflags &= ~IF_MASK; +} + +void OPPROTO 
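/*
 * Illustration, not from the patch: the STI interrupt shadow is
 * modelled with HF_INHIBIT_IRQ_MASK via the two ops below. For a
 * sequence such as
 *
 *     sti
 *     ret
 *
 * the translator can emit op_sti followed by op_set_inhibit_irq, and
 * the flag is dropped again (op_reset_inhibit_irq, or the explicit
 * clear at the top of helper_hlt() earlier) once one further
 * instruction has run, so no interrupt is taken between the two
 * instructions, matching hardware.
 */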
op_sti(void) +{ + env->eflags |= IF_MASK; +} + +void OPPROTO op_set_inhibit_irq(void) +{ + env->hflags |= HF_INHIBIT_IRQ_MASK; +} + +void OPPROTO op_reset_inhibit_irq(void) +{ + env->hflags &= ~HF_INHIBIT_IRQ_MASK; +} + +#if 0 +/* vm86plus instructions */ +void OPPROTO op_cli_vm(void) +{ + env->eflags &= ~VIF_MASK; +} + +void OPPROTO op_sti_vm(void) +{ + env->eflags |= VIF_MASK; + if (env->eflags & VIP_MASK) { + EIP = PARAM1; + raise_exception(EXCP0D_GPF); + } + FORCE_RET(); +} +#endif + +void OPPROTO op_boundw(void) +{ + int low, high, v; + low = ldsw(A0); + high = ldsw(A0 + 2); + v = (int16_t)T0; + if (v < low || v > high) { + raise_exception(EXCP05_BOUND); + } + FORCE_RET(); +} + +void OPPROTO op_boundl(void) +{ + int low, high, v; + low = ldl(A0); + high = ldl(A0 + 4); + v = T0; + if (v < low || v > high) { + raise_exception(EXCP05_BOUND); + } + FORCE_RET(); +} + +void OPPROTO op_cmpxchg8b(void) +{ + helper_cmpxchg8b(); +} + +void OPPROTO op_movl_T0_0(void) +{ + T0 = 0; +} + +void OPPROTO op_exit_tb(void) +{ + EXIT_TB(); +} + +/* multiple size ops */ + +#define ldul ldl + +#define SHIFT 0 +#include "ops_template.h" +#undef SHIFT + +#define SHIFT 1 +#include "ops_template.h" +#undef SHIFT + +#define SHIFT 2 +#include "ops_template.h" +#undef SHIFT + +#ifdef TARGET_X86_64 + +#define SHIFT 3 +#include "ops_template.h" +#undef SHIFT + +#endif + +/* sign extend */ + +void OPPROTO op_movsbl_T0_T0(void) +{ + T0 = (int8_t)T0; +} + +void OPPROTO op_movzbl_T0_T0(void) +{ + T0 = (uint8_t)T0; +} + +void OPPROTO op_movswl_T0_T0(void) +{ + T0 = (int16_t)T0; +} + +void OPPROTO op_movzwl_T0_T0(void) +{ + T0 = (uint16_t)T0; +} + +void OPPROTO op_movswl_EAX_AX(void) +{ + EAX = (int16_t)EAX; +} + +#ifdef TARGET_X86_64 +void OPPROTO op_movslq_T0_T0(void) +{ + T0 = (int32_t)T0; +} + +void OPPROTO op_movslq_RAX_EAX(void) +{ + EAX = (int32_t)EAX; +} +#endif + +void OPPROTO op_movsbw_AX_AL(void) +{ + EAX = (EAX & ~0xffff) | ((int8_t)EAX & 0xffff); +} + +void OPPROTO op_movslq_EDX_EAX(void) +{ + EDX = (int32_t)EAX >> 31; +} + +void OPPROTO op_movswl_DX_AX(void) +{ + EDX = (EDX & ~0xffff) | (((int16_t)EAX >> 15) & 0xffff); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_movsqo_RDX_RAX(void) +{ + EDX = (int64_t)EAX >> 63; +} +#endif + +/* string ops helpers */ + +void OPPROTO op_addl_ESI_T0(void) +{ + ESI = (uint32_t)(ESI + T0); +} + +void OPPROTO op_addw_ESI_T0(void) +{ + ESI = (ESI & ~0xffff) | ((ESI + T0) & 0xffff); +} + +void OPPROTO op_addl_EDI_T0(void) +{ + EDI = (uint32_t)(EDI + T0); +} + +void OPPROTO op_addw_EDI_T0(void) +{ + EDI = (EDI & ~0xffff) | ((EDI + T0) & 0xffff); +} + +void OPPROTO op_decl_ECX(void) +{ + ECX = (uint32_t)(ECX - 1); +} + +void OPPROTO op_decw_ECX(void) +{ + ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_addq_ESI_T0(void) +{ + ESI = (ESI + T0); +} + +void OPPROTO op_addq_EDI_T0(void) +{ + EDI = (EDI + T0); +} + +void OPPROTO op_decq_ECX(void) +{ + ECX--; +} +#endif + +/* push/pop utils */ + +void op_addl_A0_SS(void) +{ + A0 = (uint32_t)(A0 + env->segs[R_SS].base); +} + +void op_subl_A0_2(void) +{ + A0 = (uint32_t)(A0 - 2); +} + +void op_subl_A0_4(void) +{ + A0 = (uint32_t)(A0 - 4); +} + +void op_addl_ESP_4(void) +{ + ESP = (uint32_t)(ESP + 4); +} + +void op_addl_ESP_2(void) +{ + ESP = (uint32_t)(ESP + 2); +} + +void op_addw_ESP_4(void) +{ + ESP = (ESP & ~0xffff) | ((ESP + 4) & 0xffff); +} + +void op_addw_ESP_2(void) +{ + ESP = (ESP & ~0xffff) | ((ESP + 2) & 0xffff); +} + +void op_addl_ESP_im(void) +{ + ESP = (uint32_t)(ESP + PARAM1); +} + 
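/* Illustrative sketch, not part of the original patch: the 16-bit stack
   ops above preserve the high half of ESP while SP wraps modulo 64K.
   The same pattern isolated as a standalone helper (the name sp_add16
   is ours, for illustration only): */
static inline uint32_t sp_add16(uint32_t esp, uint32_t inc)
{
    /* low 16 bits wrap around; upper 16 bits are kept intact */
    return (esp & ~0xffff) | ((esp + inc) & 0xffff);
}
/* e.g. sp_add16(0x0001ffff, 2) == 0x00010001: SP wrapped, high word
   untouched */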
+void op_addw_ESP_im(void) +{ + ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff); +} + +#ifdef TARGET_X86_64 +void op_subq_A0_2(void) +{ + A0 -= 2; +} + +void op_subq_A0_8(void) +{ + A0 -= 8; +} + +void op_addq_ESP_8(void) +{ + ESP += 8; +} + +void op_addq_ESP_im(void) +{ + ESP += PARAM1; +} +#endif + +void OPPROTO op_rdtsc(void) +{ + helper_rdtsc(); +} + +void OPPROTO op_cpuid(void) +{ + helper_cpuid(); +} + +void OPPROTO op_enter_level(void) +{ + helper_enter_level(PARAM1, PARAM2); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_enter64_level(void) +{ + helper_enter64_level(PARAM1, PARAM2); +} +#endif + +void OPPROTO op_sysenter(void) +{ + helper_sysenter(); +} + +void OPPROTO op_sysexit(void) +{ + helper_sysexit(); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_syscall(void) +{ + helper_syscall(PARAM1); +} + +void OPPROTO op_sysret(void) +{ + helper_sysret(PARAM1); +} +#endif + +void OPPROTO op_rdmsr(void) +{ + helper_rdmsr(); +} + +void OPPROTO op_wrmsr(void) +{ + helper_wrmsr(); +} + +/* bcd */ + +/* XXX: exception */ +void OPPROTO op_aam(void) +{ + int base = PARAM1; + int al, ah; + al = EAX & 0xff; + ah = al / base; + al = al % base; + EAX = (EAX & ~0xffff) | al | (ah << 8); + CC_DST = al; +} + +void OPPROTO op_aad(void) +{ + int base = PARAM1; + int al, ah; + al = EAX & 0xff; + ah = (EAX >> 8) & 0xff; + al = ((ah * base) + al) & 0xff; + EAX = (EAX & ~0xffff) | al; + CC_DST = al; +} + +void OPPROTO op_aaa(void) +{ + int icarry; + int al, ah, af; + int eflags; + + eflags = cc_table[CC_OP].compute_all(); + af = eflags & CC_A; + al = EAX & 0xff; + ah = (EAX >> 8) & 0xff; + + icarry = (al > 0xf9); + if (((al & 0x0f) > 9 ) || af) { + al = (al + 6) & 0x0f; + ah = (ah + 1 + icarry) & 0xff; + eflags |= CC_C | CC_A; + } else { + eflags &= ~(CC_C | CC_A); + al &= 0x0f; + } + EAX = (EAX & ~0xffff) | al | (ah << 8); + CC_SRC = eflags; + FORCE_RET(); +} + +void OPPROTO op_aas(void) +{ + int icarry; + int al, ah, af; + int eflags; + + eflags = cc_table[CC_OP].compute_all(); + af = eflags & CC_A; + al = EAX & 0xff; + ah = (EAX >> 8) & 0xff; + + icarry = (al < 6); + if (((al & 0x0f) > 9 ) || af) { + al = (al - 6) & 0x0f; + ah = (ah - 1 - icarry) & 0xff; + eflags |= CC_C | CC_A; + } else { + eflags &= ~(CC_C | CC_A); + al &= 0x0f; + } + EAX = (EAX & ~0xffff) | al | (ah << 8); + CC_SRC = eflags; + FORCE_RET(); +} + +void OPPROTO op_daa(void) +{ + int al, af, cf; + int eflags; + + eflags = cc_table[CC_OP].compute_all(); + cf = eflags & CC_C; + af = eflags & CC_A; + al = EAX & 0xff; + + eflags = 0; + if (((al & 0x0f) > 9 ) || af) { + al = (al + 6) & 0xff; + eflags |= CC_A; + } + if ((al > 0x9f) || cf) { + al = (al + 0x60) & 0xff; + eflags |= CC_C; + } + EAX = (EAX & ~0xff) | al; + /* well, speed is not an issue here, so we compute the flags by hand */ + eflags |= (al == 0) << 6; /* zf */ + eflags |= parity_table[al]; /* pf */ + eflags |= (al & 0x80); /* sf */ + CC_SRC = eflags; + FORCE_RET(); +} + +void OPPROTO op_das(void) +{ + int al, al1, af, cf; + int eflags; + + eflags = cc_table[CC_OP].compute_all(); + cf = eflags & CC_C; + af = eflags & CC_A; + al = EAX & 0xff; + + eflags = 0; + al1 = al; + if (((al & 0x0f) > 9 ) || af) { + eflags |= CC_A; + if (al < 6 || cf) + eflags |= CC_C; + al = (al - 6) & 0xff; + } + if ((al1 > 0x99) || cf) { + al = (al - 0x60) & 0xff; + eflags |= CC_C; + } + EAX = (EAX & ~0xff) | al; + /* well, speed is not an issue here, so we compute the flags by hand */ + eflags |= (al == 0) << 6; /* zf */ + eflags |= parity_table[al]; /* pf */ + eflags |= (al & 0x80); /* sf */ + CC_SRC = 
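/*
 * Worked example for the BCD adjusts above, added as an illustrative
 * comment: DAA with AL = 0x2e and AF = CF = 0 (i.e. after the binary
 * add 0x17 + 0x17):
 *
 *     (0x2e & 0x0f) = 0x0e > 9   ->  AL = 0x2e + 6 = 0x34, AF set
 *     0x34 <= 0x9f and CF clear  ->  no 0x60 correction
 *
 * leaving AL = 0x34, the packed-BCD sum of 17 + 17. ZF, PF and SF are
 * then recomputed directly from the final AL, as the code notes.
 */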
eflags; + FORCE_RET(); +} + +/* segment handling */ + +/* never use it with R_CS */ +void OPPROTO op_movl_seg_T0(void) +{ + load_seg(PARAM1, T0); +} + +/* faster VM86 version */ +void OPPROTO op_movl_seg_T0_vm(void) +{ + int selector; + SegmentCache *sc; + + selector = T0 & 0xffff; + /* env->segs[] access */ + sc = (SegmentCache *)((char *)env + PARAM1); + sc->selector = selector; + sc->base = (selector << 4); +} + +void OPPROTO op_movl_T0_seg(void) +{ + T0 = env->segs[PARAM1].selector; +} + +void OPPROTO op_lsl(void) +{ + helper_lsl(); +} + +void OPPROTO op_lar(void) +{ + helper_lar(); +} + +void OPPROTO op_verr(void) +{ + helper_verr(); +} + +void OPPROTO op_verw(void) +{ + helper_verw(); +} + +void OPPROTO op_arpl(void) +{ + if ((T0 & 3) < (T1 & 3)) { + /* XXX: emulate bug or 0xff3f0000 oring as in bochs ? */ + T0 = (T0 & ~3) | (T1 & 3); + T1 = CC_Z; + } else { + T1 = 0; + } + FORCE_RET(); +} + +void OPPROTO op_arpl_update(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + CC_SRC = (eflags & ~CC_Z) | T1; +} + +/* T0: segment, T1:eip */ +void OPPROTO op_ljmp_protected_T0_T1(void) +{ + helper_ljmp_protected_T0_T1(PARAM1); +} + +void OPPROTO op_lcall_real_T0_T1(void) +{ + helper_lcall_real_T0_T1(PARAM1, PARAM2); +} + +void OPPROTO op_lcall_protected_T0_T1(void) +{ + helper_lcall_protected_T0_T1(PARAM1, PARAM2); +} + +void OPPROTO op_iret_real(void) +{ + helper_iret_real(PARAM1); +} + +void OPPROTO op_iret_protected(void) +{ + helper_iret_protected(PARAM1, PARAM2); +} + +void OPPROTO op_lret_protected(void) +{ + helper_lret_protected(PARAM1, PARAM2); +} + +void OPPROTO op_lldt_T0(void) +{ + helper_lldt_T0(); +} + +void OPPROTO op_ltr_T0(void) +{ + helper_ltr_T0(); +} + +/* CR registers access */ +void OPPROTO op_movl_crN_T0(void) +{ + helper_movl_crN_T0(PARAM1); +} + +#if !defined(CONFIG_USER_ONLY) +void OPPROTO op_movtl_T0_cr8(void) +{ + T0 = cpu_get_apic_tpr(env); +} +#endif + +/* DR registers access */ +void OPPROTO op_movl_drN_T0(void) +{ + helper_movl_drN_T0(PARAM1); +} + +void OPPROTO op_lmsw_T0(void) +{ + /* only 4 lower bits of CR0 are modified. PE cannot be set to zero + if already set to one. 
*/ + T0 = (env->cr[0] & ~0xe) | (T0 & 0xf); + helper_movl_crN_T0(0); +} + +void OPPROTO op_invlpg_A0(void) +{ + helper_invlpg(A0); +} + +void OPPROTO op_movl_T0_env(void) +{ + T0 = *(uint32_t *)((char *)env + PARAM1); +} + +void OPPROTO op_movl_env_T0(void) +{ + *(uint32_t *)((char *)env + PARAM1) = T0; +} + +void OPPROTO op_movl_env_T1(void) +{ + *(uint32_t *)((char *)env + PARAM1) = T1; +} + +void OPPROTO op_movtl_T0_env(void) +{ + T0 = *(target_ulong *)((char *)env + PARAM1); +} + +void OPPROTO op_movtl_env_T0(void) +{ + *(target_ulong *)((char *)env + PARAM1) = T0; +} + +void OPPROTO op_movtl_T1_env(void) +{ + T1 = *(target_ulong *)((char *)env + PARAM1); +} + +void OPPROTO op_movtl_env_T1(void) +{ + *(target_ulong *)((char *)env + PARAM1) = T1; +} + +void OPPROTO op_clts(void) +{ + env->cr[0] &= ~CR0_TS_MASK; + env->hflags &= ~HF_TS_MASK; +} + +/* flags handling */ + +void OPPROTO op_goto_tb0(void) +{ + GOTO_TB(op_goto_tb0, PARAM1, 0); +} + +void OPPROTO op_goto_tb1(void) +{ + GOTO_TB(op_goto_tb1, PARAM1, 1); +} + +void OPPROTO op_jmp_label(void) +{ + GOTO_LABEL_PARAM(1); +} + +void OPPROTO op_jnz_T0_label(void) +{ + if (T0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +void OPPROTO op_jz_T0_label(void) +{ + if (!T0) + GOTO_LABEL_PARAM(1); + FORCE_RET(); +} + +/* slow set cases (compute x86 flags) */ +void OPPROTO op_seto_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = (eflags >> 11) & 1; +} + +void OPPROTO op_setb_T0_cc(void) +{ + T0 = cc_table[CC_OP].compute_c(); +} + +void OPPROTO op_setz_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = (eflags >> 6) & 1; +} + +void OPPROTO op_setbe_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = (eflags & (CC_Z | CC_C)) != 0; +} + +void OPPROTO op_sets_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = (eflags >> 7) & 1; +} + +void OPPROTO op_setp_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = (eflags >> 2) & 1; +} + +void OPPROTO op_setl_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = ((eflags ^ (eflags >> 4)) >> 7) & 1; +} + +void OPPROTO op_setle_T0_cc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + T0 = (((eflags ^ (eflags >> 4)) & 0x80) || (eflags & CC_Z)) != 0; +} + +void OPPROTO op_xor_T0_1(void) +{ + T0 ^= 1; +} + +void OPPROTO op_set_cc_op(void) +{ + CC_OP = PARAM1; +} + +void OPPROTO op_mov_T0_cc(void) +{ + T0 = cc_table[CC_OP].compute_all(); +} + +/* XXX: clear VIF/VIP in all ops ? 
*/ + +void OPPROTO op_movl_eflags_T0(void) +{ + load_eflags(T0, (TF_MASK | AC_MASK | ID_MASK | NT_MASK)); +} + +void OPPROTO op_movw_eflags_T0(void) +{ + load_eflags(T0, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff); +} + +void OPPROTO op_movl_eflags_T0_io(void) +{ + load_eflags(T0, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)); +} + +void OPPROTO op_movw_eflags_T0_io(void) +{ + load_eflags(T0, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff); +} + +void OPPROTO op_movl_eflags_T0_cpl0(void) +{ + load_eflags(T0, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)); +} + +void OPPROTO op_movw_eflags_T0_cpl0(void) +{ + load_eflags(T0, (TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff); +} + +#if 0 +/* vm86plus version */ +void OPPROTO op_movw_eflags_T0_vm(void) +{ + int eflags; + eflags = T0; + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + DF = 1 - (2 * ((eflags >> 10) & 1)); + /* we also update some system flags as in user mode */ + env->eflags = (env->eflags & ~(FL_UPDATE_MASK16 | VIF_MASK)) | + (eflags & FL_UPDATE_MASK16); + if (eflags & IF_MASK) { + env->eflags |= VIF_MASK; + if (env->eflags & VIP_MASK) { + EIP = PARAM1; + raise_exception(EXCP0D_GPF); + } + } + FORCE_RET(); +} + +void OPPROTO op_movl_eflags_T0_vm(void) +{ + int eflags; + eflags = T0; + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + DF = 1 - (2 * ((eflags >> 10) & 1)); + /* we also update some system flags as in user mode */ + env->eflags = (env->eflags & ~(FL_UPDATE_MASK32 | VIF_MASK)) | + (eflags & FL_UPDATE_MASK32); + if (eflags & IF_MASK) { + env->eflags |= VIF_MASK; + if (env->eflags & VIP_MASK) { + EIP = PARAM1; + raise_exception(EXCP0D_GPF); + } + } + FORCE_RET(); +} +#endif + +/* XXX: compute only O flag */ +void OPPROTO op_movb_eflags_T0(void) +{ + int of; + of = cc_table[CC_OP].compute_all() & CC_O; + CC_SRC = (T0 & (CC_S | CC_Z | CC_A | CC_P | CC_C)) | of; +} + +void OPPROTO op_movl_T0_eflags(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + eflags |= (DF & DF_MASK); + eflags |= env->eflags & ~(VM_MASK | RF_MASK); + T0 = eflags; +} + +/* vm86plus version */ +#if 0 +void OPPROTO op_movl_T0_eflags_vm(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + eflags |= (DF & DF_MASK); + eflags |= env->eflags & ~(VM_MASK | RF_MASK | IF_MASK); + if (env->eflags & VIF_MASK) + eflags |= IF_MASK; + T0 = eflags; +} +#endif + +void OPPROTO op_cld(void) +{ + DF = 1; +} + +void OPPROTO op_std(void) +{ + DF = -1; +} + +void OPPROTO op_clc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + eflags &= ~CC_C; + CC_SRC = eflags; +} + +void OPPROTO op_stc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + eflags |= CC_C; + CC_SRC = eflags; +} + +void OPPROTO op_cmc(void) +{ + int eflags; + eflags = cc_table[CC_OP].compute_all(); + eflags ^= CC_C; + CC_SRC = eflags; +} + +void OPPROTO op_salc(void) +{ + int cf; + cf = cc_table[CC_OP].compute_c(); + EAX = (EAX & ~0xff) | ((-cf) & 0xff); +} + +static int compute_all_eflags(void) +{ + return CC_SRC; +} + +static int compute_c_eflags(void) +{ + return CC_SRC & CC_C; +} + +CCTable cc_table[CC_OP_NB] = { + [CC_OP_DYNAMIC] = { /* should never happen */ }, + + [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags }, + + [CC_OP_MULB] = { compute_all_mulb, compute_c_mull }, + [CC_OP_MULW] = { compute_all_mulw, compute_c_mull }, + [CC_OP_MULL] = { compute_all_mull, compute_c_mull }, + + [CC_OP_ADDB] = { compute_all_addb, compute_c_addb }, + 
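/*
 * Illustrative aside, not part of the patch: each entry pairs a full
 * EFLAGS evaluator with a cheaper carry-only one so flags can be
 * reconstructed lazily from CC_SRC/CC_DST. For the SUB group, for
 * instance, the compute_c_sub* variants generated from ops_template.h
 * only need src1 = CC_DST + CC_SRC and the unsigned test
 * src1 < CC_SRC (at the operation's width) to recover CF, while the
 * compute_all_* forms derive all six arithmetic flags on demand.
 */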
[CC_OP_ADDW] = { compute_all_addw, compute_c_addw }, + [CC_OP_ADDL] = { compute_all_addl, compute_c_addl }, + + [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb }, + [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw }, + [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl }, + + [CC_OP_SUBB] = { compute_all_subb, compute_c_subb }, + [CC_OP_SUBW] = { compute_all_subw, compute_c_subw }, + [CC_OP_SUBL] = { compute_all_subl, compute_c_subl }, + + [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb }, + [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw }, + [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl }, + + [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb }, + [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw }, + [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl }, + + [CC_OP_INCB] = { compute_all_incb, compute_c_incl }, + [CC_OP_INCW] = { compute_all_incw, compute_c_incl }, + [CC_OP_INCL] = { compute_all_incl, compute_c_incl }, + + [CC_OP_DECB] = { compute_all_decb, compute_c_incl }, + [CC_OP_DECW] = { compute_all_decw, compute_c_incl }, + [CC_OP_DECL] = { compute_all_decl, compute_c_incl }, + + [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb }, + [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw }, + [CC_OP_SHLL] = { compute_all_shll, compute_c_shll }, + + [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl }, + [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl }, + [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl }, + +#ifdef TARGET_X86_64 + [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull }, + + [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq }, + + [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq }, + + [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq }, + + [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq }, + + [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq }, + + [CC_OP_INCQ] = { compute_all_incq, compute_c_incl }, + + [CC_OP_DECQ] = { compute_all_decq, compute_c_incl }, + + [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq }, + + [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl }, +#endif +}; + +/* floating point support. Some of the code for complicated x87 + functions comes from the LGPL'ed x86 emulator found in the Willows + TWIN windows emulator. */ + +/* fp load FT0 */ + +void OPPROTO op_flds_FT0_A0(void) +{ +#ifdef USE_FP_CONVERT + FP_CONVERT.i32 = ldl(A0); + FT0 = FP_CONVERT.f; +#else + FT0 = ldfl(A0); +#endif +} + +void OPPROTO op_fldl_FT0_A0(void) +{ +#ifdef USE_FP_CONVERT + FP_CONVERT.i64 = ldq(A0); + FT0 = FP_CONVERT.d; +#else + FT0 = ldfq(A0); +#endif +} + +/* helpers are needed to avoid static constant reference. 
XXX: find a better way */ +#ifdef USE_INT_TO_FLOAT_HELPERS + +void helper_fild_FT0_A0(void) +{ + FT0 = (CPU86_LDouble)ldsw(A0); +} + +void helper_fildl_FT0_A0(void) +{ + FT0 = (CPU86_LDouble)((int32_t)ldl(A0)); +} + +void helper_fildll_FT0_A0(void) +{ + FT0 = (CPU86_LDouble)((int64_t)ldq(A0)); +} + +void OPPROTO op_fild_FT0_A0(void) +{ + helper_fild_FT0_A0(); +} + +void OPPROTO op_fildl_FT0_A0(void) +{ + helper_fildl_FT0_A0(); +} + +void OPPROTO op_fildll_FT0_A0(void) +{ + helper_fildll_FT0_A0(); +} + +#else + +void OPPROTO op_fild_FT0_A0(void) +{ +#ifdef USE_FP_CONVERT + FP_CONVERT.i32 = ldsw(A0); + FT0 = (CPU86_LDouble)FP_CONVERT.i32; +#else + FT0 = (CPU86_LDouble)ldsw(A0); +#endif +} + +void OPPROTO op_fildl_FT0_A0(void) +{ +#ifdef USE_FP_CONVERT + FP_CONVERT.i32 = (int32_t) ldl(A0); + FT0 = (CPU86_LDouble)FP_CONVERT.i32; +#else + FT0 = (CPU86_LDouble)((int32_t)ldl(A0)); +#endif +} + +void OPPROTO op_fildll_FT0_A0(void) +{ +#ifdef USE_FP_CONVERT + FP_CONVERT.i64 = (int64_t) ldq(A0); + FT0 = (CPU86_LDouble)FP_CONVERT.i64; +#else + FT0 = (CPU86_LDouble)((int64_t)ldq(A0)); +#endif +} +#endif + +/* fp load ST0 */ + +void OPPROTO op_flds_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; +#ifdef USE_FP_CONVERT + FP_CONVERT.i32 = ldl(A0); + env->fpregs[new_fpstt].d = FP_CONVERT.f; +#else + env->fpregs[new_fpstt].d = ldfl(A0); +#endif + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void OPPROTO op_fldl_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; +#ifdef USE_FP_CONVERT + FP_CONVERT.i64 = ldq(A0); + env->fpregs[new_fpstt].d = FP_CONVERT.d; +#else + env->fpregs[new_fpstt].d = ldfq(A0); +#endif + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void OPPROTO op_fldt_ST0_A0(void) +{ + helper_fldt_ST0_A0(); +} + +/* helpers are needed to avoid static constant reference. 
XXX: find a better way */ +#ifdef USE_INT_TO_FLOAT_HELPERS + +void helper_fild_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = (CPU86_LDouble)ldsw(A0); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fildl_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = (CPU86_LDouble)((int32_t)ldl(A0)); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fildll_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = (CPU86_LDouble)((int64_t)ldq(A0)); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void OPPROTO op_fild_ST0_A0(void) +{ + helper_fild_ST0_A0(); +} + +void OPPROTO op_fildl_ST0_A0(void) +{ + helper_fildl_ST0_A0(); +} + +void OPPROTO op_fildll_ST0_A0(void) +{ + helper_fildll_ST0_A0(); +} + +#else + +void OPPROTO op_fild_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; +#ifdef USE_FP_CONVERT + FP_CONVERT.i32 = ldsw(A0); + env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i32; +#else + env->fpregs[new_fpstt].d = (CPU86_LDouble)ldsw(A0); +#endif + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void OPPROTO op_fildl_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; +#ifdef USE_FP_CONVERT + FP_CONVERT.i32 = (int32_t) ldl(A0); + env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i32; +#else + env->fpregs[new_fpstt].d = (CPU86_LDouble)((int32_t)ldl(A0)); +#endif + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void OPPROTO op_fildll_ST0_A0(void) +{ + int new_fpstt; + new_fpstt = (env->fpstt - 1) & 7; +#ifdef USE_FP_CONVERT + FP_CONVERT.i64 = (int64_t) ldq(A0); + env->fpregs[new_fpstt].d = (CPU86_LDouble)FP_CONVERT.i64; +#else + env->fpregs[new_fpstt].d = (CPU86_LDouble)((int64_t)ldq(A0)); +#endif + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +#endif + +/* fp store */ + +void OPPROTO op_fsts_ST0_A0(void) +{ +#ifdef USE_FP_CONVERT + FP_CONVERT.f = (float)ST0; + stfl(A0, FP_CONVERT.f); +#else + stfl(A0, (float)ST0); +#endif + FORCE_RET(); +} + +void OPPROTO op_fstl_ST0_A0(void) +{ + stfq(A0, (double)ST0); + FORCE_RET(); +} + +void OPPROTO op_fstt_ST0_A0(void) +{ + helper_fstt_ST0_A0(); +} + +void OPPROTO op_fist_ST0_A0(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + register CPU86_LDouble d asm("o0"); +#else + CPU86_LDouble d; +#endif + int val; + + d = ST0; + val = floatx_to_int32(d, &env->fp_status); + if (val != (int16_t)val) + val = -32768; + stw(A0, val); + FORCE_RET(); +} + +void OPPROTO op_fistl_ST0_A0(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + register CPU86_LDouble d asm("o0"); +#else + CPU86_LDouble d; +#endif + int val; + + d = ST0; + val = floatx_to_int32(d, &env->fp_status); + stl(A0, val); + FORCE_RET(); +} + +void OPPROTO op_fistll_ST0_A0(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + register CPU86_LDouble d asm("o0"); +#else + CPU86_LDouble d; +#endif + int64_t val; + + d = ST0; + val = floatx_to_int64(d, &env->fp_status); + stq(A0, val); + FORCE_RET(); +} + +void OPPROTO op_fistt_ST0_A0(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + register CPU86_LDouble d asm("o0"); +#else + CPU86_LDouble d; +#endif + int val; + + d = ST0; + val = floatx_to_int32_round_to_zero(d, &env->fp_status); + if 
(val != (int16_t)val) + val = -32768; + stw(A0, val); + FORCE_RET(); +} + +void OPPROTO op_fisttl_ST0_A0(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + register CPU86_LDouble d asm("o0"); +#else + CPU86_LDouble d; +#endif + int val; + + d = ST0; + val = floatx_to_int32_round_to_zero(d, &env->fp_status); + stl(A0, val); + FORCE_RET(); +} + +void OPPROTO op_fisttll_ST0_A0(void) +{ +#if defined(__sparc__) && !defined(__sparc_v9__) + register CPU86_LDouble d asm("o0"); +#else + CPU86_LDouble d; +#endif + int64_t val; + + d = ST0; + val = floatx_to_int64_round_to_zero(d, &env->fp_status); + stq(A0, val); + FORCE_RET(); +} + +void OPPROTO op_fbld_ST0_A0(void) +{ + helper_fbld_ST0_A0(); +} + +void OPPROTO op_fbst_ST0_A0(void) +{ + helper_fbst_ST0_A0(); +} + +/* FPU move */ + +void OPPROTO op_fpush(void) +{ + fpush(); +} + +void OPPROTO op_fpop(void) +{ + fpop(); +} + +void OPPROTO op_fdecstp(void) +{ + env->fpstt = (env->fpstt - 1) & 7; + env->fpus &= (~0x4700); +} + +void OPPROTO op_fincstp(void) +{ + env->fpstt = (env->fpstt + 1) & 7; + env->fpus &= (~0x4700); +} + +void OPPROTO op_ffree_STN(void) +{ + env->fptags[(env->fpstt + PARAM1) & 7] = 1; +} + +void OPPROTO op_fmov_ST0_FT0(void) +{ + ST0 = FT0; +} + +void OPPROTO op_fmov_FT0_STN(void) +{ + FT0 = ST(PARAM1); +} + +void OPPROTO op_fmov_ST0_STN(void) +{ + ST0 = ST(PARAM1); +} + +void OPPROTO op_fmov_STN_ST0(void) +{ + ST(PARAM1) = ST0; +} + +void OPPROTO op_fxchg_ST0_STN(void) +{ + CPU86_LDouble tmp; + tmp = ST(PARAM1); + ST(PARAM1) = ST0; + ST0 = tmp; +} + +/* FPU operations */ + +const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; + +void OPPROTO op_fcom_ST0_FT0(void) +{ + int ret; + + ret = floatx_compare(ST0, FT0, &env->fp_status); + env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; + FORCE_RET(); +} + +void OPPROTO op_fucom_ST0_FT0(void) +{ + int ret; + + ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); + env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1]; + FORCE_RET(); +} + +const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; + +void OPPROTO op_fcomi_ST0_FT0(void) +{ + int eflags; + int ret; + + ret = floatx_compare(ST0, FT0, &env->fp_status); + eflags = cc_table[CC_OP].compute_all(); + eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; + CC_SRC = eflags; + FORCE_RET(); +} + +void OPPROTO op_fucomi_ST0_FT0(void) +{ + int eflags; + int ret; + + ret = floatx_compare_quiet(ST0, FT0, &env->fp_status); + eflags = cc_table[CC_OP].compute_all(); + eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; + CC_SRC = eflags; + FORCE_RET(); +} + +void OPPROTO op_fcmov_ST0_STN_T0(void) +{ + if (T0) { + ST0 = ST(PARAM1); + } + FORCE_RET(); +} + +void OPPROTO op_fadd_ST0_FT0(void) +{ + ST0 += FT0; +} + +void OPPROTO op_fmul_ST0_FT0(void) +{ + ST0 *= FT0; +} + +void OPPROTO op_fsub_ST0_FT0(void) +{ + ST0 -= FT0; +} + +void OPPROTO op_fsubr_ST0_FT0(void) +{ + ST0 = FT0 - ST0; +} + +void OPPROTO op_fdiv_ST0_FT0(void) +{ + ST0 = helper_fdiv(ST0, FT0); +} + +void OPPROTO op_fdivr_ST0_FT0(void) +{ + ST0 = helper_fdiv(FT0, ST0); +} + +/* fp operations between STN and ST0 */ + +void OPPROTO op_fadd_STN_ST0(void) +{ + ST(PARAM1) += ST0; +} + +void OPPROTO op_fmul_STN_ST0(void) +{ + ST(PARAM1) *= ST0; +} + +void OPPROTO op_fsub_STN_ST0(void) +{ + ST(PARAM1) -= ST0; +} + +void OPPROTO op_fsubr_STN_ST0(void) +{ + CPU86_LDouble *p; + p = &ST(PARAM1); + *p = ST0 - *p; +} + +void OPPROTO op_fdiv_STN_ST0(void) +{ + CPU86_LDouble *p; + p = &ST(PARAM1); + *p = helper_fdiv(*p, ST0); +} + 
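+/* NOTE: the div ops above and below go through helper_fdiv() instead of a
+   plain '/' so that helper.c can record a zero divide in the FPU status
+   word before producing the IEEE result; the helper is roughly (sketch,
+   see helper.c for the authoritative version):
+
+     CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
+     {
+         if (b == 0.0)
+             fpu_set_exception(FPUS_ZE);   /* record #Z in env->fpus */
+         return a / b;
+     }
+*/
+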
+void OPPROTO op_fdivr_STN_ST0(void)
+{
+    CPU86_LDouble *p;
+    p = &ST(PARAM1);
+    *p = helper_fdiv(ST0, *p);
+}
+
+/* misc FPU operations */
+void OPPROTO op_fchs_ST0(void)
+{
+    ST0 = floatx_chs(ST0);
+}
+
+void OPPROTO op_fabs_ST0(void)
+{
+    ST0 = floatx_abs(ST0);
+}
+
+void OPPROTO op_fxam_ST0(void)
+{
+    helper_fxam_ST0();
+}
+
+void OPPROTO op_fld1_ST0(void)
+{
+    ST0 = f15rk[1];
+}
+
+void OPPROTO op_fldl2t_ST0(void)
+{
+    ST0 = f15rk[6];
+}
+
+void OPPROTO op_fldl2e_ST0(void)
+{
+    ST0 = f15rk[5];
+}
+
+void OPPROTO op_fldpi_ST0(void)
+{
+    ST0 = f15rk[2];
+}
+
+void OPPROTO op_fldlg2_ST0(void)
+{
+    ST0 = f15rk[3];
+}
+
+void OPPROTO op_fldln2_ST0(void)
+{
+    ST0 = f15rk[4];
+}
+
+void OPPROTO op_fldz_ST0(void)
+{
+    ST0 = f15rk[0];
+}
+
+void OPPROTO op_fldz_FT0(void)
+{
+    FT0 = f15rk[0];
+}
+
+/* associated helpers to reduce generated code length and to simplify
+   relocation (FP constants are usually stored in .rodata section) */
+
+void OPPROTO op_f2xm1(void)
+{
+    helper_f2xm1();
+}
+
+void OPPROTO op_fyl2x(void)
+{
+    helper_fyl2x();
+}
+
+void OPPROTO op_fptan(void)
+{
+    helper_fptan();
+}
+
+void OPPROTO op_fpatan(void)
+{
+    helper_fpatan();
+}
+
+void OPPROTO op_fxtract(void)
+{
+    helper_fxtract();
+}
+
+void OPPROTO op_fprem1(void)
+{
+    helper_fprem1();
+}
+
+
+void OPPROTO op_fprem(void)
+{
+    helper_fprem();
+}
+
+void OPPROTO op_fyl2xp1(void)
+{
+    helper_fyl2xp1();
+}
+
+void OPPROTO op_fsqrt(void)
+{
+    helper_fsqrt();
+}
+
+void OPPROTO op_fsincos(void)
+{
+    helper_fsincos();
+}
+
+void OPPROTO op_frndint(void)
+{
+    helper_frndint();
+}
+
+void OPPROTO op_fscale(void)
+{
+    helper_fscale();
+}
+
+void OPPROTO op_fsin(void)
+{
+    helper_fsin();
+}
+
+void OPPROTO op_fcos(void)
+{
+    helper_fcos();
+}
+
+void OPPROTO op_fnstsw_A0(void)
+{
+    int fpus;
+    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+    stw(A0, fpus);
+    FORCE_RET();
+}
+
+void OPPROTO op_fnstsw_EAX(void)
+{
+    int fpus;
+    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+    EAX = (EAX & ~0xffff) | fpus;
+}
+
+void OPPROTO op_fnstcw_A0(void)
+{
+    stw(A0, env->fpuc);
+    FORCE_RET();
+}
+
+void OPPROTO op_fldcw_A0(void)
+{
+    env->fpuc = lduw(A0);
+    update_fp_status();
+}
+
+void OPPROTO op_fclex(void)
+{
+    env->fpus &= 0x7f00;
+}
+
+void OPPROTO op_fwait(void)
+{
+    if (env->fpus & FPUS_SE)
+        fpu_raise_exception();
+    FORCE_RET();
+}
+
+void OPPROTO op_fninit(void)
+{
+    env->fpus = 0;
+    env->fpstt = 0;
+    env->fpuc = 0x37f;
+    env->fptags[0] = 1;
+    env->fptags[1] = 1;
+    env->fptags[2] = 1;
+    env->fptags[3] = 1;
+    env->fptags[4] = 1;
+    env->fptags[5] = 1;
+    env->fptags[6] = 1;
+    env->fptags[7] = 1;
+}
+
+void OPPROTO op_fnstenv_A0(void)
+{
+    helper_fstenv(A0, PARAM1);
+}
+
+void OPPROTO op_fldenv_A0(void)
+{
+    helper_fldenv(A0, PARAM1);
+}
+
+void OPPROTO op_fnsave_A0(void)
+{
+    helper_fsave(A0, PARAM1);
+}
+
+void OPPROTO op_frstor_A0(void)
+{
+    helper_frstor(A0, PARAM1);
+}
+
+/* threading support */
+void OPPROTO op_lock(void)
+{
+    cpu_lock();
+}
+
+void OPPROTO op_unlock(void)
+{
+    cpu_unlock();
+}
+
+/* SSE support */
+static inline void memcpy16(void *d, void *s)
+{
+    ((uint32_t *)d)[0] = ((uint32_t *)s)[0];
+    ((uint32_t *)d)[1] = ((uint32_t *)s)[1];
+    ((uint32_t *)d)[2] = ((uint32_t *)s)[2];
+    ((uint32_t *)d)[3] = ((uint32_t *)s)[3];
+}
+
+void OPPROTO op_movo(void)
+{
+    /* XXX: badly generated code */
+    XMMReg *d, *s;
+    d = (XMMReg *)((char *)env + PARAM1);
+    s = (XMMReg *)((char *)env + PARAM2);
+    memcpy16(d, s);
+}
+
+void OPPROTO op_movq(void)
+{
+    uint64_t *d, *s;
+    d = (uint64_t *)((char *)env + PARAM1);
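+    /* PARAM1/PARAM2 are byte offsets into CPUX86State (e.g.
+       offsetof(CPUX86State, xmm_regs[reg])), patched into the copied
+       micro-op by dyngen at translation time. */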
+ s = (uint64_t *)((char *)env + PARAM2); + *d = *s; +} + +void OPPROTO op_movl(void) +{ + uint32_t *d, *s; + d = (uint32_t *)((char *)env + PARAM1); + s = (uint32_t *)((char *)env + PARAM2); + *d = *s; +} + +void OPPROTO op_movq_env_0(void) +{ + uint64_t *d; + d = (uint64_t *)((char *)env + PARAM1); + *d = 0; +} + +void OPPROTO op_fxsave_A0(void) +{ + helper_fxsave(A0, PARAM1); +} + +void OPPROTO op_fxrstor_A0(void) +{ + helper_fxrstor(A0, PARAM1); +} + +/* XXX: optimize by storing fptt and fptags in the static cpu state */ +void OPPROTO op_enter_mmx(void) +{ + env->fpstt = 0; + *(uint32_t *)(env->fptags) = 0; + *(uint32_t *)(env->fptags + 4) = 0; +} + +void OPPROTO op_emms(void) +{ + /* set to empty state */ + *(uint32_t *)(env->fptags) = 0x01010101; + *(uint32_t *)(env->fptags + 4) = 0x01010101; +} + +#define SHIFT 0 +#include "ops_sse.h" + +#define SHIFT 1 +#include "ops_sse.h" diff --git a/target-i386/opreg_template.h b/target-i386/opreg_template.h new file mode 100644 index 0000000..6480636 --- /dev/null +++ b/target-i386/opreg_template.h @@ -0,0 +1,190 @@ +/* + * i386 micro operations (templates for various register related + * operations) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +void OPPROTO glue(op_movl_A0,REGNAME)(void) +{ + A0 = (uint32_t)REG; +} + +void OPPROTO glue(op_addl_A0,REGNAME)(void) +{ + A0 = (uint32_t)(A0 + REG); +} + +void OPPROTO glue(glue(op_addl_A0,REGNAME),_s1)(void) +{ + A0 = (uint32_t)(A0 + (REG << 1)); +} + +void OPPROTO glue(glue(op_addl_A0,REGNAME),_s2)(void) +{ + A0 = (uint32_t)(A0 + (REG << 2)); +} + +void OPPROTO glue(glue(op_addl_A0,REGNAME),_s3)(void) +{ + A0 = (uint32_t)(A0 + (REG << 3)); +} + +#ifdef TARGET_X86_64 +void OPPROTO glue(op_movq_A0,REGNAME)(void) +{ + A0 = REG; +} + +void OPPROTO glue(op_addq_A0,REGNAME)(void) +{ + A0 = (A0 + REG); +} + +void OPPROTO glue(glue(op_addq_A0,REGNAME),_s1)(void) +{ + A0 = (A0 + (REG << 1)); +} + +void OPPROTO glue(glue(op_addq_A0,REGNAME),_s2)(void) +{ + A0 = (A0 + (REG << 2)); +} + +void OPPROTO glue(glue(op_addq_A0,REGNAME),_s3)(void) +{ + A0 = (A0 + (REG << 3)); +} +#endif + +void OPPROTO glue(op_movl_T0,REGNAME)(void) +{ + T0 = REG; +} + +void OPPROTO glue(op_movl_T1,REGNAME)(void) +{ + T1 = REG; +} + +void OPPROTO glue(op_movh_T0,REGNAME)(void) +{ + T0 = REG >> 8; +} + +void OPPROTO glue(op_movh_T1,REGNAME)(void) +{ + T1 = REG >> 8; +} + +void OPPROTO glue(glue(op_movl,REGNAME),_T0)(void) +{ + REG = (uint32_t)T0; +} + +void OPPROTO glue(glue(op_movl,REGNAME),_T1)(void) +{ + REG = (uint32_t)T1; +} + +void OPPROTO glue(glue(op_movl,REGNAME),_A0)(void) +{ + REG = (uint32_t)A0; +} + +#ifdef TARGET_X86_64 +void OPPROTO glue(glue(op_movq,REGNAME),_T0)(void) +{ + REG = T0; +} + +void OPPROTO glue(glue(op_movq,REGNAME),_T1)(void) +{ + REG = T1; +} + +void OPPROTO 
glue(glue(op_movq,REGNAME),_A0)(void) +{ + REG = A0; +} +#endif + +/* mov T1 to REG if T0 is true */ +void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void) +{ + if (T0) + REG = (REG & ~0xffff) | (T1 & 0xffff); + FORCE_RET(); +} + +void OPPROTO glue(glue(op_cmovl,REGNAME),_T1_T0)(void) +{ + if (T0) + REG = (uint32_t)T1; + FORCE_RET(); +} + +#ifdef TARGET_X86_64 +void OPPROTO glue(glue(op_cmovq,REGNAME),_T1_T0)(void) +{ + if (T0) + REG = T1; + FORCE_RET(); +} +#endif + +/* NOTE: T0 high order bits are ignored */ +void OPPROTO glue(glue(op_movw,REGNAME),_T0)(void) +{ + REG = (REG & ~0xffff) | (T0 & 0xffff); +} + +/* NOTE: T0 high order bits are ignored */ +void OPPROTO glue(glue(op_movw,REGNAME),_T1)(void) +{ + REG = (REG & ~0xffff) | (T1 & 0xffff); +} + +/* NOTE: A0 high order bits are ignored */ +void OPPROTO glue(glue(op_movw,REGNAME),_A0)(void) +{ + REG = (REG & ~0xffff) | (A0 & 0xffff); +} + +/* NOTE: T0 high order bits are ignored */ +void OPPROTO glue(glue(op_movb,REGNAME),_T0)(void) +{ + REG = (REG & ~0xff) | (T0 & 0xff); +} + +/* NOTE: T0 high order bits are ignored */ +void OPPROTO glue(glue(op_movh,REGNAME),_T0)(void) +{ + REG = (REG & ~0xff00) | ((T0 & 0xff) << 8); +} + +/* NOTE: T1 high order bits are ignored */ +void OPPROTO glue(glue(op_movb,REGNAME),_T1)(void) +{ + REG = (REG & ~0xff) | (T1 & 0xff); +} + +/* NOTE: T1 high order bits are ignored */ +void OPPROTO glue(glue(op_movh,REGNAME),_T1)(void) +{ + REG = (REG & ~0xff00) | ((T1 & 0xff) << 8); +} + diff --git a/target-i386/ops_mem.h b/target-i386/ops_mem.h new file mode 100644 index 0000000..7ec84dd --- /dev/null +++ b/target-i386/ops_mem.h @@ -0,0 +1,156 @@ +void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void) +{ + T0 = glue(ldub, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T0_A0)(void) +{ + T0 = glue(ldsb, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T0_A0)(void) +{ + T0 = glue(lduw, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T0_A0)(void) +{ + T0 = glue(ldsw, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T0_A0)(void) +{ + T0 = (uint32_t)glue(ldl, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T1_A0)(void) +{ + T1 = glue(ldub, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T1_A0)(void) +{ + T1 = glue(ldsb, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T1_A0)(void) +{ + T1 = glue(lduw, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T1_A0)(void) +{ + T1 = glue(ldsw, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T1_A0)(void) +{ + T1 = (uint32_t)glue(ldl, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T0_A0)(void) +{ + glue(stb, MEMSUFFIX)(A0, T0); + FORCE_RET(); +} + +void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T0_A0)(void) +{ + glue(stw, MEMSUFFIX)(A0, T0); + FORCE_RET(); +} + +void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T0_A0)(void) +{ + glue(stl, MEMSUFFIX)(A0, T0); + FORCE_RET(); +} + +#if 0 +void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T1_A0)(void) +{ + glue(stb, MEMSUFFIX)(A0, T1); + FORCE_RET(); +} +#endif + +void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T1_A0)(void) +{ + glue(stw, MEMSUFFIX)(A0, T1); + FORCE_RET(); +} + +void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T1_A0)(void) +{ + glue(stl, MEMSUFFIX)(A0, T1); + FORCE_RET(); +} + +/* SSE/MMX support */ +void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _env_A0)(void) +{ + uint64_t *p; + p = (uint64_t *)((char *)env + PARAM1); + *p = glue(ldq, MEMSUFFIX)(A0); +} + 
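+/* NOTE: this header is stamped out once per memory access mode: op.c
+   includes it several times with a different MEMSUFFIX each time, so e.g.
+   the T0 byte load above expands to (assuming the usual softmmu
+   configuration):
+
+     op_ldub_raw_T0_A0       direct host access
+     op_ldub_kernel_T0_A0    softmmu, CPL != 3
+     op_ldub_user_T0_A0      softmmu, CPL == 3
+*/
+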
+void OPPROTO glue(glue(op_stq, MEMSUFFIX), _env_A0)(void) +{ + uint64_t *p; + p = (uint64_t *)((char *)env + PARAM1); + glue(stq, MEMSUFFIX)(A0, *p); + FORCE_RET(); +} + +void OPPROTO glue(glue(op_ldo, MEMSUFFIX), _env_A0)(void) +{ + XMMReg *p; + p = (XMMReg *)((char *)env + PARAM1); + p->XMM_Q(0) = glue(ldq, MEMSUFFIX)(A0); + p->XMM_Q(1) = glue(ldq, MEMSUFFIX)(A0 + 8); +} + +void OPPROTO glue(glue(op_sto, MEMSUFFIX), _env_A0)(void) +{ + XMMReg *p; + p = (XMMReg *)((char *)env + PARAM1); + glue(stq, MEMSUFFIX)(A0, p->XMM_Q(0)); + glue(stq, MEMSUFFIX)(A0 + 8, p->XMM_Q(1)); + FORCE_RET(); +} + +#ifdef TARGET_X86_64 +void OPPROTO glue(glue(op_ldsl, MEMSUFFIX), _T0_A0)(void) +{ + T0 = (int32_t)glue(ldl, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldsl, MEMSUFFIX), _T1_A0)(void) +{ + T1 = (int32_t)glue(ldl, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _T0_A0)(void) +{ + T0 = glue(ldq, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_ldq, MEMSUFFIX), _T1_A0)(void) +{ + T1 = glue(ldq, MEMSUFFIX)(A0); +} + +void OPPROTO glue(glue(op_stq, MEMSUFFIX), _T0_A0)(void) +{ + glue(stq, MEMSUFFIX)(A0, T0); + FORCE_RET(); +} + +void OPPROTO glue(glue(op_stq, MEMSUFFIX), _T1_A0)(void) +{ + glue(stq, MEMSUFFIX)(A0, T1); + FORCE_RET(); +} +#endif + +#undef MEMSUFFIX diff --git a/target-i386/ops_sse.h b/target-i386/ops_sse.h new file mode 100644 index 0000000..cdc3801 --- /dev/null +++ b/target-i386/ops_sse.h @@ -0,0 +1,1374 @@ +/* + * MMX/SSE/SSE2/PNI support + * + * Copyright (c) 2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#if SHIFT == 0 +#define Reg MMXReg +#define XMM_ONLY(x...) +#define B(n) MMX_B(n) +#define W(n) MMX_W(n) +#define L(n) MMX_L(n) +#define Q(n) q +#define SUFFIX _mmx +#else +#define Reg XMMReg +#define XMM_ONLY(x...) 
x +#define B(n) XMM_B(n) +#define W(n) XMM_W(n) +#define L(n) XMM_L(n) +#define Q(n) XMM_Q(n) +#define SUFFIX _xmm +#endif + +void OPPROTO glue(op_psrlw, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 15) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->W(0) >>= shift; + d->W(1) >>= shift; + d->W(2) >>= shift; + d->W(3) >>= shift; +#if SHIFT == 1 + d->W(4) >>= shift; + d->W(5) >>= shift; + d->W(6) >>= shift; + d->W(7) >>= shift; +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(op_psraw, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 15) { + shift = 15; + } else { + shift = s->B(0); + } + d->W(0) = (int16_t)d->W(0) >> shift; + d->W(1) = (int16_t)d->W(1) >> shift; + d->W(2) = (int16_t)d->W(2) >> shift; + d->W(3) = (int16_t)d->W(3) >> shift; +#if SHIFT == 1 + d->W(4) = (int16_t)d->W(4) >> shift; + d->W(5) = (int16_t)d->W(5) >> shift; + d->W(6) = (int16_t)d->W(6) >> shift; + d->W(7) = (int16_t)d->W(7) >> shift; +#endif +} + +void OPPROTO glue(op_psllw, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 15) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->W(0) <<= shift; + d->W(1) <<= shift; + d->W(2) <<= shift; + d->W(3) <<= shift; +#if SHIFT == 1 + d->W(4) <<= shift; + d->W(5) <<= shift; + d->W(6) <<= shift; + d->W(7) <<= shift; +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(op_psrld, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 31) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->L(0) >>= shift; + d->L(1) >>= shift; +#if SHIFT == 1 + d->L(2) >>= shift; + d->L(3) >>= shift; +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(op_psrad, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 31) { + shift = 31; + } else { + shift = s->B(0); + } + d->L(0) = (int32_t)d->L(0) >> shift; + d->L(1) = (int32_t)d->L(1) >> shift; +#if SHIFT == 1 + d->L(2) = (int32_t)d->L(2) >> shift; + d->L(3) = (int32_t)d->L(3) >> shift; +#endif +} + +void OPPROTO glue(op_pslld, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 31) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->L(0) <<= shift; + d->L(1) <<= shift; +#if SHIFT == 1 + d->L(2) <<= shift; + d->L(3) <<= shift; +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(op_psrlq, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 63) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->Q(0) >>= shift; +#if SHIFT == 1 + d->Q(1) >>= shift; +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(op_psllq, SUFFIX)(void) +{ + Reg *d, *s; + int shift; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + if (s->Q(0) > 63) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->Q(0) <<= shift; +#if SHIFT == 1 + d->Q(1) <<= shift; +#endif + } + FORCE_RET(); +} + +#if SHIFT == 1 +void OPPROTO 
glue(op_psrldq, SUFFIX)(void) +{ + Reg *d, *s; + int shift, i; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + shift = s->L(0); + if (shift > 16) + shift = 16; + for(i = 0; i < 16 - shift; i++) + d->B(i) = d->B(i + shift); + for(i = 16 - shift; i < 16; i++) + d->B(i) = 0; + FORCE_RET(); +} + +void OPPROTO glue(op_pslldq, SUFFIX)(void) +{ + Reg *d, *s; + int shift, i; + + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + shift = s->L(0); + if (shift > 16) + shift = 16; + for(i = 15; i >= shift; i--) + d->B(i) = d->B(i - shift); + for(i = 0; i < shift; i++) + d->B(i) = 0; + FORCE_RET(); +} +#endif + +#define SSE_OP_B(name, F)\ +void OPPROTO glue(name, SUFFIX) (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->B(0) = F(d->B(0), s->B(0));\ + d->B(1) = F(d->B(1), s->B(1));\ + d->B(2) = F(d->B(2), s->B(2));\ + d->B(3) = F(d->B(3), s->B(3));\ + d->B(4) = F(d->B(4), s->B(4));\ + d->B(5) = F(d->B(5), s->B(5));\ + d->B(6) = F(d->B(6), s->B(6));\ + d->B(7) = F(d->B(7), s->B(7));\ + XMM_ONLY(\ + d->B(8) = F(d->B(8), s->B(8));\ + d->B(9) = F(d->B(9), s->B(9));\ + d->B(10) = F(d->B(10), s->B(10));\ + d->B(11) = F(d->B(11), s->B(11));\ + d->B(12) = F(d->B(12), s->B(12));\ + d->B(13) = F(d->B(13), s->B(13));\ + d->B(14) = F(d->B(14), s->B(14));\ + d->B(15) = F(d->B(15), s->B(15));\ + )\ +} + +#define SSE_OP_W(name, F)\ +void OPPROTO glue(name, SUFFIX) (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->W(0) = F(d->W(0), s->W(0));\ + d->W(1) = F(d->W(1), s->W(1));\ + d->W(2) = F(d->W(2), s->W(2));\ + d->W(3) = F(d->W(3), s->W(3));\ + XMM_ONLY(\ + d->W(4) = F(d->W(4), s->W(4));\ + d->W(5) = F(d->W(5), s->W(5));\ + d->W(6) = F(d->W(6), s->W(6));\ + d->W(7) = F(d->W(7), s->W(7));\ + )\ +} + +#define SSE_OP_L(name, F)\ +void OPPROTO glue(name, SUFFIX) (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->L(0) = F(d->L(0), s->L(0));\ + d->L(1) = F(d->L(1), s->L(1));\ + XMM_ONLY(\ + d->L(2) = F(d->L(2), s->L(2));\ + d->L(3) = F(d->L(3), s->L(3));\ + )\ +} + +#define SSE_OP_Q(name, F)\ +void OPPROTO glue(name, SUFFIX) (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->Q(0) = F(d->Q(0), s->Q(0));\ + XMM_ONLY(\ + d->Q(1) = F(d->Q(1), s->Q(1));\ + )\ +} + +#if SHIFT == 0 +static inline int satub(int x) +{ + if (x < 0) + return 0; + else if (x > 255) + return 255; + else + return x; +} + +static inline int satuw(int x) +{ + if (x < 0) + return 0; + else if (x > 65535) + return 65535; + else + return x; +} + +static inline int satsb(int x) +{ + if (x < -128) + return -128; + else if (x > 127) + return 127; + else + return x; +} + +static inline int satsw(int x) +{ + if (x < -32768) + return -32768; + else if (x > 32767) + return 32767; + else + return x; +} + +#define FADD(a, b) ((a) + (b)) +#define FADDUB(a, b) satub((a) + (b)) +#define FADDUW(a, b) satuw((a) + (b)) +#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b)) +#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b)) + +#define FSUB(a, b) ((a) - (b)) +#define FSUBUB(a, b) satub((a) - (b)) +#define FSUBUW(a, b) satuw((a) - (b)) +#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b)) +#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b)) +#define FMINUB(a, b) ((a) < (b)) ? (a) : (b) +#define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b) +#define FMAXUB(a, b) ((a) > (b)) ? 
(a) : (b) +#define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b) + +#define FAND(a, b) (a) & (b) +#define FANDN(a, b) ((~(a)) & (b)) +#define FOR(a, b) (a) | (b) +#define FXOR(a, b) (a) ^ (b) + +#define FCMPGTB(a, b) (int8_t)(a) > (int8_t)(b) ? -1 : 0 +#define FCMPGTW(a, b) (int16_t)(a) > (int16_t)(b) ? -1 : 0 +#define FCMPGTL(a, b) (int32_t)(a) > (int32_t)(b) ? -1 : 0 +#define FCMPEQ(a, b) (a) == (b) ? -1 : 0 + +#define FMULLW(a, b) (a) * (b) +#define FMULHUW(a, b) (a) * (b) >> 16 +#define FMULHW(a, b) (int16_t)(a) * (int16_t)(b) >> 16 + +#define FAVG(a, b) ((a) + (b) + 1) >> 1 +#endif + +SSE_OP_B(op_paddb, FADD) +SSE_OP_W(op_paddw, FADD) +SSE_OP_L(op_paddl, FADD) +SSE_OP_Q(op_paddq, FADD) + +SSE_OP_B(op_psubb, FSUB) +SSE_OP_W(op_psubw, FSUB) +SSE_OP_L(op_psubl, FSUB) +SSE_OP_Q(op_psubq, FSUB) + +SSE_OP_B(op_paddusb, FADDUB) +SSE_OP_B(op_paddsb, FADDSB) +SSE_OP_B(op_psubusb, FSUBUB) +SSE_OP_B(op_psubsb, FSUBSB) + +SSE_OP_W(op_paddusw, FADDUW) +SSE_OP_W(op_paddsw, FADDSW) +SSE_OP_W(op_psubusw, FSUBUW) +SSE_OP_W(op_psubsw, FSUBSW) + +SSE_OP_B(op_pminub, FMINUB) +SSE_OP_B(op_pmaxub, FMAXUB) + +SSE_OP_W(op_pminsw, FMINSW) +SSE_OP_W(op_pmaxsw, FMAXSW) + +SSE_OP_Q(op_pand, FAND) +SSE_OP_Q(op_pandn, FANDN) +SSE_OP_Q(op_por, FOR) +SSE_OP_Q(op_pxor, FXOR) + +SSE_OP_B(op_pcmpgtb, FCMPGTB) +SSE_OP_W(op_pcmpgtw, FCMPGTW) +SSE_OP_L(op_pcmpgtl, FCMPGTL) + +SSE_OP_B(op_pcmpeqb, FCMPEQ) +SSE_OP_W(op_pcmpeqw, FCMPEQ) +SSE_OP_L(op_pcmpeql, FCMPEQ) + +SSE_OP_W(op_pmullw, FMULLW) +SSE_OP_W(op_pmulhuw, FMULHUW) +SSE_OP_W(op_pmulhw, FMULHW) + +SSE_OP_B(op_pavgb, FAVG) +SSE_OP_W(op_pavgw, FAVG) + +void OPPROTO glue(op_pmuludq, SUFFIX) (void) +{ + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0); +#if SHIFT == 1 + d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2); +#endif +} + +void OPPROTO glue(op_pmaddwd, SUFFIX) (void) +{ + int i; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + for(i = 0; i < (2 << SHIFT); i++) { + d->L(i) = (int16_t)s->W(2*i) * (int16_t)d->W(2*i) + + (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1); + } + FORCE_RET(); +} + +#if SHIFT == 0 +static inline int abs1(int a) +{ + if (a < 0) + return -a; + else + return a; +} +#endif +void OPPROTO glue(op_psadbw, SUFFIX) (void) +{ + unsigned int val; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + val = 0; + val += abs1(d->B(0) - s->B(0)); + val += abs1(d->B(1) - s->B(1)); + val += abs1(d->B(2) - s->B(2)); + val += abs1(d->B(3) - s->B(3)); + val += abs1(d->B(4) - s->B(4)); + val += abs1(d->B(5) - s->B(5)); + val += abs1(d->B(6) - s->B(6)); + val += abs1(d->B(7) - s->B(7)); + d->Q(0) = val; +#if SHIFT == 1 + val = 0; + val += abs1(d->B(8) - s->B(8)); + val += abs1(d->B(9) - s->B(9)); + val += abs1(d->B(10) - s->B(10)); + val += abs1(d->B(11) - s->B(11)); + val += abs1(d->B(12) - s->B(12)); + val += abs1(d->B(13) - s->B(13)); + val += abs1(d->B(14) - s->B(14)); + val += abs1(d->B(15) - s->B(15)); + d->Q(1) = val; +#endif +} + +void OPPROTO glue(op_maskmov, SUFFIX) (void) +{ + int i; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + for(i = 0; i < (8 << SHIFT); i++) { + if (s->B(i) & 0x80) + stb(A0 + i, d->B(i)); + } + FORCE_RET(); +} + +void OPPROTO glue(op_movl_mm_T0, SUFFIX) (void) +{ + Reg *d; + d = (Reg *)((char *)env + PARAM1); + d->L(0) = T0; + d->L(1) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif +} + +void OPPROTO 
glue(op_movl_T0_mm, SUFFIX) (void) +{ + Reg *s; + s = (Reg *)((char *)env + PARAM1); + T0 = s->L(0); +} + +#if SHIFT == 0 +void OPPROTO glue(op_pshufw, SUFFIX) (void) +{ + Reg r, *d, *s; + int order; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + order = PARAM3; + r.W(0) = s->W(order & 3); + r.W(1) = s->W((order >> 2) & 3); + r.W(2) = s->W((order >> 4) & 3); + r.W(3) = s->W((order >> 6) & 3); + *d = r; +} +#else +void OPPROTO op_shufps(void) +{ + Reg r, *d, *s; + int order; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + order = PARAM3; + r.L(0) = d->L(order & 3); + r.L(1) = d->L((order >> 2) & 3); + r.L(2) = s->L((order >> 4) & 3); + r.L(3) = s->L((order >> 6) & 3); + *d = r; +} + +void OPPROTO op_shufpd(void) +{ + Reg r, *d, *s; + int order; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + order = PARAM3; + r.Q(0) = d->Q(order & 1); + r.Q(1) = s->Q((order >> 1) & 1); + *d = r; +} + +void OPPROTO glue(op_pshufd, SUFFIX) (void) +{ + Reg r, *d, *s; + int order; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + order = PARAM3; + r.L(0) = s->L(order & 3); + r.L(1) = s->L((order >> 2) & 3); + r.L(2) = s->L((order >> 4) & 3); + r.L(3) = s->L((order >> 6) & 3); + *d = r; +} + +void OPPROTO glue(op_pshuflw, SUFFIX) (void) +{ + Reg r, *d, *s; + int order; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + order = PARAM3; + r.W(0) = s->W(order & 3); + r.W(1) = s->W((order >> 2) & 3); + r.W(2) = s->W((order >> 4) & 3); + r.W(3) = s->W((order >> 6) & 3); + r.Q(1) = s->Q(1); + *d = r; +} + +void OPPROTO glue(op_pshufhw, SUFFIX) (void) +{ + Reg r, *d, *s; + int order; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + order = PARAM3; + r.Q(0) = s->Q(0); + r.W(4) = s->W(4 + (order & 3)); + r.W(5) = s->W(4 + ((order >> 2) & 3)); + r.W(6) = s->W(4 + ((order >> 4) & 3)); + r.W(7) = s->W(4 + ((order >> 6) & 3)); + *d = r; +} +#endif + +#if SHIFT == 1 +/* FPU ops */ +/* XXX: not accurate */ + +#define SSE_OP_S(name, F)\ +void OPPROTO op_ ## name ## ps (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ + d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1));\ + d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2));\ + d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3));\ +}\ +\ +void OPPROTO op_ ## name ## ss (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ +}\ +void OPPROTO op_ ## name ## pd (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ + d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1));\ +}\ +\ +void OPPROTO op_ ## name ## sd (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ +} + +#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) +#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status) +#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status) +#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status) +#define FPU_MIN(size, a, b) (a) < (b) ? (a) : (b) +#define FPU_MAX(size, a, b) (a) > (b) ? 
(a) : (b) +#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status) + +SSE_OP_S(add, FPU_ADD) +SSE_OP_S(sub, FPU_SUB) +SSE_OP_S(mul, FPU_MUL) +SSE_OP_S(div, FPU_DIV) +SSE_OP_S(min, FPU_MIN) +SSE_OP_S(max, FPU_MAX) +SSE_OP_S(sqrt, FPU_SQRT) + + +/* float to float conversions */ +void OPPROTO op_cvtps2pd(void) +{ + float32 s0, s1; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + s0 = s->XMM_S(0); + s1 = s->XMM_S(1); + d->XMM_D(0) = float32_to_float64(s0, &env->sse_status); + d->XMM_D(1) = float32_to_float64(s1, &env->sse_status); +} + +void OPPROTO op_cvtpd2ps(void) +{ + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); + d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status); + d->Q(1) = 0; +} + +void OPPROTO op_cvtss2sd(void) +{ + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status); +} + +void OPPROTO op_cvtsd2ss(void) +{ + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); +} + +/* integer to float */ +void OPPROTO op_cvtdq2ps(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status); + d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status); + d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status); + d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status); +} + +void OPPROTO op_cvtdq2pd(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + int32_t l0, l1; + l0 = (int32_t)s->XMM_L(0); + l1 = (int32_t)s->XMM_L(1); + d->XMM_D(0) = int32_to_float64(l0, &env->sse_status); + d->XMM_D(1) = int32_to_float64(l1, &env->sse_status); +} + +void OPPROTO op_cvtpi2ps(void) +{ + XMMReg *d = (Reg *)((char *)env + PARAM1); + MMXReg *s = (MMXReg *)((char *)env + PARAM2); + d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); + d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); +} + +void OPPROTO op_cvtpi2pd(void) +{ + XMMReg *d = (Reg *)((char *)env + PARAM1); + MMXReg *s = (MMXReg *)((char *)env + PARAM2); + d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); + d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); +} + +void OPPROTO op_cvtsi2ss(void) +{ + XMMReg *d = (Reg *)((char *)env + PARAM1); + d->XMM_S(0) = int32_to_float32(T0, &env->sse_status); +} + +void OPPROTO op_cvtsi2sd(void) +{ + XMMReg *d = (Reg *)((char *)env + PARAM1); + d->XMM_D(0) = int32_to_float64(T0, &env->sse_status); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_cvtsq2ss(void) +{ + XMMReg *d = (Reg *)((char *)env + PARAM1); + d->XMM_S(0) = int64_to_float32(T0, &env->sse_status); +} + +void OPPROTO op_cvtsq2sd(void) +{ + XMMReg *d = (Reg *)((char *)env + PARAM1); + d->XMM_D(0) = int64_to_float64(T0, &env->sse_status); +} +#endif + +/* float to integer */ +void OPPROTO op_cvtps2dq(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status); + d->XMM_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status); + d->XMM_L(2) = float32_to_int32(s->XMM_S(2), &env->sse_status); + d->XMM_L(3) = float32_to_int32(s->XMM_S(3), &env->sse_status); +} + +void 
OPPROTO op_cvtpd2dq(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status); + d->XMM_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status); + d->XMM_Q(1) = 0; +} + +void OPPROTO op_cvtps2pi(void) +{ + MMXReg *d = (MMXReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->MMX_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status); + d->MMX_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status); +} + +void OPPROTO op_cvtpd2pi(void) +{ + MMXReg *d = (MMXReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->MMX_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status); + d->MMX_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status); +} + +void OPPROTO op_cvtss2si(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float32_to_int32(s->XMM_S(0), &env->sse_status); +} + +void OPPROTO op_cvtsd2si(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float64_to_int32(s->XMM_D(0), &env->sse_status); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_cvtss2sq(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float32_to_int64(s->XMM_S(0), &env->sse_status); +} + +void OPPROTO op_cvtsd2sq(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float64_to_int64(s->XMM_D(0), &env->sse_status); +} +#endif + +/* float to integer truncated */ +void OPPROTO op_cvttps2dq(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); + d->XMM_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); + d->XMM_L(2) = float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status); + d->XMM_L(3) = float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status); +} + +void OPPROTO op_cvttpd2dq(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); + d->XMM_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); + d->XMM_Q(1) = 0; +} + +void OPPROTO op_cvttps2pi(void) +{ + MMXReg *d = (MMXReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->MMX_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); + d->MMX_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); +} + +void OPPROTO op_cvttpd2pi(void) +{ + MMXReg *d = (MMXReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->MMX_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); + d->MMX_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); +} + +void OPPROTO op_cvttss2si(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); +} + +void OPPROTO op_cvttsd2si(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); +} + +#ifdef TARGET_X86_64 +void OPPROTO op_cvttss2sq(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status); +} + +void OPPROTO op_cvttsd2sq(void) +{ + XMMReg *s = (XMMReg *)((char *)env + PARAM1); + T0 = float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status); +} +#endif + +void OPPROTO op_rsqrtps(void) +{ + XMMReg *d = (XMMReg *)((char *)env + 
PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_S(0) = approx_rsqrt(s->XMM_S(0)); + d->XMM_S(1) = approx_rsqrt(s->XMM_S(1)); + d->XMM_S(2) = approx_rsqrt(s->XMM_S(2)); + d->XMM_S(3) = approx_rsqrt(s->XMM_S(3)); +} + +void OPPROTO op_rsqrtss(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_S(0) = approx_rsqrt(s->XMM_S(0)); +} + +void OPPROTO op_rcpps(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_S(0) = approx_rcp(s->XMM_S(0)); + d->XMM_S(1) = approx_rcp(s->XMM_S(1)); + d->XMM_S(2) = approx_rcp(s->XMM_S(2)); + d->XMM_S(3) = approx_rcp(s->XMM_S(3)); +} + +void OPPROTO op_rcpss(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_S(0) = approx_rcp(s->XMM_S(0)); +} + +void OPPROTO op_haddps(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + XMMReg r; + r.XMM_S(0) = d->XMM_S(0) + d->XMM_S(1); + r.XMM_S(1) = d->XMM_S(2) + d->XMM_S(3); + r.XMM_S(2) = s->XMM_S(0) + s->XMM_S(1); + r.XMM_S(3) = s->XMM_S(2) + s->XMM_S(3); + *d = r; +} + +void OPPROTO op_haddpd(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + XMMReg r; + r.XMM_D(0) = d->XMM_D(0) + d->XMM_D(1); + r.XMM_D(1) = s->XMM_D(0) + s->XMM_D(1); + *d = r; +} + +void OPPROTO op_hsubps(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + XMMReg r; + r.XMM_S(0) = d->XMM_S(0) - d->XMM_S(1); + r.XMM_S(1) = d->XMM_S(2) - d->XMM_S(3); + r.XMM_S(2) = s->XMM_S(0) - s->XMM_S(1); + r.XMM_S(3) = s->XMM_S(2) - s->XMM_S(3); + *d = r; +} + +void OPPROTO op_hsubpd(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + XMMReg r; + r.XMM_D(0) = d->XMM_D(0) - d->XMM_D(1); + r.XMM_D(1) = s->XMM_D(0) - s->XMM_D(1); + *d = r; +} + +void OPPROTO op_addsubps(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_S(0) = d->XMM_S(0) - s->XMM_S(0); + d->XMM_S(1) = d->XMM_S(1) + s->XMM_S(1); + d->XMM_S(2) = d->XMM_S(2) - s->XMM_S(2); + d->XMM_S(3) = d->XMM_S(3) + s->XMM_S(3); +} + +void OPPROTO op_addsubpd(void) +{ + XMMReg *d = (XMMReg *)((char *)env + PARAM1); + XMMReg *s = (XMMReg *)((char *)env + PARAM2); + d->XMM_D(0) = d->XMM_D(0) - s->XMM_D(0); + d->XMM_D(1) = d->XMM_D(1) + s->XMM_D(1); +} + +/* XXX: unordered */ +#define SSE_OP_CMP(name, F)\ +void OPPROTO op_ ## name ## ps (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ + d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1));\ + d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2));\ + d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3));\ +}\ +\ +void OPPROTO op_ ## name ## ss (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\ +}\ +void OPPROTO op_ ## name ## pd (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ + d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1));\ +}\ +\ +void OPPROTO op_ ## name ## sd (void)\ +{\ + Reg *d, *s;\ + d = (Reg *)((char *)env + PARAM1);\ + s = (Reg *)((char *)env + PARAM2);\ + d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\ +} 
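+
+/* NOTE: each SSE_OP_CMP() instantiation below expands to the four variants
+   op_<name>ps/ss/pd/sd.  E.g. SSE_OP_CMP(cmpeq, FPU_CMPEQ) yields an
+   op_cmpeqps whose per-lane result is an all-ones/all-zeroes mask,
+   roughly:
+
+     d->XMM_L(0) = float32_eq(d->XMM_S(0), s->XMM_S(0),
+                              &env->sse_status) ? -1 : 0;
+*/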
+ +#define FPU_CMPEQ(size, a, b) float ## size ## _eq(a, b, &env->sse_status) ? -1 : 0 +#define FPU_CMPLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0 +#define FPU_CMPLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? -1 : 0 +#define FPU_CMPUNORD(size, a, b) float ## size ## _unordered(a, b, &env->sse_status) ? - 1 : 0 +#define FPU_CMPNEQ(size, a, b) float ## size ## _eq(a, b, &env->sse_status) ? 0 : -1 +#define FPU_CMPNLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1 +#define FPU_CMPNLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? 0 : -1 +#define FPU_CMPORD(size, a, b) float ## size ## _unordered(a, b, &env->sse_status) ? 0 : -1 + +SSE_OP_CMP(cmpeq, FPU_CMPEQ) +SSE_OP_CMP(cmplt, FPU_CMPLT) +SSE_OP_CMP(cmple, FPU_CMPLE) +SSE_OP_CMP(cmpunord, FPU_CMPUNORD) +SSE_OP_CMP(cmpneq, FPU_CMPNEQ) +SSE_OP_CMP(cmpnlt, FPU_CMPNLT) +SSE_OP_CMP(cmpnle, FPU_CMPNLE) +SSE_OP_CMP(cmpord, FPU_CMPORD) + +const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; + +void OPPROTO op_ucomiss(void) +{ + int ret; + float32 s0, s1; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + s0 = d->XMM_S(0); + s1 = s->XMM_S(0); + ret = float32_compare_quiet(s0, s1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; + FORCE_RET(); +} + +void OPPROTO op_comiss(void) +{ + int ret; + float32 s0, s1; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + s0 = d->XMM_S(0); + s1 = s->XMM_S(0); + ret = float32_compare(s0, s1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; + FORCE_RET(); +} + +void OPPROTO op_ucomisd(void) +{ + int ret; + float64 d0, d1; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + d0 = d->XMM_D(0); + d1 = s->XMM_D(0); + ret = float64_compare_quiet(d0, d1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; + FORCE_RET(); +} + +void OPPROTO op_comisd(void) +{ + int ret; + float64 d0, d1; + Reg *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + d0 = d->XMM_D(0); + d1 = s->XMM_D(0); + ret = float64_compare(d0, d1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; + FORCE_RET(); +} + +void OPPROTO op_movmskps(void) +{ + int b0, b1, b2, b3; + Reg *s; + s = (Reg *)((char *)env + PARAM1); + b0 = s->XMM_L(0) >> 31; + b1 = s->XMM_L(1) >> 31; + b2 = s->XMM_L(2) >> 31; + b3 = s->XMM_L(3) >> 31; + T0 = b0 | (b1 << 1) | (b2 << 2) | (b3 << 3); +} + +void OPPROTO op_movmskpd(void) +{ + int b0, b1; + Reg *s; + s = (Reg *)((char *)env + PARAM1); + b0 = s->XMM_L(1) >> 31; + b1 = s->XMM_L(3) >> 31; + T0 = b0 | (b1 << 1); +} + +#endif + +void OPPROTO glue(op_pmovmskb, SUFFIX)(void) +{ + Reg *s; + s = (Reg *)((char *)env + PARAM1); + T0 = 0; + T0 |= (s->XMM_B(0) >> 7); + T0 |= (s->XMM_B(1) >> 6) & 0x02; + T0 |= (s->XMM_B(2) >> 5) & 0x04; + T0 |= (s->XMM_B(3) >> 4) & 0x08; + T0 |= (s->XMM_B(4) >> 3) & 0x10; + T0 |= (s->XMM_B(5) >> 2) & 0x20; + T0 |= (s->XMM_B(6) >> 1) & 0x40; + T0 |= (s->XMM_B(7)) & 0x80; +#if SHIFT == 1 + T0 |= (s->XMM_B(8) << 1) & 0x0100; + T0 |= (s->XMM_B(9) << 2) & 0x0200; + T0 |= (s->XMM_B(10) << 3) & 0x0400; + T0 |= (s->XMM_B(11) << 4) & 0x0800; + T0 |= (s->XMM_B(12) << 5) & 0x1000; + T0 |= (s->XMM_B(13) << 6) & 0x2000; + T0 |= (s->XMM_B(14) << 7) & 0x4000; + T0 |= (s->XMM_B(15) << 8) & 0x8000; +#endif +} + +void OPPROTO glue(op_pinsrw, SUFFIX) (void) +{ + Reg *d = (Reg *)((char *)env + PARAM1); + int pos = PARAM2; + + d->W(pos) = T0; +} + +void OPPROTO glue(op_pextrw, 
SUFFIX) (void) +{ + Reg *s = (Reg *)((char *)env + PARAM1); + int pos = PARAM2; + + T0 = s->W(pos); +} + +void OPPROTO glue(op_packsswb, SUFFIX) (void) +{ + Reg r, *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + r.B(0) = satsb((int16_t)d->W(0)); + r.B(1) = satsb((int16_t)d->W(1)); + r.B(2) = satsb((int16_t)d->W(2)); + r.B(3) = satsb((int16_t)d->W(3)); +#if SHIFT == 1 + r.B(4) = satsb((int16_t)d->W(4)); + r.B(5) = satsb((int16_t)d->W(5)); + r.B(6) = satsb((int16_t)d->W(6)); + r.B(7) = satsb((int16_t)d->W(7)); +#endif + r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0)); + r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1)); + r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2)); + r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3)); +#if SHIFT == 1 + r.B(12) = satsb((int16_t)s->W(4)); + r.B(13) = satsb((int16_t)s->W(5)); + r.B(14) = satsb((int16_t)s->W(6)); + r.B(15) = satsb((int16_t)s->W(7)); +#endif + *d = r; +} + +void OPPROTO glue(op_packuswb, SUFFIX) (void) +{ + Reg r, *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + r.B(0) = satub((int16_t)d->W(0)); + r.B(1) = satub((int16_t)d->W(1)); + r.B(2) = satub((int16_t)d->W(2)); + r.B(3) = satub((int16_t)d->W(3)); +#if SHIFT == 1 + r.B(4) = satub((int16_t)d->W(4)); + r.B(5) = satub((int16_t)d->W(5)); + r.B(6) = satub((int16_t)d->W(6)); + r.B(7) = satub((int16_t)d->W(7)); +#endif + r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0)); + r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1)); + r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2)); + r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3)); +#if SHIFT == 1 + r.B(12) = satub((int16_t)s->W(4)); + r.B(13) = satub((int16_t)s->W(5)); + r.B(14) = satub((int16_t)s->W(6)); + r.B(15) = satub((int16_t)s->W(7)); +#endif + *d = r; +} + +void OPPROTO glue(op_packssdw, SUFFIX) (void) +{ + Reg r, *d, *s; + d = (Reg *)((char *)env + PARAM1); + s = (Reg *)((char *)env + PARAM2); + + r.W(0) = satsw(d->L(0)); + r.W(1) = satsw(d->L(1)); +#if SHIFT == 1 + r.W(2) = satsw(d->L(2)); + r.W(3) = satsw(d->L(3)); +#endif + r.W((2 << SHIFT) + 0) = satsw(s->L(0)); + r.W((2 << SHIFT) + 1) = satsw(s->L(1)); +#if SHIFT == 1 + r.W(6) = satsw(s->L(2)); + r.W(7) = satsw(s->L(3)); +#endif + *d = r; +} + +#define UNPCK_OP(base_name, base) \ + \ +void OPPROTO glue(op_punpck ## base_name ## bw, SUFFIX) (void) \ +{ \ + Reg r, *d, *s; \ + d = (Reg *)((char *)env + PARAM1); \ + s = (Reg *)((char *)env + PARAM2); \ + \ + r.B(0) = d->B((base << (SHIFT + 2)) + 0); \ + r.B(1) = s->B((base << (SHIFT + 2)) + 0); \ + r.B(2) = d->B((base << (SHIFT + 2)) + 1); \ + r.B(3) = s->B((base << (SHIFT + 2)) + 1); \ + r.B(4) = d->B((base << (SHIFT + 2)) + 2); \ + r.B(5) = s->B((base << (SHIFT + 2)) + 2); \ + r.B(6) = d->B((base << (SHIFT + 2)) + 3); \ + r.B(7) = s->B((base << (SHIFT + 2)) + 3); \ +XMM_ONLY( \ + r.B(8) = d->B((base << (SHIFT + 2)) + 4); \ + r.B(9) = s->B((base << (SHIFT + 2)) + 4); \ + r.B(10) = d->B((base << (SHIFT + 2)) + 5); \ + r.B(11) = s->B((base << (SHIFT + 2)) + 5); \ + r.B(12) = d->B((base << (SHIFT + 2)) + 6); \ + r.B(13) = s->B((base << (SHIFT + 2)) + 6); \ + r.B(14) = d->B((base << (SHIFT + 2)) + 7); \ + r.B(15) = s->B((base << (SHIFT + 2)) + 7); \ +) \ + *d = r; \ +} \ + \ +void OPPROTO glue(op_punpck ## base_name ## wd, SUFFIX) (void) \ +{ \ + Reg r, *d, *s; \ + d = (Reg *)((char *)env + PARAM1); \ + s = (Reg *)((char *)env + PARAM2); \ + \ + r.W(0) = d->W((base << (SHIFT + 1)) + 0); \ + r.W(1) = s->W((base << (SHIFT + 1)) + 0); \ + r.W(2) = d->W((base << (SHIFT + 1)) + 1); \ + 
r.W(3) = s->W((base << (SHIFT + 1)) + 1); \ +XMM_ONLY( \ + r.W(4) = d->W((base << (SHIFT + 1)) + 2); \ + r.W(5) = s->W((base << (SHIFT + 1)) + 2); \ + r.W(6) = d->W((base << (SHIFT + 1)) + 3); \ + r.W(7) = s->W((base << (SHIFT + 1)) + 3); \ +) \ + *d = r; \ +} \ + \ +void OPPROTO glue(op_punpck ## base_name ## dq, SUFFIX) (void) \ +{ \ + Reg r, *d, *s; \ + d = (Reg *)((char *)env + PARAM1); \ + s = (Reg *)((char *)env + PARAM2); \ + \ + r.L(0) = d->L((base << SHIFT) + 0); \ + r.L(1) = s->L((base << SHIFT) + 0); \ +XMM_ONLY( \ + r.L(2) = d->L((base << SHIFT) + 1); \ + r.L(3) = s->L((base << SHIFT) + 1); \ +) \ + *d = r; \ +} \ + \ +XMM_ONLY( \ +void OPPROTO glue(op_punpck ## base_name ## qdq, SUFFIX) (void) \ +{ \ + Reg r, *d, *s; \ + d = (Reg *)((char *)env + PARAM1); \ + s = (Reg *)((char *)env + PARAM2); \ + \ + r.Q(0) = d->Q(base); \ + r.Q(1) = s->Q(base); \ + *d = r; \ +} \ +) + +UNPCK_OP(l, 0) +UNPCK_OP(h, 1) + +#undef SHIFT +#undef XMM_ONLY +#undef Reg +#undef B +#undef W +#undef L +#undef Q +#undef SUFFIX diff --git a/target-i386/ops_template.h b/target-i386/ops_template.h new file mode 100644 index 0000000..373b77a --- /dev/null +++ b/target-i386/ops_template.h @@ -0,0 +1,597 @@ +/* + * i386 micro operations (included several times to generate + * different operand sizes) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#define DATA_BITS (1 << (3 + SHIFT)) +#define SHIFT_MASK (DATA_BITS - 1) +#define SIGN_MASK (((target_ulong)1) << (DATA_BITS - 1)) +#if DATA_BITS <= 32 +#define SHIFT1_MASK 0x1f +#else +#define SHIFT1_MASK 0x3f +#endif + +#if DATA_BITS == 8 +#define SUFFIX b +#define DATA_TYPE uint8_t +#define DATA_STYPE int8_t +#define DATA_MASK 0xff +#elif DATA_BITS == 16 +#define SUFFIX w +#define DATA_TYPE uint16_t +#define DATA_STYPE int16_t +#define DATA_MASK 0xffff +#elif DATA_BITS == 32 +#define SUFFIX l +#define DATA_TYPE uint32_t +#define DATA_STYPE int32_t +#define DATA_MASK 0xffffffff +#elif DATA_BITS == 64 +#define SUFFIX q +#define DATA_TYPE uint64_t +#define DATA_STYPE int64_t +#define DATA_MASK 0xffffffffffffffffULL +#else +#error unhandled operand size +#endif + +/* dynamic flags computation */ + +static int glue(compute_all_add, SUFFIX)(void) +{ + int cf, pf, af, zf, sf, of; + target_long src1, src2; + src1 = CC_SRC; + src2 = CC_DST - CC_SRC; + cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; + pf = parity_table[(uint8_t)CC_DST]; + af = (CC_DST ^ src1 ^ src2) & 0x10; + zf = ((DATA_TYPE)CC_DST == 0) << 6; + sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80; + of = lshift((src1 ^ src2 ^ -1) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_add, SUFFIX)(void) +{ + int cf; + target_long src1; + src1 = CC_SRC; + cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1; + return cf; +} + +static int glue(compute_all_adc, SUFFIX)(void) +{ + int cf, pf, af, zf, sf, of; + target_long src1, src2; + src1 = CC_SRC; + src2 = CC_DST - CC_SRC - 1; + cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; + pf = parity_table[(uint8_t)CC_DST]; + af = (CC_DST ^ src1 ^ src2) & 0x10; + zf = ((DATA_TYPE)CC_DST == 0) << 6; + sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80; + of = lshift((src1 ^ src2 ^ -1) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_adc, SUFFIX)(void) +{ + int cf; + target_long src1; + src1 = CC_SRC; + cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1; + return cf; +} + +static int glue(compute_all_sub, SUFFIX)(void) +{ + int cf, pf, af, zf, sf, of; + target_long src1, src2; + src1 = CC_DST + CC_SRC; + src2 = CC_SRC; + cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; + pf = parity_table[(uint8_t)CC_DST]; + af = (CC_DST ^ src1 ^ src2) & 0x10; + zf = ((DATA_TYPE)CC_DST == 0) << 6; + sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80; + of = lshift((src1 ^ src2) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_sub, SUFFIX)(void) +{ + int cf; + target_long src1, src2; + src1 = CC_DST + CC_SRC; + src2 = CC_SRC; + cf = (DATA_TYPE)src1 < (DATA_TYPE)src2; + return cf; +} + +static int glue(compute_all_sbb, SUFFIX)(void) +{ + int cf, pf, af, zf, sf, of; + target_long src1, src2; + src1 = CC_DST + CC_SRC + 1; + src2 = CC_SRC; + cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2; + pf = parity_table[(uint8_t)CC_DST]; + af = (CC_DST ^ src1 ^ src2) & 0x10; + zf = ((DATA_TYPE)CC_DST == 0) << 6; + sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80; + of = lshift((src1 ^ src2) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_sbb, SUFFIX)(void) +{ + int cf; + target_long src1, src2; + src1 = CC_DST + CC_SRC + 1; + src2 = 
CC_SRC;
+    cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
+    return cf;
+}
+
+static int glue(compute_all_logic, SUFFIX)(void)
+{
+    int cf, pf, af, zf, sf, of;
+    cf = 0;
+    pf = parity_table[(uint8_t)CC_DST];
+    af = 0;
+    zf = ((DATA_TYPE)CC_DST == 0) << 6;
+    sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+    of = 0;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_logic, SUFFIX)(void)
+{
+    return 0;
+}
+
+static int glue(compute_all_inc, SUFFIX)(void)
+{
+    int cf, pf, af, zf, sf, of;
+    target_long src1, src2;
+    src1 = CC_DST - 1;
+    src2 = 1;
+    cf = CC_SRC;
+    pf = parity_table[(uint8_t)CC_DST];
+    af = (CC_DST ^ src1 ^ src2) & 0x10;
+    zf = ((DATA_TYPE)CC_DST == 0) << 6;
+    sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+    of = ((CC_DST & DATA_MASK) == SIGN_MASK) << 11;
+    return cf | pf | af | zf | sf | of;
+}
+
+#if DATA_BITS == 32
+static int glue(compute_c_inc, SUFFIX)(void)
+{
+    return CC_SRC;
+}
+#endif
+
+static int glue(compute_all_dec, SUFFIX)(void)
+{
+    int cf, pf, af, zf, sf, of;
+    target_long src1, src2;
+    src1 = CC_DST + 1;
+    src2 = 1;
+    cf = CC_SRC;
+    pf = parity_table[(uint8_t)CC_DST];
+    af = (CC_DST ^ src1 ^ src2) & 0x10;
+    zf = ((DATA_TYPE)CC_DST == 0) << 6;
+    sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+    of = ((CC_DST & DATA_MASK) == ((target_ulong)SIGN_MASK - 1)) << 11;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_shl, SUFFIX)(void)
+{
+    int cf, pf, af, zf, sf, of;
+    cf = (CC_SRC >> (DATA_BITS - 1)) & CC_C;
+    pf = parity_table[(uint8_t)CC_DST];
+    af = 0; /* undefined */
+    zf = ((DATA_TYPE)CC_DST == 0) << 6;
+    sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+    /* of is defined if shift count == 1 */
+    of = lshift(CC_SRC ^ CC_DST, 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_shl, SUFFIX)(void)
+{
+    return (CC_SRC >> (DATA_BITS - 1)) & CC_C;
+}
+
+#if DATA_BITS == 32
+static int glue(compute_c_sar, SUFFIX)(void)
+{
+    return CC_SRC & 1;
+}
+#endif
+
+static int glue(compute_all_sar, SUFFIX)(void)
+{
+    int cf, pf, af, zf, sf, of;
+    cf = CC_SRC & 1;
+    pf = parity_table[(uint8_t)CC_DST];
+    af = 0; /* undefined */
+    zf = ((DATA_TYPE)CC_DST == 0) << 6;
+    sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+    /* of is defined if shift count == 1 */
+    of = lshift(CC_SRC ^ CC_DST, 12 - DATA_BITS) & CC_O;
+    return cf | pf | af | zf | sf | of;
+}
+
+#if DATA_BITS == 32
+static int glue(compute_c_mul, SUFFIX)(void)
+{
+    int cf;
+    cf = (CC_SRC != 0);
+    return cf;
+}
+#endif
+
+/* NOTE: we compute the flags like the P4. On older CPUs, only OF and
+   CF are modified and it is slower to do that.
*/
+static int glue(compute_all_mul, SUFFIX)(void)
+{
+    int cf, pf, af, zf, sf, of;
+    cf = (CC_SRC != 0);
+    pf = parity_table[(uint8_t)CC_DST];
+    af = 0; /* undefined */
+    zf = ((DATA_TYPE)CC_DST == 0) << 6;
+    sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+    of = cf << 11;
+    return cf | pf | af | zf | sf | of;
+}
+
+/* various optimized jump cases */
+
+void OPPROTO glue(op_jb_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    if ((DATA_TYPE)src1 < (DATA_TYPE)src2)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jz_sub, SUFFIX)(void)
+{
+    if ((DATA_TYPE)CC_DST == 0)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jnz_sub, SUFFIX)(void)
+{
+    if ((DATA_TYPE)CC_DST != 0)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jbe_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    if ((DATA_TYPE)src1 <= (DATA_TYPE)src2)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_js_sub, SUFFIX)(void)
+{
+    if (CC_DST & SIGN_MASK)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jl_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    if ((DATA_STYPE)src1 < (DATA_STYPE)src2)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jle_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    if ((DATA_STYPE)src1 <= (DATA_STYPE)src2)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+/* oldies */
+
+#if DATA_BITS >= 16
+
+void OPPROTO glue(op_loopnz, SUFFIX)(void)
+{
+    if ((DATA_TYPE)ECX != 0 && !(T0 & CC_Z))
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_loopz, SUFFIX)(void)
+{
+    if ((DATA_TYPE)ECX != 0 && (T0 & CC_Z))
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jz_ecx, SUFFIX)(void)
+{
+    if ((DATA_TYPE)ECX == 0)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+void OPPROTO glue(op_jnz_ecx, SUFFIX)(void)
+{
+    if ((DATA_TYPE)ECX != 0)
+        GOTO_LABEL_PARAM(1);
+    FORCE_RET();
+}
+
+#endif
+
+/* various optimized set cases */
+
+void OPPROTO glue(op_setb_T0_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    T0 = ((DATA_TYPE)src1 < (DATA_TYPE)src2);
+}
+
+void OPPROTO glue(op_setz_T0_sub, SUFFIX)(void)
+{
+    T0 = ((DATA_TYPE)CC_DST == 0);
+}
+
+void OPPROTO glue(op_setbe_T0_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    T0 = ((DATA_TYPE)src1 <= (DATA_TYPE)src2);
+}
+
+void OPPROTO glue(op_sets_T0_sub, SUFFIX)(void)
+{
+    T0 = lshift(CC_DST, -(DATA_BITS - 1)) & 1;
+}
+
+void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    T0 = ((DATA_STYPE)src1 < (DATA_STYPE)src2);
+}
+
+void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void)
+{
+    target_long src1, src2;
+    src1 = CC_DST + CC_SRC;
+    src2 = CC_SRC;
+
+    T0 = ((DATA_STYPE)src1 <= (DATA_STYPE)src2);
+}
+
+/* shifts */
+
+void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void)
+{
+    int count;
+    count = T1 & SHIFT1_MASK;
+    T0 = T0 << count;
+    FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void)
+{
+    int count;
+    count = T1 & SHIFT1_MASK;
+    T0 &= DATA_MASK;
+    T0 = T0 >> count;
+    FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_sar, SUFFIX), _T0_T1)(void)
+{
+    int count;
+    target_long src;
+
+    count = T1 & SHIFT1_MASK;
+    src = (DATA_STYPE)T0;
+    T0 = src >> count;
+    FORCE_RET();
+}
+
+#undef MEM_WRITE
+#include "ops_template_mem.h"
+
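+/* ops_template_mem.h is a template: each inclusion, with a different
+   MEM_WRITE setting, stamps out one copy of every read/modify/write
+   micro operation.  The inclusion above (MEM_WRITE undefined) generates
+   the register operand variants; the inclusions below generate the
+   _raw, _kernel and _user variants that store their result back to
+   memory through A0.  The same multiple-inclusion idiom is what
+   instantiates this very header once per operand size (DATA_BITS is
+   derived from SHIFT above); a minimal sketch of how an including file
+   drives such a template (illustration only, kept inactive): */
+#if 0
+#define SHIFT 1
+#include "ops_template.h"   /* DATA_BITS == 16: generates the "w" ops */
+#undef SHIFT
+#define SHIFT 2
+#include "ops_template.h"   /* DATA_BITS == 32: generates the "l" ops */
+#undef SHIFT
+#endif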
+#define MEM_WRITE 0 +#include "ops_template_mem.h" + +#if !defined(CONFIG_USER_ONLY) +#define MEM_WRITE 1 +#include "ops_template_mem.h" + +#define MEM_WRITE 2 +#include "ops_template_mem.h" +#endif + +/* bit operations */ +#if DATA_BITS >= 16 + +void OPPROTO glue(glue(op_bt, SUFFIX), _T0_T1_cc)(void) +{ + int count; + count = T1 & SHIFT_MASK; + CC_SRC = T0 >> count; +} + +void OPPROTO glue(glue(op_bts, SUFFIX), _T0_T1_cc)(void) +{ + int count; + count = T1 & SHIFT_MASK; + T1 = T0 >> count; + T0 |= (((target_long)1) << count); +} + +void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void) +{ + int count; + count = T1 & SHIFT_MASK; + T1 = T0 >> count; + T0 &= ~(((target_long)1) << count); +} + +void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void) +{ + int count; + count = T1 & SHIFT_MASK; + T1 = T0 >> count; + T0 ^= (((target_long)1) << count); +} + +void OPPROTO glue(glue(op_add_bit, SUFFIX), _A0_T1)(void) +{ + A0 += ((DATA_STYPE)T1 >> (3 + SHIFT)) << SHIFT; +} + +void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void) +{ + int count; + target_long res; + + res = T0 & DATA_MASK; + if (res != 0) { + count = 0; + while ((res & 1) == 0) { + count++; + res >>= 1; + } + T1 = count; + CC_DST = 1; /* ZF = 0 */ + } else { + CC_DST = 0; /* ZF = 1 */ + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_bsr, SUFFIX), _T0_cc)(void) +{ + int count; + target_long res; + + res = T0 & DATA_MASK; + if (res != 0) { + count = DATA_BITS - 1; + while ((res & SIGN_MASK) == 0) { + count--; + res <<= 1; + } + T1 = count; + CC_DST = 1; /* ZF = 0 */ + } else { + CC_DST = 0; /* ZF = 1 */ + } + FORCE_RET(); +} + +#endif + +#if DATA_BITS == 32 +void OPPROTO op_update_bt_cc(void) +{ + CC_SRC = T1; +} +#endif + +/* string operations */ + +void OPPROTO glue(op_movl_T0_Dshift, SUFFIX)(void) +{ + T0 = DF << SHIFT; +} + +/* port I/O */ +#if DATA_BITS <= 32 +void OPPROTO glue(glue(op_out, SUFFIX), _T0_T1)(void) +{ + glue(cpu_out, SUFFIX)(env, T0, T1 & DATA_MASK); +} + +void OPPROTO glue(glue(op_in, SUFFIX), _T0_T1)(void) +{ + T1 = glue(cpu_in, SUFFIX)(env, T0); +} + +void OPPROTO glue(glue(op_in, SUFFIX), _DX_T0)(void) +{ + T0 = glue(cpu_in, SUFFIX)(env, EDX & 0xffff); +} + +void OPPROTO glue(glue(op_out, SUFFIX), _DX_T0)(void) +{ + glue(cpu_out, SUFFIX)(env, EDX & 0xffff, T0); +} + +void OPPROTO glue(glue(op_check_io, SUFFIX), _T0)(void) +{ + glue(glue(check_io, SUFFIX), _T0)(); +} + +void OPPROTO glue(glue(op_check_io, SUFFIX), _DX)(void) +{ + glue(glue(check_io, SUFFIX), _DX)(); +} +#endif + +#undef DATA_BITS +#undef SHIFT_MASK +#undef SHIFT1_MASK +#undef SIGN_MASK +#undef DATA_TYPE +#undef DATA_STYPE +#undef DATA_MASK +#undef SUFFIX diff --git a/target-i386/ops_template_mem.h b/target-i386/ops_template_mem.h new file mode 100644 index 0000000..9f72a8c --- /dev/null +++ b/target-i386/ops_template_mem.h @@ -0,0 +1,483 @@ +/* + * i386 micro operations (included several times to generate + * different operand sizes) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifdef MEM_WRITE + +#if MEM_WRITE == 0 + +#if DATA_BITS == 8 +#define MEM_SUFFIX b_raw +#elif DATA_BITS == 16 +#define MEM_SUFFIX w_raw +#elif DATA_BITS == 32 +#define MEM_SUFFIX l_raw +#elif DATA_BITS == 64 +#define MEM_SUFFIX q_raw +#endif + +#elif MEM_WRITE == 1 + +#if DATA_BITS == 8 +#define MEM_SUFFIX b_kernel +#elif DATA_BITS == 16 +#define MEM_SUFFIX w_kernel +#elif DATA_BITS == 32 +#define MEM_SUFFIX l_kernel +#elif DATA_BITS == 64 +#define MEM_SUFFIX q_kernel +#endif + +#elif MEM_WRITE == 2 + +#if DATA_BITS == 8 +#define MEM_SUFFIX b_user +#elif DATA_BITS == 16 +#define MEM_SUFFIX w_user +#elif DATA_BITS == 32 +#define MEM_SUFFIX l_user +#elif DATA_BITS == 64 +#define MEM_SUFFIX q_user +#endif + +#else + +#error invalid MEM_WRITE + +#endif + +#else + +#define MEM_SUFFIX SUFFIX + +#endif + +void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count; + target_long src; + + if (T1 & SHIFT1_MASK) { + count = T1 & SHIFT_MASK; + src = T0; + T0 &= DATA_MASK; + T0 = (T0 << count) | (T0 >> (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#else + /* gcc 3.2 workaround. This is really a bug in gcc. */ + asm volatile("" : : "r" (T0)); +#endif + CC_SRC = (cc_table[CC_OP].compute_all() & ~(CC_O | CC_C)) | + (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | + (T0 & CC_C); + CC_OP = CC_OP_EFLAGS; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count; + target_long src; + + if (T1 & SHIFT1_MASK) { + count = T1 & SHIFT_MASK; + src = T0; + T0 &= DATA_MASK; + T0 = (T0 >> count) | (T0 << (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#else + /* gcc 3.2 workaround. This is really a bug in gcc. 
*/ + asm volatile("" : : "r" (T0)); +#endif + CC_SRC = (cc_table[CC_OP].compute_all() & ~(CC_O | CC_C)) | + (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | + ((T0 >> (DATA_BITS - 1)) & CC_C); + CC_OP = CC_OP_EFLAGS; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1)(void) +{ + int count; + count = T1 & SHIFT_MASK; + if (count) { + T0 &= DATA_MASK; + T0 = (T0 << count) | (T0 >> (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void) +{ + int count; + count = T1 & SHIFT_MASK; + if (count) { + T0 &= DATA_MASK; + T0 = (T0 >> count) | (T0 << (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count, eflags; + target_ulong src; + target_long res; + + count = T1 & SHIFT1_MASK; +#if DATA_BITS == 16 + count = rclw_table[count]; +#elif DATA_BITS == 8 + count = rclb_table[count]; +#endif + if (count) { + eflags = cc_table[CC_OP].compute_all(); + T0 &= DATA_MASK; + src = T0; + res = (T0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1)); + if (count > 1) + res |= T0 >> (DATA_BITS + 1 - count); + T0 = res; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = (eflags & ~(CC_C | CC_O)) | + (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | + ((src >> (DATA_BITS - count)) & CC_C); + CC_OP = CC_OP_EFLAGS; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count, eflags; + target_ulong src; + target_long res; + + count = T1 & SHIFT1_MASK; +#if DATA_BITS == 16 + count = rclw_table[count]; +#elif DATA_BITS == 8 + count = rclb_table[count]; +#endif + if (count) { + eflags = cc_table[CC_OP].compute_all(); + T0 &= DATA_MASK; + src = T0; + res = (T0 >> count) | ((target_ulong)(eflags & CC_C) << (DATA_BITS - count)); + if (count > 1) + res |= T0 << (DATA_BITS + 1 - count); + T0 = res; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = (eflags & ~(CC_C | CC_O)) | + (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) | + ((src >> (count - 1)) & CC_C); + CC_OP = CC_OP_EFLAGS; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count; + target_long src; + + count = T1 & SHIFT1_MASK; + if (count) { + src = (DATA_TYPE)T0 << (count - 1); + T0 = T0 << count; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = src; + CC_DST = T0; + CC_OP = CC_OP_SHLB + SHIFT; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count; + target_long src; + + count = T1 & SHIFT1_MASK; + if (count) { + T0 &= DATA_MASK; + src = T0 >> (count - 1); + T0 = T0 >> count; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = src; + CC_DST = T0; + CC_OP = CC_OP_SARB + SHIFT; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_sar, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int count; + target_long src; + + count = T1 & SHIFT1_MASK; + if (count) { + src = (DATA_STYPE)T0; + T0 = src >> count; + src = src >> (count - 1); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = src; + CC_DST = T0; + CC_OP = CC_OP_SARB + SHIFT; + } + FORCE_RET(); +} + +#if DATA_BITS == 16 +/* XXX: overflow flag might be incorrect in some cases in shldw */ +void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) +{ + int count; + unsigned int res, tmp; + count = PARAM1; + T1 &= 0xffff; + res = T1 | (T0 << 16); + 
tmp = res >> (32 - count); + res <<= count; + if (count > 16) + res |= T1 << (count - 16); + T0 = res >> 16; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; +} + +void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) +{ + int count; + unsigned int res, tmp; + count = ECX & 0x1f; + if (count) { + T1 &= 0xffff; + res = T1 | (T0 << 16); + tmp = res >> (32 - count); + res <<= count; + if (count > 16) + res |= T1 << (count - 16); + T0 = res >> 16; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; + CC_OP = CC_OP_SARB + SHIFT; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) +{ + int count; + unsigned int res, tmp; + + count = PARAM1; + res = (T0 & 0xffff) | (T1 << 16); + tmp = res >> (count - 1); + res >>= count; + if (count > 16) + res |= T1 << (32 - count); + T0 = res; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; +} + + +void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) +{ + int count; + unsigned int res, tmp; + + count = ECX & 0x1f; + if (count) { + res = (T0 & 0xffff) | (T1 << 16); + tmp = res >> (count - 1); + res >>= count; + if (count > 16) + res |= T1 << (32 - count); + T0 = res; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; + CC_OP = CC_OP_SARB + SHIFT; + } + FORCE_RET(); +} +#endif + +#if DATA_BITS >= 32 +void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void) +{ + int count; + target_long tmp; + + count = PARAM1; + T0 &= DATA_MASK; + T1 &= DATA_MASK; + tmp = T0 << (count - 1); + T0 = (T0 << count) | (T1 >> (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; +} + +void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void) +{ + int count; + target_long tmp; + + count = ECX & SHIFT1_MASK; + if (count) { + T0 &= DATA_MASK; + T1 &= DATA_MASK; + tmp = T0 << (count - 1); + T0 = (T0 << count) | (T1 >> (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; + CC_OP = CC_OP_SHLB + SHIFT; + } + FORCE_RET(); +} + +void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void) +{ + int count; + target_long tmp; + + count = PARAM1; + T0 &= DATA_MASK; + T1 &= DATA_MASK; + tmp = T0 >> (count - 1); + T0 = (T0 >> count) | (T1 << (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; +} + + +void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void) +{ + int count; + target_long tmp; + + count = ECX & SHIFT1_MASK; + if (count) { + T0 &= DATA_MASK; + T1 &= DATA_MASK; + tmp = T0 >> (count - 1); + T0 = (T0 >> count) | (T1 << (DATA_BITS - count)); +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = tmp; + CC_DST = T0; + CC_OP = CC_OP_SARB + SHIFT; + } + FORCE_RET(); +} +#endif + +/* carry add/sub (we only need to set CC_OP differently) */ + +void OPPROTO glue(glue(op_adc, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int cf; + cf = cc_table[CC_OP].compute_c(); + T0 = T0 + T1 + cf; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = T1; + CC_DST = T0; + CC_OP = CC_OP_ADDB + SHIFT + cf * 4; +} + +void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void) +{ + int cf; + cf = cc_table[CC_OP].compute_c(); + T0 = T0 - T1 - cf; +#ifdef MEM_WRITE + glue(st, MEM_SUFFIX)(A0, T0); +#endif + CC_SRC = T1; + CC_DST = T0; + CC_OP = CC_OP_SUBB + SHIFT + cf * 4; +} + +void 
OPPROTO glue(glue(op_cmpxchg, MEM_SUFFIX), _T0_T1_EAX_cc)(void)
+{
+    target_ulong src, dst;
+
+    src = T0;
+    dst = EAX - T0;
+    if ((DATA_TYPE)dst == 0) {
+        T0 = T1;
+#ifdef MEM_WRITE
+        glue(st, MEM_SUFFIX)(A0, T0);
+#endif
+    } else {
+        EAX = (EAX & ~DATA_MASK) | (T0 & DATA_MASK);
+    }
+    CC_SRC = src;
+    CC_DST = dst;
+    FORCE_RET();
+}
+
+#undef MEM_SUFFIX
+#undef MEM_WRITE
diff --git a/target-i386/translate-copy.c b/target-i386/translate-copy.c
new file mode 100644
index 0000000..cf8bd5a
--- /dev/null
+++ b/target-i386/translate-copy.c
@@ -0,0 +1,1323 @@
+/*
+ *  i386 on i386 translation
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include "config.h"
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+
+#ifdef USE_CODE_COPY
+
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/ucontext.h>
+
+extern char exec_loop;
+
+/* operand size */
+enum {
+    OT_BYTE = 0,
+    OT_WORD,
+    OT_LONG,
+    OT_QUAD,
+};
+
+#define PREFIX_REPZ   0x01
+#define PREFIX_REPNZ  0x02
+#define PREFIX_LOCK   0x04
+#define PREFIX_DATA   0x08
+#define PREFIX_ADR    0x10
+
+typedef struct DisasContext {
+    /* current insn context */
+    int override; /* -1 if no override */
+    int prefix;
+    int aflag, dflag;
+    target_ulong pc; /* pc = eip + cs_base */
+    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
+                   static state change (stop translation) */
+    /* code output */
+    uint8_t *gen_code_ptr;
+    uint8_t *gen_code_start;
+
+    /* current block context */
+    target_ulong cs_base; /* base of CS segment */
+    int pe;     /* protected mode */
+    int code32; /* 32 bit code segment */
+    int f_st;   /* currently unused */
+    int vm86;   /* vm86 mode */
+    int cpl;
+    int iopl;
+    int flags;
+    struct TranslationBlock *tb;
+} DisasContext;
+
+#define CPU_FIELD_OFFSET(field) offsetof(CPUState, field)
+
+#define CPU_SEG 0x64 /* fs override */
+
+static inline void gb(DisasContext *s, uint32_t val)
+{
+    *s->gen_code_ptr++ = val;
+}
+
+static inline void gw(DisasContext *s, uint32_t val)
+{
+    *s->gen_code_ptr++ = val;
+    *s->gen_code_ptr++ = val >> 8;
+}
+
+static inline void gl(DisasContext *s, uint32_t val)
+{
+    *s->gen_code_ptr++ = val;
+    *s->gen_code_ptr++ = val >> 8;
+    *s->gen_code_ptr++ = val >> 16;
+    *s->gen_code_ptr++ = val >> 24;
+}
+
+static inline void gjmp(DisasContext *s, long val)
+{
+    gb(s, 0xe9); /* jmp */
+    gl(s, val - (long)(s->gen_code_ptr + 4));
+}
+
+static inline void gen_movl_addr_im(DisasContext *s,
+                                    uint32_t addr, uint32_t val)
+{
+    gb(s, CPU_SEG); /* seg movl im, addr */
+    gb(s, 0xc7);
+    gb(s, 0x05);
+    gl(s, addr);
+    gl(s, val);
+}
+
+static inline void gen_movw_addr_im(DisasContext *s,
+                                    uint32_t addr, uint32_t val)
+{
+    gb(s, CPU_SEG); /* seg movw im, addr
*/ + gb(s, 0x66); + gb(s, 0xc7); + gb(s, 0x05); + gl(s, addr); + gw(s, val); +} + + +static void gen_jmp(DisasContext *s, uint32_t target_eip) +{ + TranslationBlock *tb = s->tb; + + gb(s, 0xe9); /* jmp */ + tb->tb_jmp_offset[0] = s->gen_code_ptr - s->gen_code_start; + gl(s, 0); + + tb->tb_next_offset[0] = s->gen_code_ptr - s->gen_code_start; + gen_movl_addr_im(s, CPU_FIELD_OFFSET(eip), target_eip); + gen_movl_addr_im(s, CPU_FIELD_OFFSET(tmp0), (uint32_t)tb); + gjmp(s, (long)&exec_loop); + + s->is_jmp = 1; +} + +static void gen_jcc(DisasContext *s, int op, + uint32_t target_eip, uint32_t next_eip) +{ + TranslationBlock *tb = s->tb; + + gb(s, 0x0f); /* jcc */ + gb(s, 0x80 + op); + tb->tb_jmp_offset[0] = s->gen_code_ptr - s->gen_code_start; + gl(s, 0); + gb(s, 0xe9); /* jmp */ + tb->tb_jmp_offset[1] = s->gen_code_ptr - s->gen_code_start; + gl(s, 0); + + tb->tb_next_offset[0] = s->gen_code_ptr - s->gen_code_start; + gen_movl_addr_im(s, CPU_FIELD_OFFSET(eip), target_eip); + gen_movl_addr_im(s, CPU_FIELD_OFFSET(tmp0), (uint32_t)tb); + gjmp(s, (long)&exec_loop); + + tb->tb_next_offset[1] = s->gen_code_ptr - s->gen_code_start; + gen_movl_addr_im(s, CPU_FIELD_OFFSET(eip), next_eip); + gen_movl_addr_im(s, CPU_FIELD_OFFSET(tmp0), (uint32_t)tb | 1); + gjmp(s, (long)&exec_loop); + + s->is_jmp = 1; +} + +static void gen_eob(DisasContext *s) +{ + gen_movl_addr_im(s, CPU_FIELD_OFFSET(tmp0), 0); + gjmp(s, (long)&exec_loop); + + s->is_jmp = 1; +} + +static inline void gen_lea_modrm(DisasContext *s, int modrm) +{ + int havesib; + int base, disp; + int index; + int scale; + int mod, rm, code; + + mod = (modrm >> 6) & 3; + rm = modrm & 7; + + if (s->aflag) { + + havesib = 0; + base = rm; + index = 0; + scale = 0; + + if (base == 4) { + havesib = 1; + code = ldub_code(s->pc++); + scale = (code >> 6) & 3; + index = (code >> 3) & 7; + base = code & 7; + } + + switch (mod) { + case 0: + if (base == 5) { + base = -1; + disp = ldl_code(s->pc); + s->pc += 4; + } else { + disp = 0; + } + break; + case 1: + disp = (int8_t)ldub_code(s->pc++); + break; + default: + case 2: + disp = ldl_code(s->pc); + s->pc += 4; + break; + } + + } else { + switch (mod) { + case 0: + if (rm == 6) { + disp = lduw_code(s->pc); + s->pc += 2; + } else { + disp = 0; + } + break; + case 1: + disp = (int8_t)ldub_code(s->pc++); + break; + default: + case 2: + disp = lduw_code(s->pc); + s->pc += 2; + break; + } + } +} + +static inline void parse_modrm(DisasContext *s, int modrm) +{ + if ((modrm & 0xc0) != 0xc0) + gen_lea_modrm(s, modrm); +} + +static inline uint32_t insn_get(DisasContext *s, int ot) +{ + uint32_t ret; + + switch(ot) { + case OT_BYTE: + ret = ldub_code(s->pc); + s->pc++; + break; + case OT_WORD: + ret = lduw_code(s->pc); + s->pc += 2; + break; + default: + case OT_LONG: + ret = ldl_code(s->pc); + s->pc += 4; + break; + } + return ret; +} + +/* convert one instruction. s->is_jmp is set if the translation must + be stopped. 
*/ +static int disas_insn(DisasContext *s) +{ + target_ulong pc_start, pc_tmp, pc_start_insn; + int b, prefixes, aflag, dflag, next_eip, val; + int ot; + int modrm, mod, op, rm; + + pc_start = s->pc; + prefixes = 0; + aflag = s->code32; + dflag = s->code32; + s->override = -1; + next_byte: + b = ldub_code(s->pc); + s->pc++; + /* check prefixes */ + switch (b) { + case 0xf3: + prefixes |= PREFIX_REPZ; + goto next_byte; + case 0xf2: + prefixes |= PREFIX_REPNZ; + goto next_byte; + case 0xf0: + prefixes |= PREFIX_LOCK; + goto next_byte; + case 0x2e: + s->override = R_CS; + goto next_byte; + case 0x36: + s->override = R_SS; + goto next_byte; + case 0x3e: + s->override = R_DS; + goto next_byte; + case 0x26: + s->override = R_ES; + goto next_byte; + case 0x64: + s->override = R_FS; + goto next_byte; + case 0x65: + s->override = R_GS; + goto next_byte; + case 0x66: + prefixes |= PREFIX_DATA; + goto next_byte; + case 0x67: + prefixes |= PREFIX_ADR; + goto next_byte; + } + + if (prefixes & PREFIX_DATA) + dflag ^= 1; + if (prefixes & PREFIX_ADR) + aflag ^= 1; + + s->prefix = prefixes; + s->aflag = aflag; + s->dflag = dflag; + + /* lock generation */ + if (prefixes & PREFIX_LOCK) + goto unsupported_op; + if (s->override == R_FS || s->override == R_GS || s->override == R_CS) + goto unsupported_op; + + pc_start_insn = s->pc - 1; + /* now check op code */ + reswitch: + switch(b) { + case 0x0f: + /**************************/ + /* extended op code */ + b = ldub_code(s->pc++) | 0x100; + goto reswitch; + + /**************************/ + /* arith & logic */ + case 0x00 ... 0x05: + case 0x08 ... 0x0d: + case 0x10 ... 0x15: + case 0x18 ... 0x1d: + case 0x20 ... 0x25: + case 0x28 ... 0x2d: + case 0x30 ... 0x35: + case 0x38 ... 0x3d: + { + int f; + f = (b >> 1) & 3; + + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + + switch(f) { + case 0: /* OP Ev, Gv */ + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + break; + case 1: /* OP Gv, Ev */ + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + break; + case 2: /* OP A, Iv */ + insn_get(s, ot); + break; + } + } + break; + + case 0x80: /* GRP1 */ + case 0x81: + case 0x82: + case 0x83: + { + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + + switch(b) { + default: + case 0x80: + case 0x81: + case 0x82: + insn_get(s, ot); + break; + case 0x83: + insn_get(s, OT_BYTE); + break; + } + } + break; + + /**************************/ + /* inc, dec, and other misc arith */ + case 0x40 ... 0x47: /* inc Gv */ + break; + case 0x48 ... 0x4f: /* dec Gv */ + break; + case 0xf6: /* GRP3 */ + case 0xf7: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + + modrm = ldub_code(s->pc++); + op = (modrm >> 3) & 7; + parse_modrm(s, modrm); + + switch(op) { + case 0: /* test */ + insn_get(s, ot); + break; + case 2: /* not */ + break; + case 3: /* neg */ + break; + case 4: /* mul */ + break; + case 5: /* imul */ + break; + case 6: /* div */ + break; + case 7: /* idiv */ + break; + default: + goto illegal_op; + } + break; + + case 0xfe: /* GRP4 */ + case 0xff: /* GRP5 */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? 
OT_LONG : OT_WORD; + + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + if (op >= 2 && b == 0xfe) { + goto illegal_op; + } + pc_tmp = s->pc; + parse_modrm(s, modrm); + + switch(op) { + case 0: /* inc Ev */ + break; + case 1: /* dec Ev */ + break; + case 2: /* call Ev */ + /* XXX: optimize and handle MEM exceptions specifically + fs movl %eax, regs[0] + movl Ev, %eax + pushl next_eip + fs movl %eax, eip + */ + goto unsupported_op; + case 3: /* lcall Ev */ + goto unsupported_op; + case 4: /* jmp Ev */ + /* XXX: optimize and handle MEM exceptions specifically + fs movl %eax, regs[0] + movl Ev, %eax + fs movl %eax, eip + */ + goto unsupported_op; + case 5: /* ljmp Ev */ + goto unsupported_op; + case 6: /* push Ev */ + break; + default: + goto illegal_op; + } + break; + case 0xa8: /* test eAX, Iv */ + case 0xa9: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + insn_get(s, ot); + break; + + case 0x98: /* CWDE/CBW */ + break; + case 0x99: /* CDQ/CWD */ + break; + case 0x1af: /* imul Gv, Ev */ + case 0x69: /* imul Gv, Ev, I */ + case 0x6b: + ot = dflag ? OT_LONG : OT_WORD; + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + if (b == 0x69) { + insn_get(s, ot); + } else if (b == 0x6b) { + insn_get(s, OT_BYTE); + } else { + } + break; + + case 0x84: /* test Ev, Gv */ + case 0x85: + + case 0x1c0: + case 0x1c1: /* xadd Ev, Gv */ + + case 0x1b0: + case 0x1b1: /* cmpxchg Ev, Gv */ + + case 0x8f: /* pop Ev */ + + case 0x88: + case 0x89: /* mov Gv, Ev */ + + case 0x8a: + case 0x8b: /* mov Ev, Gv */ + + case 0x1b6: /* movzbS Gv, Eb */ + case 0x1b7: /* movzwS Gv, Eb */ + case 0x1be: /* movsbS Gv, Eb */ + case 0x1bf: /* movswS Gv, Eb */ + + case 0x86: + case 0x87: /* xchg Ev, Gv */ + + case 0xd0: + case 0xd1: /* shift Ev,1 */ + + case 0xd2: + case 0xd3: /* shift Ev,cl */ + + case 0x1a5: /* shld cl */ + case 0x1ad: /* shrd cl */ + + case 0x190 ... 0x19f: /* setcc Gv */ + + /* XXX: emulate cmov if not available ? */ + case 0x140 ... 0x14f: /* cmov Gv, Ev */ + + case 0x1a3: /* bt Gv, Ev */ + case 0x1ab: /* bts */ + case 0x1b3: /* btr */ + case 0x1bb: /* btc */ + + case 0x1bc: /* bsf */ + case 0x1bd: /* bsr */ + + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + break; + + case 0x1c7: /* cmpxchg8b */ + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + parse_modrm(s, modrm); + break; + + /**************************/ + /* push/pop */ + case 0x50 ... 0x57: /* push */ + case 0x58 ... 0x5f: /* pop */ + case 0x60: /* pusha */ + case 0x61: /* popa */ + break; + + case 0x68: /* push Iv */ + case 0x6a: + ot = dflag ? 
OT_LONG : OT_WORD; + if (b == 0x68) + insn_get(s, ot); + else + insn_get(s, OT_BYTE); + break; + case 0xc8: /* enter */ + lduw_code(s->pc); + s->pc += 2; + ldub_code(s->pc++); + break; + case 0xc9: /* leave */ + break; + + case 0x06: /* push es */ + case 0x0e: /* push cs */ + case 0x16: /* push ss */ + case 0x1e: /* push ds */ + /* XXX: optimize: + push segs[n].selector + */ + goto unsupported_op; + case 0x1a0: /* push fs */ + case 0x1a8: /* push gs */ + goto unsupported_op; + case 0x07: /* pop es */ + case 0x17: /* pop ss */ + case 0x1f: /* pop ds */ + goto unsupported_op; + case 0x1a1: /* pop fs */ + case 0x1a9: /* pop gs */ + goto unsupported_op; + case 0x8e: /* mov seg, Gv */ + /* XXX: optimize: + fs movl r, regs[] + movl segs[].selector, r + mov r, Gv + fs movl regs[], r + */ + goto unsupported_op; + case 0x8c: /* mov Gv, seg */ + goto unsupported_op; + case 0xc4: /* les Gv */ + op = R_ES; + goto do_lxx; + case 0xc5: /* lds Gv */ + op = R_DS; + goto do_lxx; + case 0x1b2: /* lss Gv */ + op = R_SS; + goto do_lxx; + case 0x1b4: /* lfs Gv */ + op = R_FS; + goto do_lxx; + case 0x1b5: /* lgs Gv */ + op = R_GS; + do_lxx: + goto unsupported_op; + /************************/ + /* floats */ + case 0xd8 ... 0xdf: +#if 1 + /* currently not stable enough */ + goto unsupported_op; +#else + if (s->flags & (HF_EM_MASK | HF_TS_MASK)) + goto unsupported_op; +#endif +#if 0 + /* for testing FPU context switch */ + { + static int count; + count = (count + 1) % 3; + if (count != 0) + goto unsupported_op; + } +#endif + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + rm = modrm & 7; + op = ((b & 7) << 3) | ((modrm >> 3) & 7); + if (mod != 3) { + /* memory op */ + parse_modrm(s, modrm); + switch(op) { + case 0x00 ... 0x07: /* fxxxs */ + case 0x10 ... 0x17: /* fixxxl */ + case 0x20 ... 0x27: /* fxxxl */ + case 0x30 ... 0x37: /* fixxx */ + break; + case 0x08: /* flds */ + case 0x0a: /* fsts */ + case 0x0b: /* fstps */ + case 0x18: /* fildl */ + case 0x1a: /* fistl */ + case 0x1b: /* fistpl */ + case 0x28: /* fldl */ + case 0x2a: /* fstl */ + case 0x2b: /* fstpl */ + case 0x38: /* filds */ + case 0x3a: /* fists */ + case 0x3b: /* fistps */ + case 0x0c: /* fldenv mem */ + case 0x0d: /* fldcw mem */ + case 0x0e: /* fnstenv mem */ + case 0x0f: /* fnstcw mem */ + case 0x1d: /* fldt mem */ + case 0x1f: /* fstpt mem */ + case 0x2c: /* frstor mem */ + case 0x2e: /* fnsave mem */ + case 0x2f: /* fnstsw mem */ + case 0x3c: /* fbld */ + case 0x3e: /* fbstp */ + case 0x3d: /* fildll */ + case 0x3f: /* fistpll */ + break; + default: + goto illegal_op; + } + } else { + /* register float ops */ + switch(op) { + case 0x08: /* fld sti */ + case 0x09: /* fxchg sti */ + break; + case 0x0a: /* grp d9/2 */ + switch(rm) { + case 0: /* fnop */ + break; + default: + goto illegal_op; + } + break; + case 0x0c: /* grp d9/4 */ + switch(rm) { + case 0: /* fchs */ + case 1: /* fabs */ + case 4: /* ftst */ + case 5: /* fxam */ + break; + default: + goto illegal_op; + } + break; + case 0x0d: /* grp d9/5 */ + switch(rm) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + default: + goto illegal_op; + } + break; + case 0x0e: /* grp d9/6 */ + break; + case 0x0f: /* grp d9/7 */ + break; + case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ + case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ + case 0x30: case 0x31: case 0x34 ... 
0x37: /* fxxxp sti, st */ + break; + case 0x02: /* fcom */ + break; + case 0x03: /* fcomp */ + break; + case 0x15: /* da/5 */ + switch(rm) { + case 1: /* fucompp */ + break; + default: + goto illegal_op; + } + break; + case 0x1c: + switch(rm) { + case 0: /* feni (287 only, just do nop here) */ + case 1: /* fdisi (287 only, just do nop here) */ + goto unsupported_op; + case 2: /* fclex */ + case 3: /* fninit */ + case 4: /* fsetpm (287 only, just do nop here) */ + break; + default: + goto illegal_op; + } + break; + case 0x1d: /* fucomi */ + break; + case 0x1e: /* fcomi */ + break; + case 0x28: /* ffree sti */ + break; + case 0x2a: /* fst sti */ + break; + case 0x2b: /* fstp sti */ + break; + case 0x2c: /* fucom st(i) */ + break; + case 0x2d: /* fucomp st(i) */ + break; + case 0x33: /* de/3 */ + switch(rm) { + case 1: /* fcompp */ + break; + default: + goto illegal_op; + } + break; + case 0x3c: /* df/4 */ + switch(rm) { + case 0: + break; + default: + goto illegal_op; + } + break; + case 0x3d: /* fucomip */ + break; + case 0x3e: /* fcomip */ + break; + case 0x10 ... 0x13: /* fcmovxx */ + case 0x18 ... 0x1b: + break; + default: + goto illegal_op; + } + } + s->tb->cflags |= CF_TB_FP_USED; + break; + + /**************************/ + /* mov */ + case 0xc6: + case 0xc7: /* mov Ev, Iv */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + insn_get(s, ot); + break; + + case 0x8d: /* lea */ + ot = dflag ? OT_LONG : OT_WORD; + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + parse_modrm(s, modrm); + break; + + case 0xa0: /* mov EAX, Ov */ + case 0xa1: + case 0xa2: /* mov Ov, EAX */ + case 0xa3: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + if (s->aflag) + insn_get(s, OT_LONG); + else + insn_get(s, OT_WORD); + break; + case 0xd7: /* xlat */ + break; + case 0xb0 ... 0xb7: /* mov R, Ib */ + insn_get(s, OT_BYTE); + break; + case 0xb8 ... 0xbf: /* mov R, Iv */ + ot = dflag ? OT_LONG : OT_WORD; + insn_get(s, ot); + break; + + case 0x91 ... 
0x97: /* xchg R, EAX */ + break; + + /************************/ + /* shifts */ + case 0xc0: + case 0xc1: /* shift Ev,imm */ + + case 0x1a4: /* shld imm */ + case 0x1ac: /* shrd imm */ + modrm = ldub_code(s->pc++); + parse_modrm(s, modrm); + ldub_code(s->pc++); + break; + + /************************/ + /* string ops */ + + case 0xa4: /* movsS */ + case 0xa5: + break; + + case 0xaa: /* stosS */ + case 0xab: + break; + + case 0xac: /* lodsS */ + case 0xad: + break; + + case 0xae: /* scasS */ + case 0xaf: + break; + + case 0xa6: /* cmpsS */ + case 0xa7: + break; + + case 0x6c: /* insS */ + case 0x6d: + goto unsupported_op; + + case 0x6e: /* outsS */ + case 0x6f: + goto unsupported_op; + + /************************/ + /* port I/O */ + case 0xe4: + case 0xe5: + goto unsupported_op; + + case 0xe6: + case 0xe7: + goto unsupported_op; + + case 0xec: + case 0xed: + goto unsupported_op; + + case 0xee: + case 0xef: + goto unsupported_op; + + /************************/ + /* control */ +#if 0 + case 0xc2: /* ret im */ + val = ldsw_code(s->pc); + s->pc += 2; + gen_pop_T0(s); + gen_stack_update(s, val + (2 << s->dflag)); + if (s->dflag == 0) + gen_op_andl_T0_ffff(); + gen_op_jmp_T0(); + gen_eob(s); + break; +#endif + + case 0xc3: /* ret */ + gb(s, CPU_SEG); + if (!s->dflag) + gb(s, 0x66); /* d16 */ + gb(s, 0x8f); /* pop addr */ + gb(s, 0x05); + gl(s, CPU_FIELD_OFFSET(eip)); + if (!s->dflag) { + /* reset high bits of EIP */ + gen_movw_addr_im(s, CPU_FIELD_OFFSET(eip) + 2, 0); + } + gen_eob(s); + goto no_copy; + case 0xca: /* lret im */ + case 0xcb: /* lret */ + case 0xcf: /* iret */ + case 0x9a: /* lcall im */ + case 0xea: /* ljmp im */ + goto unsupported_op; + + case 0xe8: /* call im */ + ot = dflag ? OT_LONG : OT_WORD; + val = insn_get(s, ot); + next_eip = s->pc - s->cs_base; + val += next_eip; + if (s->dflag) { + gb(s, 0x68); /* pushl imm */ + gl(s, next_eip); + } else { + gb(s, 0x66); /* pushw imm */ + gb(s, 0x68); + gw(s, next_eip); + val &= 0xffff; + } + gen_jmp(s, val); + goto no_copy; + case 0xe9: /* jmp */ + ot = dflag ? OT_LONG : OT_WORD; + val = insn_get(s, ot); + val += s->pc - s->cs_base; + if (s->dflag == 0) + val = val & 0xffff; + gen_jmp(s, val); + goto no_copy; + case 0xeb: /* jmp Jb */ + val = (int8_t)insn_get(s, OT_BYTE); + val += s->pc - s->cs_base; + if (s->dflag == 0) + val = val & 0xffff; + gen_jmp(s, val); + goto no_copy; + case 0x70 ... 0x7f: /* jcc Jb */ + val = (int8_t)insn_get(s, OT_BYTE); + goto do_jcc; + case 0x180 ... 0x18f: /* jcc Jv */ + if (dflag) { + val = insn_get(s, OT_LONG); + } else { + val = (int16_t)insn_get(s, OT_WORD); + } + do_jcc: + next_eip = s->pc - s->cs_base; + val += next_eip; + if (s->dflag == 0) + val &= 0xffff; + gen_jcc(s, b & 0xf, val, next_eip); + goto no_copy; + + /************************/ + /* flags */ + case 0x9c: /* pushf */ + /* XXX: put specific code ? */ + goto unsupported_op; + case 0x9d: /* popf */ + goto unsupported_op; + + case 0x9e: /* sahf */ + case 0x9f: /* lahf */ + case 0xf5: /* cmc */ + case 0xf8: /* clc */ + case 0xf9: /* stc */ + case 0xfc: /* cld */ + case 0xfd: /* std */ + break; + + /************************/ + /* bit operations */ + case 0x1ba: /* bt/bts/btr/btc Gv, im */ + ot = dflag ? 
OT_LONG : OT_WORD; + modrm = ldub_code(s->pc++); + op = (modrm >> 3) & 7; + parse_modrm(s, modrm); + /* load shift */ + ldub_code(s->pc++); + if (op < 4) + goto illegal_op; + break; + /************************/ + /* bcd */ + case 0x27: /* daa */ + break; + case 0x2f: /* das */ + break; + case 0x37: /* aaa */ + break; + case 0x3f: /* aas */ + break; + case 0xd4: /* aam */ + ldub_code(s->pc++); + break; + case 0xd5: /* aad */ + ldub_code(s->pc++); + break; + /************************/ + /* misc */ + case 0x90: /* nop */ + break; + case 0x9b: /* fwait */ + if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == + (HF_MP_MASK | HF_TS_MASK)) { + goto unsupported_op; + } + break; + case 0xcc: /* int3 */ + goto unsupported_op; + case 0xcd: /* int N */ + goto unsupported_op; + case 0xce: /* into */ + goto unsupported_op; + case 0xf1: /* icebp (undocumented, exits to external debugger) */ + goto unsupported_op; + case 0xfa: /* cli */ + goto unsupported_op; + case 0xfb: /* sti */ + goto unsupported_op; + case 0x62: /* bound */ + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + parse_modrm(s, modrm); + break; + case 0x1c8 ... 0x1cf: /* bswap reg */ + break; + case 0xd6: /* salc */ + break; + case 0xe0: /* loopnz */ + case 0xe1: /* loopz */ + case 0xe2: /* loop */ + case 0xe3: /* jecxz */ + goto unsupported_op; + + case 0x130: /* wrmsr */ + case 0x132: /* rdmsr */ + goto unsupported_op; + case 0x131: /* rdtsc */ + goto unsupported_op; + case 0x1a2: /* cpuid */ + goto unsupported_op; + case 0xf4: /* hlt */ + goto unsupported_op; + case 0x100: + goto unsupported_op; + case 0x101: + goto unsupported_op; + case 0x108: /* invd */ + case 0x109: /* wbinvd */ + goto unsupported_op; + case 0x63: /* arpl */ + goto unsupported_op; + case 0x102: /* lar */ + case 0x103: /* lsl */ + goto unsupported_op; + case 0x118: + goto unsupported_op; + case 0x120: /* mov reg, crN */ + case 0x122: /* mov crN, reg */ + goto unsupported_op; + case 0x121: /* mov reg, drN */ + case 0x123: /* mov drN, reg */ + goto unsupported_op; + case 0x106: /* clts */ + goto unsupported_op; + default: + goto illegal_op; + } + + /* just copy the code */ + + /* no override yet */ + if (!s->dflag) + gb(s, 0x66); + if (!s->aflag) + gb(s, 0x67); + if (prefixes & PREFIX_REPZ) + gb(s, 0xf3); + else if (prefixes & PREFIX_REPNZ) + gb(s, 0xf2); + { + int len, i; + len = s->pc - pc_start_insn; + for(i = 0; i < len; i++) { + *s->gen_code_ptr++ = ldub_code(pc_start_insn + i); + } + } + no_copy: + return 0; + illegal_op: + unsupported_op: + /* fall back to slower code gen necessary */ + s->pc = pc_start; + return -1; +} + +#define GEN_CODE_MAX_SIZE 8192 +#define GEN_CODE_MAX_INSN_SIZE 512 + +static inline int gen_intermediate_code_internal(CPUState *env, + TranslationBlock *tb, + uint8_t *gen_code_ptr, + int *gen_code_size_ptr, + int search_pc, + uint8_t *tc_ptr) +{ + DisasContext dc1, *dc = &dc1; + target_ulong pc_insn, pc_start, cs_base; + uint8_t *gen_code_end; + int flags, ret; + + if (env->nb_breakpoints > 0 || + env->singlestep_enabled) + return -1; + flags = tb->flags; + if (flags & (HF_TF_MASK | HF_ADDSEG_MASK | + HF_SOFTMMU_MASK | HF_INHIBIT_IRQ_MASK)) + return -1; + if (!(flags & HF_SS32_MASK)) + return -1; + if (tb->cflags & CF_SINGLE_INSN) + return -1; + gen_code_end = gen_code_ptr + + GEN_CODE_MAX_SIZE - GEN_CODE_MAX_INSN_SIZE; + dc->gen_code_ptr = gen_code_ptr; + dc->gen_code_start = gen_code_ptr; + + /* generate intermediate code */ + pc_start = tb->pc; + cs_base = tb->cs_base; + dc->pc = pc_start; + dc->cs_base 
= cs_base;
+    dc->pe = (flags >> HF_PE_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tb = tb;
+    dc->flags = flags;
+
+    dc->is_jmp = 0;
+
+    for(;;) {
+        pc_insn = dc->pc;
+        ret = disas_insn(dc);
+        if (ret < 0) {
+            /* unsupported insn */
+            if (dc->pc == pc_start) {
+                /* if this is the first instruction, signal that no copying was done */
+                return -1;
+            } else {
+                gen_jmp(dc, dc->pc - dc->cs_base);
+                dc->is_jmp = 1;
+            }
+        }
+        if (search_pc) {
+            /* search pc mode */
+            if (tc_ptr < dc->gen_code_ptr) {
+                env->eip = pc_insn - cs_base;
+                return 0;
+            }
+        }
+        /* stop translation if indicated */
+        if (dc->is_jmp)
+            break;
+        /* if the translation is too long, stop generation */
+        if (dc->gen_code_ptr >= gen_code_end ||
+            (dc->pc - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
+            gen_jmp(dc, dc->pc - dc->cs_base);
+            break;
+        }
+    }
+
+#ifdef DEBUG_DISAS
+    if (loglevel & CPU_LOG_TB_IN_ASM) {
+        fprintf(logfile, "----------------\n");
+        fprintf(logfile, "IN: COPY: %s fpu=%d\n",
+                lookup_symbol(pc_start),
+                tb->cflags & CF_TB_FP_USED ? 1 : 0);
+        target_disas(logfile, pc_start, dc->pc - pc_start, !dc->code32);
+        fprintf(logfile, "\n");
+    }
+#endif
+
+    if (!search_pc) {
+        *gen_code_size_ptr = dc->gen_code_ptr - dc->gen_code_start;
+        tb->size = dc->pc - pc_start;
+        tb->cflags |= CF_CODE_COPY;
+        return 0;
+    } else {
+        return -1;
+    }
+}
+
+/* generate code by just copying data. Return -1 if it cannot generate
+   any code. Return 0 if code was generated */
+int cpu_gen_code_copy(CPUState *env, TranslationBlock *tb,
+                      int max_code_size, int *gen_code_size_ptr)
+{
+    /* generate machine code */
+    tb->tb_next_offset[0] = 0xffff;
+    tb->tb_next_offset[1] = 0xffff;
+#ifdef USE_DIRECT_JUMP
+    /* the following two entries are optional (only used for string ops) */
+    tb->tb_jmp_offset[2] = 0xffff;
+    tb->tb_jmp_offset[3] = 0xffff;
+#endif
+    return gen_intermediate_code_internal(env, tb,
+                                          tb->tc_ptr, gen_code_size_ptr,
+                                          0, NULL);
+}
+
+static uint8_t dummy_gen_code_buf[GEN_CODE_MAX_SIZE];
+
+int cpu_restore_state_copy(TranslationBlock *tb,
+                           CPUState *env, unsigned long searched_pc,
+                           void *puc)
+{
+    struct ucontext *uc = puc;
+    int ret, eflags;
+
+    /* find opc index corresponding to search_pc */
+    if (searched_pc < (unsigned long)tb->tc_ptr)
+        return -1;
+    searched_pc = searched_pc - (long)tb->tc_ptr + (long)dummy_gen_code_buf;
+    ret = gen_intermediate_code_internal(env, tb,
+                                         dummy_gen_code_buf, NULL,
+                                         1, (uint8_t *)searched_pc);
+    if (ret < 0)
+        return ret;
+    /* restore all the CPU state from the signal's CPU context. The FPU
+       context stays in the host CPU.
*/ + + env->regs[R_EAX] = uc->uc_mcontext.gregs[REG_EAX]; + env->regs[R_ECX] = uc->uc_mcontext.gregs[REG_ECX]; + env->regs[R_EDX] = uc->uc_mcontext.gregs[REG_EDX]; + env->regs[R_EBX] = uc->uc_mcontext.gregs[REG_EBX]; + env->regs[R_ESP] = uc->uc_mcontext.gregs[REG_ESP]; + env->regs[R_EBP] = uc->uc_mcontext.gregs[REG_EBP]; + env->regs[R_ESI] = uc->uc_mcontext.gregs[REG_ESI]; + env->regs[R_EDI] = uc->uc_mcontext.gregs[REG_EDI]; + eflags = uc->uc_mcontext.gregs[REG_EFL]; + env->df = 1 - (2 * ((eflags >> 10) & 1)); + env->cc_src = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + env->cc_op = CC_OP_EFLAGS; + return 0; +} + +#endif /* USE_CODE_COPY */ diff --git a/target-i386/translate.c b/target-i386/translate.c new file mode 100644 index 0000000..f905f32 --- /dev/null +++ b/target-i386/translate.c @@ -0,0 +1,6523 @@ +/* + * i386 translation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <stdarg.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <signal.h> +#include <assert.h> + +#include "cpu.h" +#include "exec-all.h" +#include "disas.h" + +/* XXX: move that elsewhere */ +static uint16_t *gen_opc_ptr; +static uint32_t *gen_opparam_ptr; + +#define PREFIX_REPZ 0x01 +#define PREFIX_REPNZ 0x02 +#define PREFIX_LOCK 0x04 +#define PREFIX_DATA 0x08 +#define PREFIX_ADR 0x10 + +#ifdef TARGET_X86_64 +#define X86_64_ONLY(x) x +#define X86_64_DEF(x...) x +#define CODE64(s) ((s)->code64) +#define REX_X(s) ((s)->rex_x) +#define REX_B(s) ((s)->rex_b) +/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */ +#if 1 +#define BUGGY_64(x) NULL +#endif +#else +#define X86_64_ONLY(x) NULL +#define X86_64_DEF(x...) 
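+/* With TARGET_X86_64 unset, the x86_64-only entries compile away:
+   X86_64_ONLY(x) yields a NULL table slot and X86_64_DEF(x...) expands
+   to nothing, so the micro-op dispatch tables that follow can be
+   written once for both configurations.  A minimal sketch of the
+   pattern (hypothetical op names, kept inactive): */
+#if 0
+static GenOpFunc *gen_op_demo[3] = {
+    gen_op_demo_w,                /* 16 bit variant */
+    gen_op_demo_l,                /* 32 bit variant */
+    X86_64_ONLY(gen_op_demo_q),   /* NULL slot unless TARGET_X86_64 */
+};
+#endif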
+#define CODE64(s) 0
+#define REX_X(s) 0
+#define REX_B(s) 0
+#endif
+
+#ifdef TARGET_X86_64
+static int x86_64_hregs;
+#endif
+
+#ifdef USE_DIRECT_JUMP
+#define TBPARAM(x)
+#else
+#define TBPARAM(x) (long)(x)
+#endif
+
+typedef struct DisasContext {
+    /* current insn context */
+    int override; /* -1 if no override */
+    int prefix;
+    int aflag, dflag;
+    target_ulong pc; /* pc = eip + cs_base */
+    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
+                   static state change (stop translation) */
+    /* current block context */
+    target_ulong cs_base; /* base of CS segment */
+    int pe;     /* protected mode */
+    int code32; /* 32 bit code segment */
+#ifdef TARGET_X86_64
+    int lma;    /* long mode active */
+    int code64; /* 64 bit code segment */
+    int rex_x, rex_b;
+#endif
+    int ss32;   /* 32 bit stack segment */
+    int cc_op;  /* current CC operation */
+    int addseg; /* non zero if either DS/ES/SS have a non zero base */
+    int f_st;   /* currently unused */
+    int vm86;   /* vm86 mode */
+    int cpl;
+    int iopl;
+    int tf;     /* TF cpu flag */
+    int singlestep_enabled; /* "hardware" single step enabled */
+    int jmp_opt; /* use direct block chaining for direct jumps */
+    int mem_index; /* select memory access functions */
+    int flags; /* all execution flags */
+    struct TranslationBlock *tb;
+    int popl_esp_hack; /* for correct popl with esp base handling */
+    int rip_offset; /* only used in x86_64, but left for simplicity */
+    int cpuid_features;
+    int cpuid_ext_features;
+} DisasContext;
+
+static void gen_eob(DisasContext *s);
+static void gen_jmp(DisasContext *s, target_ulong eip);
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
+
+/* i386 arith/logic operations */
+enum {
+    OP_ADDL,
+    OP_ORL,
+    OP_ADCL,
+    OP_SBBL,
+    OP_ANDL,
+    OP_SUBL,
+    OP_XORL,
+    OP_CMPL,
+};
+
+/* i386 shift ops */
+enum {
+    OP_ROL,
+    OP_ROR,
+    OP_RCL,
+    OP_RCR,
+    OP_SHL,
+    OP_SHR,
+    OP_SHL1, /* undocumented */
+    OP_SAR = 7,
+};
+
+enum {
+#define DEF(s, n, copy_size) INDEX_op_ ## s,
+#include "opc.h"
+#undef DEF
+    NB_OPS,
+};
+
+#include "gen-op.h"
+
+/* operand size */
+enum {
+    OT_BYTE = 0,
+    OT_WORD,
+    OT_LONG,
+    OT_QUAD,
+};
+
+enum {
+    /* I386 int registers */
+    OR_EAX,   /* MUST be even numbered */
+    OR_ECX,
+    OR_EDX,
+    OR_EBX,
+    OR_ESP,
+    OR_EBP,
+    OR_ESI,
+    OR_EDI,
+
+    OR_TMP0 = 16,    /* temporary operand register */
+    OR_TMP1,
+    OR_A0, /* temporary register used when doing address evaluation */
+};
+
+#ifdef TARGET_X86_64
+
+#define NB_OP_SIZES 4
+
+#define DEF_REGS(prefix, suffix) \
+  prefix ## EAX ## suffix,\
+  prefix ## ECX ## suffix,\
+  prefix ## EDX ## suffix,\
+  prefix ## EBX ## suffix,\
+  prefix ## ESP ## suffix,\
+  prefix ## EBP ## suffix,\
+  prefix ## ESI ## suffix,\
+  prefix ## EDI ## suffix,\
+  prefix ## R8 ## suffix,\
+  prefix ## R9 ## suffix,\
+  prefix ## R10 ## suffix,\
+  prefix ## R11 ## suffix,\
+  prefix ## R12 ## suffix,\
+  prefix ## R13 ## suffix,\
+  prefix ## R14 ## suffix,\
+  prefix ## R15 ## suffix,
+
+#define DEF_BREGS(prefixb, prefixh, suffix)      \
+                                                 \
+static void prefixb ## ESP ## suffix ## _wrapper(void)  \
+{                                                \
+    if (x86_64_hregs)                            \
+        prefixb ## ESP ## suffix ();             \
+    else                                         \
+        prefixh ## EAX ## suffix ();             \
+}                                                \
+                                                 \
+static void prefixb ## EBP ## suffix ## _wrapper(void)  \
+{                                                \
+    if (x86_64_hregs)                            \
+        prefixb ## EBP ## suffix ();             \
+    else                                         \
+        prefixh ## ECX ## suffix ();             \
+}                                                \
+                                                 \
+static void prefixb ## ESI ## suffix ## _wrapper(void)  \
+{                                                \
+    if (x86_64_hregs)                            \
+        prefixb ## ESI ## suffix ();             \
+    else                                         \
+        prefixh ## EDX ## suffix ();             \
+}                                                \
+                                                 \
+static void prefixb ## EDI ## suffix 
## _wrapper(void) \ +{ \ + if (x86_64_hregs) \ + prefixb ## EDI ## suffix (); \ + else \ + prefixh ## EBX ## suffix (); \ +} + +DEF_BREGS(gen_op_movb_, gen_op_movh_, _T0) +DEF_BREGS(gen_op_movb_, gen_op_movh_, _T1) +DEF_BREGS(gen_op_movl_T0_, gen_op_movh_T0_, ) +DEF_BREGS(gen_op_movl_T1_, gen_op_movh_T1_, ) + +#else /* !TARGET_X86_64 */ + +#define NB_OP_SIZES 3 + +#define DEF_REGS(prefix, suffix) \ + prefix ## EAX ## suffix,\ + prefix ## ECX ## suffix,\ + prefix ## EDX ## suffix,\ + prefix ## EBX ## suffix,\ + prefix ## ESP ## suffix,\ + prefix ## EBP ## suffix,\ + prefix ## ESI ## suffix,\ + prefix ## EDI ## suffix, + +#endif /* !TARGET_X86_64 */ + +static GenOpFunc *gen_op_mov_reg_T0[NB_OP_SIZES][CPU_NB_REGS] = { + [OT_BYTE] = { + gen_op_movb_EAX_T0, + gen_op_movb_ECX_T0, + gen_op_movb_EDX_T0, + gen_op_movb_EBX_T0, +#ifdef TARGET_X86_64 + gen_op_movb_ESP_T0_wrapper, + gen_op_movb_EBP_T0_wrapper, + gen_op_movb_ESI_T0_wrapper, + gen_op_movb_EDI_T0_wrapper, + gen_op_movb_R8_T0, + gen_op_movb_R9_T0, + gen_op_movb_R10_T0, + gen_op_movb_R11_T0, + gen_op_movb_R12_T0, + gen_op_movb_R13_T0, + gen_op_movb_R14_T0, + gen_op_movb_R15_T0, +#else + gen_op_movh_EAX_T0, + gen_op_movh_ECX_T0, + gen_op_movh_EDX_T0, + gen_op_movh_EBX_T0, +#endif + }, + [OT_WORD] = { + DEF_REGS(gen_op_movw_, _T0) + }, + [OT_LONG] = { + DEF_REGS(gen_op_movl_, _T0) + }, +#ifdef TARGET_X86_64 + [OT_QUAD] = { + DEF_REGS(gen_op_movq_, _T0) + }, +#endif +}; + +static GenOpFunc *gen_op_mov_reg_T1[NB_OP_SIZES][CPU_NB_REGS] = { + [OT_BYTE] = { + gen_op_movb_EAX_T1, + gen_op_movb_ECX_T1, + gen_op_movb_EDX_T1, + gen_op_movb_EBX_T1, +#ifdef TARGET_X86_64 + gen_op_movb_ESP_T1_wrapper, + gen_op_movb_EBP_T1_wrapper, + gen_op_movb_ESI_T1_wrapper, + gen_op_movb_EDI_T1_wrapper, + gen_op_movb_R8_T1, + gen_op_movb_R9_T1, + gen_op_movb_R10_T1, + gen_op_movb_R11_T1, + gen_op_movb_R12_T1, + gen_op_movb_R13_T1, + gen_op_movb_R14_T1, + gen_op_movb_R15_T1, +#else + gen_op_movh_EAX_T1, + gen_op_movh_ECX_T1, + gen_op_movh_EDX_T1, + gen_op_movh_EBX_T1, +#endif + }, + [OT_WORD] = { + DEF_REGS(gen_op_movw_, _T1) + }, + [OT_LONG] = { + DEF_REGS(gen_op_movl_, _T1) + }, +#ifdef TARGET_X86_64 + [OT_QUAD] = { + DEF_REGS(gen_op_movq_, _T1) + }, +#endif +}; + +static GenOpFunc *gen_op_mov_reg_A0[NB_OP_SIZES - 1][CPU_NB_REGS] = { + [0] = { + DEF_REGS(gen_op_movw_, _A0) + }, + [1] = { + DEF_REGS(gen_op_movl_, _A0) + }, +#ifdef TARGET_X86_64 + [2] = { + DEF_REGS(gen_op_movq_, _A0) + }, +#endif +}; + +static GenOpFunc *gen_op_mov_TN_reg[NB_OP_SIZES][2][CPU_NB_REGS] = +{ + [OT_BYTE] = { + { + gen_op_movl_T0_EAX, + gen_op_movl_T0_ECX, + gen_op_movl_T0_EDX, + gen_op_movl_T0_EBX, +#ifdef TARGET_X86_64 + gen_op_movl_T0_ESP_wrapper, + gen_op_movl_T0_EBP_wrapper, + gen_op_movl_T0_ESI_wrapper, + gen_op_movl_T0_EDI_wrapper, + gen_op_movl_T0_R8, + gen_op_movl_T0_R9, + gen_op_movl_T0_R10, + gen_op_movl_T0_R11, + gen_op_movl_T0_R12, + gen_op_movl_T0_R13, + gen_op_movl_T0_R14, + gen_op_movl_T0_R15, +#else + gen_op_movh_T0_EAX, + gen_op_movh_T0_ECX, + gen_op_movh_T0_EDX, + gen_op_movh_T0_EBX, +#endif + }, + { + gen_op_movl_T1_EAX, + gen_op_movl_T1_ECX, + gen_op_movl_T1_EDX, + gen_op_movl_T1_EBX, +#ifdef TARGET_X86_64 + gen_op_movl_T1_ESP_wrapper, + gen_op_movl_T1_EBP_wrapper, + gen_op_movl_T1_ESI_wrapper, + gen_op_movl_T1_EDI_wrapper, + gen_op_movl_T1_R8, + gen_op_movl_T1_R9, + gen_op_movl_T1_R10, + gen_op_movl_T1_R11, + gen_op_movl_T1_R12, + gen_op_movl_T1_R13, + gen_op_movl_T1_R14, + gen_op_movl_T1_R15, +#else + gen_op_movh_T1_EAX, + gen_op_movh_T1_ECX, + gen_op_movh_T1_EDX, + 
gen_op_movh_T1_EBX, +#endif + }, + }, + [OT_WORD] = { + { + DEF_REGS(gen_op_movl_T0_, ) + }, + { + DEF_REGS(gen_op_movl_T1_, ) + }, + }, + [OT_LONG] = { + { + DEF_REGS(gen_op_movl_T0_, ) + }, + { + DEF_REGS(gen_op_movl_T1_, ) + }, + }, +#ifdef TARGET_X86_64 + [OT_QUAD] = { + { + DEF_REGS(gen_op_movl_T0_, ) + }, + { + DEF_REGS(gen_op_movl_T1_, ) + }, + }, +#endif +}; + +static GenOpFunc *gen_op_movl_A0_reg[CPU_NB_REGS] = { + DEF_REGS(gen_op_movl_A0_, ) +}; + +static GenOpFunc *gen_op_addl_A0_reg_sN[4][CPU_NB_REGS] = { + [0] = { + DEF_REGS(gen_op_addl_A0_, ) + }, + [1] = { + DEF_REGS(gen_op_addl_A0_, _s1) + }, + [2] = { + DEF_REGS(gen_op_addl_A0_, _s2) + }, + [3] = { + DEF_REGS(gen_op_addl_A0_, _s3) + }, +}; + +#ifdef TARGET_X86_64 +static GenOpFunc *gen_op_movq_A0_reg[CPU_NB_REGS] = { + DEF_REGS(gen_op_movq_A0_, ) +}; + +static GenOpFunc *gen_op_addq_A0_reg_sN[4][CPU_NB_REGS] = { + [0] = { + DEF_REGS(gen_op_addq_A0_, ) + }, + [1] = { + DEF_REGS(gen_op_addq_A0_, _s1) + }, + [2] = { + DEF_REGS(gen_op_addq_A0_, _s2) + }, + [3] = { + DEF_REGS(gen_op_addq_A0_, _s3) + }, +}; +#endif + +static GenOpFunc *gen_op_cmov_reg_T1_T0[NB_OP_SIZES - 1][CPU_NB_REGS] = { + [0] = { + DEF_REGS(gen_op_cmovw_, _T1_T0) + }, + [1] = { + DEF_REGS(gen_op_cmovl_, _T1_T0) + }, +#ifdef TARGET_X86_64 + [2] = { + DEF_REGS(gen_op_cmovq_, _T1_T0) + }, +#endif +}; + +static GenOpFunc *gen_op_arith_T0_T1_cc[8] = { + NULL, + gen_op_orl_T0_T1, + NULL, + NULL, + gen_op_andl_T0_T1, + NULL, + gen_op_xorl_T0_T1, + NULL, +}; + +#define DEF_ARITHC(SUFFIX)\ + {\ + gen_op_adcb ## SUFFIX ## _T0_T1_cc,\ + gen_op_sbbb ## SUFFIX ## _T0_T1_cc,\ + },\ + {\ + gen_op_adcw ## SUFFIX ## _T0_T1_cc,\ + gen_op_sbbw ## SUFFIX ## _T0_T1_cc,\ + },\ + {\ + gen_op_adcl ## SUFFIX ## _T0_T1_cc,\ + gen_op_sbbl ## SUFFIX ## _T0_T1_cc,\ + },\ + {\ + X86_64_ONLY(gen_op_adcq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_sbbq ## SUFFIX ## _T0_T1_cc),\ + }, + +static GenOpFunc *gen_op_arithc_T0_T1_cc[4][2] = { + DEF_ARITHC( ) +}; + +static GenOpFunc *gen_op_arithc_mem_T0_T1_cc[3 * 4][2] = { + DEF_ARITHC(_raw) +#ifndef CONFIG_USER_ONLY + DEF_ARITHC(_kernel) + DEF_ARITHC(_user) +#endif +}; + +static const int cc_op_arithb[8] = { + CC_OP_ADDB, + CC_OP_LOGICB, + CC_OP_ADDB, + CC_OP_SUBB, + CC_OP_LOGICB, + CC_OP_SUBB, + CC_OP_LOGICB, + CC_OP_SUBB, +}; + +#define DEF_CMPXCHG(SUFFIX)\ + gen_op_cmpxchgb ## SUFFIX ## _T0_T1_EAX_cc,\ + gen_op_cmpxchgw ## SUFFIX ## _T0_T1_EAX_cc,\ + gen_op_cmpxchgl ## SUFFIX ## _T0_T1_EAX_cc,\ + X86_64_ONLY(gen_op_cmpxchgq ## SUFFIX ## _T0_T1_EAX_cc), + +static GenOpFunc *gen_op_cmpxchg_T0_T1_EAX_cc[4] = { + DEF_CMPXCHG( ) +}; + +static GenOpFunc *gen_op_cmpxchg_mem_T0_T1_EAX_cc[3 * 4] = { + DEF_CMPXCHG(_raw) +#ifndef CONFIG_USER_ONLY + DEF_CMPXCHG(_kernel) + DEF_CMPXCHG(_user) +#endif +}; + +#define DEF_SHIFT(SUFFIX)\ + {\ + gen_op_rolb ## SUFFIX ## _T0_T1_cc,\ + gen_op_rorb ## SUFFIX ## _T0_T1_cc,\ + gen_op_rclb ## SUFFIX ## _T0_T1_cc,\ + gen_op_rcrb ## SUFFIX ## _T0_T1_cc,\ + gen_op_shlb ## SUFFIX ## _T0_T1_cc,\ + gen_op_shrb ## SUFFIX ## _T0_T1_cc,\ + gen_op_shlb ## SUFFIX ## _T0_T1_cc,\ + gen_op_sarb ## SUFFIX ## _T0_T1_cc,\ + },\ + {\ + gen_op_rolw ## SUFFIX ## _T0_T1_cc,\ + gen_op_rorw ## SUFFIX ## _T0_T1_cc,\ + gen_op_rclw ## SUFFIX ## _T0_T1_cc,\ + gen_op_rcrw ## SUFFIX ## _T0_T1_cc,\ + gen_op_shlw ## SUFFIX ## _T0_T1_cc,\ + gen_op_shrw ## SUFFIX ## _T0_T1_cc,\ + gen_op_shlw ## SUFFIX ## _T0_T1_cc,\ + gen_op_sarw ## SUFFIX ## _T0_T1_cc,\ + },\ + {\ + gen_op_roll ## SUFFIX ## _T0_T1_cc,\ + gen_op_rorl ## SUFFIX ## _T0_T1_cc,\ + 
gen_op_rcll ## SUFFIX ## _T0_T1_cc,\ + gen_op_rcrl ## SUFFIX ## _T0_T1_cc,\ + gen_op_shll ## SUFFIX ## _T0_T1_cc,\ + gen_op_shrl ## SUFFIX ## _T0_T1_cc,\ + gen_op_shll ## SUFFIX ## _T0_T1_cc,\ + gen_op_sarl ## SUFFIX ## _T0_T1_cc,\ + },\ + {\ + X86_64_ONLY(gen_op_rolq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_rorq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_rclq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_rcrq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_shlq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_shrq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_shlq ## SUFFIX ## _T0_T1_cc),\ + X86_64_ONLY(gen_op_sarq ## SUFFIX ## _T0_T1_cc),\ + }, + +static GenOpFunc *gen_op_shift_T0_T1_cc[4][8] = { + DEF_SHIFT( ) +}; + +static GenOpFunc *gen_op_shift_mem_T0_T1_cc[3 * 4][8] = { + DEF_SHIFT(_raw) +#ifndef CONFIG_USER_ONLY + DEF_SHIFT(_kernel) + DEF_SHIFT(_user) +#endif +}; + +#define DEF_SHIFTD(SUFFIX, op)\ + {\ + NULL,\ + NULL,\ + },\ + {\ + gen_op_shldw ## SUFFIX ## _T0_T1_ ## op ## _cc,\ + gen_op_shrdw ## SUFFIX ## _T0_T1_ ## op ## _cc,\ + },\ + {\ + gen_op_shldl ## SUFFIX ## _T0_T1_ ## op ## _cc,\ + gen_op_shrdl ## SUFFIX ## _T0_T1_ ## op ## _cc,\ + },\ + {\ +X86_64_DEF(gen_op_shldq ## SUFFIX ## _T0_T1_ ## op ## _cc,\ + gen_op_shrdq ## SUFFIX ## _T0_T1_ ## op ## _cc,)\ + }, + +static GenOpFunc1 *gen_op_shiftd_T0_T1_im_cc[4][2] = { + DEF_SHIFTD(, im) +}; + +static GenOpFunc *gen_op_shiftd_T0_T1_ECX_cc[4][2] = { + DEF_SHIFTD(, ECX) +}; + +static GenOpFunc1 *gen_op_shiftd_mem_T0_T1_im_cc[3 * 4][2] = { + DEF_SHIFTD(_raw, im) +#ifndef CONFIG_USER_ONLY + DEF_SHIFTD(_kernel, im) + DEF_SHIFTD(_user, im) +#endif +}; + +static GenOpFunc *gen_op_shiftd_mem_T0_T1_ECX_cc[3 * 4][2] = { + DEF_SHIFTD(_raw, ECX) +#ifndef CONFIG_USER_ONLY + DEF_SHIFTD(_kernel, ECX) + DEF_SHIFTD(_user, ECX) +#endif +}; + +static GenOpFunc *gen_op_btx_T0_T1_cc[3][4] = { + [0] = { + gen_op_btw_T0_T1_cc, + gen_op_btsw_T0_T1_cc, + gen_op_btrw_T0_T1_cc, + gen_op_btcw_T0_T1_cc, + }, + [1] = { + gen_op_btl_T0_T1_cc, + gen_op_btsl_T0_T1_cc, + gen_op_btrl_T0_T1_cc, + gen_op_btcl_T0_T1_cc, + }, +#ifdef TARGET_X86_64 + [2] = { + gen_op_btq_T0_T1_cc, + gen_op_btsq_T0_T1_cc, + gen_op_btrq_T0_T1_cc, + gen_op_btcq_T0_T1_cc, + }, +#endif +}; + +static GenOpFunc *gen_op_add_bit_A0_T1[3] = { + gen_op_add_bitw_A0_T1, + gen_op_add_bitl_A0_T1, + X86_64_ONLY(gen_op_add_bitq_A0_T1), +}; + +static GenOpFunc *gen_op_bsx_T0_cc[3][2] = { + [0] = { + gen_op_bsfw_T0_cc, + gen_op_bsrw_T0_cc, + }, + [1] = { + gen_op_bsfl_T0_cc, + gen_op_bsrl_T0_cc, + }, +#ifdef TARGET_X86_64 + [2] = { + gen_op_bsfq_T0_cc, + gen_op_bsrq_T0_cc, + }, +#endif +}; + +static GenOpFunc *gen_op_lds_T0_A0[3 * 4] = { + gen_op_ldsb_raw_T0_A0, + gen_op_ldsw_raw_T0_A0, + X86_64_ONLY(gen_op_ldsl_raw_T0_A0), + NULL, +#ifndef CONFIG_USER_ONLY + gen_op_ldsb_kernel_T0_A0, + gen_op_ldsw_kernel_T0_A0, + X86_64_ONLY(gen_op_ldsl_kernel_T0_A0), + NULL, + + gen_op_ldsb_user_T0_A0, + gen_op_ldsw_user_T0_A0, + X86_64_ONLY(gen_op_ldsl_user_T0_A0), + NULL, +#endif +}; + +static GenOpFunc *gen_op_ldu_T0_A0[3 * 4] = { + gen_op_ldub_raw_T0_A0, + gen_op_lduw_raw_T0_A0, + NULL, + NULL, + +#ifndef CONFIG_USER_ONLY + gen_op_ldub_kernel_T0_A0, + gen_op_lduw_kernel_T0_A0, + NULL, + NULL, + + gen_op_ldub_user_T0_A0, + gen_op_lduw_user_T0_A0, + NULL, + NULL, +#endif +}; + +/* sign does not matter, except for lidt/lgdt call (TODO: fix it) */ +static GenOpFunc *gen_op_ld_T0_A0[3 * 4] = { + gen_op_ldub_raw_T0_A0, + gen_op_lduw_raw_T0_A0, + gen_op_ldl_raw_T0_A0, + X86_64_ONLY(gen_op_ldq_raw_T0_A0), + 
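+    /* each 3 * 4 load/store dispatch table holds a raw, kernel and user
+       bank of four size entries; callers index them with
+       'ot + s->mem_index', so s->mem_index selects the bank */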
+#ifndef CONFIG_USER_ONLY
+    gen_op_ldub_kernel_T0_A0,
+    gen_op_lduw_kernel_T0_A0,
+    gen_op_ldl_kernel_T0_A0,
+    X86_64_ONLY(gen_op_ldq_kernel_T0_A0),
+
+    gen_op_ldub_user_T0_A0,
+    gen_op_lduw_user_T0_A0,
+    gen_op_ldl_user_T0_A0,
+    X86_64_ONLY(gen_op_ldq_user_T0_A0),
+#endif
+};
+
+static GenOpFunc *gen_op_ld_T1_A0[3 * 4] = {
+    gen_op_ldub_raw_T1_A0,
+    gen_op_lduw_raw_T1_A0,
+    gen_op_ldl_raw_T1_A0,
+    X86_64_ONLY(gen_op_ldq_raw_T1_A0),
+
+#ifndef CONFIG_USER_ONLY
+    gen_op_ldub_kernel_T1_A0,
+    gen_op_lduw_kernel_T1_A0,
+    gen_op_ldl_kernel_T1_A0,
+    X86_64_ONLY(gen_op_ldq_kernel_T1_A0),
+
+    gen_op_ldub_user_T1_A0,
+    gen_op_lduw_user_T1_A0,
+    gen_op_ldl_user_T1_A0,
+    X86_64_ONLY(gen_op_ldq_user_T1_A0),
+#endif
+};
+
+static GenOpFunc *gen_op_st_T0_A0[3 * 4] = {
+    gen_op_stb_raw_T0_A0,
+    gen_op_stw_raw_T0_A0,
+    gen_op_stl_raw_T0_A0,
+    X86_64_ONLY(gen_op_stq_raw_T0_A0),
+
+#ifndef CONFIG_USER_ONLY
+    gen_op_stb_kernel_T0_A0,
+    gen_op_stw_kernel_T0_A0,
+    gen_op_stl_kernel_T0_A0,
+    X86_64_ONLY(gen_op_stq_kernel_T0_A0),
+
+    gen_op_stb_user_T0_A0,
+    gen_op_stw_user_T0_A0,
+    gen_op_stl_user_T0_A0,
+    X86_64_ONLY(gen_op_stq_user_T0_A0),
+#endif
+};
+
+static GenOpFunc *gen_op_st_T1_A0[3 * 4] = {
+    NULL,
+    gen_op_stw_raw_T1_A0,
+    gen_op_stl_raw_T1_A0,
+    X86_64_ONLY(gen_op_stq_raw_T1_A0),
+
+#ifndef CONFIG_USER_ONLY
+    NULL,
+    gen_op_stw_kernel_T1_A0,
+    gen_op_stl_kernel_T1_A0,
+    X86_64_ONLY(gen_op_stq_kernel_T1_A0),
+
+    NULL,
+    gen_op_stw_user_T1_A0,
+    gen_op_stl_user_T1_A0,
+    X86_64_ONLY(gen_op_stq_user_T1_A0),
+#endif
+};
+
+/* load an immediate eip, using the shortest micro op that can hold it */
+static inline void gen_jmp_im(target_ulong pc)
+{
+#ifdef TARGET_X86_64
+    if (pc == (uint32_t)pc) {
+        gen_op_movl_eip_im(pc);
+    } else if (pc == (int32_t)pc) {
+        gen_op_movq_eip_im(pc);
+    } else {
+        gen_op_movq_eip_im64(pc >> 32, pc);
+    }
+#else
+    gen_op_movl_eip_im(pc);
+#endif
+}
+
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+    int override;
+
+    override = s->override;
+#ifdef TARGET_X86_64
+    if (s->aflag == 2) {
+        if (override >= 0) {
+            gen_op_movq_A0_seg(offsetof(CPUX86State,segs[override].base));
+            gen_op_addq_A0_reg_sN[0][R_ESI]();
+        } else {
+            gen_op_movq_A0_reg[R_ESI]();
+        }
+    } else
+#endif
+    if (s->aflag) {
+        /* 32 bit address */
+        if (s->addseg && override < 0)
+            override = R_DS;
+        if (override >= 0) {
+            gen_op_movl_A0_seg(offsetof(CPUX86State,segs[override].base));
+            gen_op_addl_A0_reg_sN[0][R_ESI]();
+        } else {
+            gen_op_movl_A0_reg[R_ESI]();
+        }
+    } else {
+        /* 16 bit address, always override */
+        if (override < 0)
+            override = R_DS;
+        gen_op_movl_A0_reg[R_ESI]();
+        gen_op_andl_A0_ffff();
+        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+    }
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+#ifdef TARGET_X86_64
+    if (s->aflag == 2) {
+        gen_op_movq_A0_reg[R_EDI]();
+    } else
+#endif
+    if (s->aflag) {
+        if (s->addseg) {
+            gen_op_movl_A0_seg(offsetof(CPUX86State,segs[R_ES].base));
+            gen_op_addl_A0_reg_sN[0][R_EDI]();
+        } else {
+            gen_op_movl_A0_reg[R_EDI]();
+        }
+    } else {
+        gen_op_movl_A0_reg[R_EDI]();
+        gen_op_andl_A0_ffff();
+        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_ES].base));
+    }
+}
+
+static GenOpFunc *gen_op_movl_T0_Dshift[4] = {
+    gen_op_movl_T0_Dshiftb,
+    gen_op_movl_T0_Dshiftw,
+    gen_op_movl_T0_Dshiftl,
+    X86_64_ONLY(gen_op_movl_T0_Dshiftq),
+};
+
+static GenOpFunc1 *gen_op_jnz_ecx[3] = {
+    gen_op_jnz_ecxw,
+    gen_op_jnz_ecxl,
+    X86_64_ONLY(gen_op_jnz_ecxq),
+};
+
+static GenOpFunc1 *gen_op_jz_ecx[3] = {
+    gen_op_jz_ecxw,
+    gen_op_jz_ecxl,
+    X86_64_ONLY(gen_op_jz_ecxq),
+};
+
+static GenOpFunc *gen_op_dec_ECX[3] =
{ + gen_op_decw_ECX, + gen_op_decl_ECX, + X86_64_ONLY(gen_op_decq_ECX), +}; + +static GenOpFunc1 *gen_op_string_jnz_sub[2][4] = { + { + gen_op_jnz_subb, + gen_op_jnz_subw, + gen_op_jnz_subl, + X86_64_ONLY(gen_op_jnz_subq), + }, + { + gen_op_jz_subb, + gen_op_jz_subw, + gen_op_jz_subl, + X86_64_ONLY(gen_op_jz_subq), + }, +}; + +static GenOpFunc *gen_op_in_DX_T0[3] = { + gen_op_inb_DX_T0, + gen_op_inw_DX_T0, + gen_op_inl_DX_T0, +}; + +static GenOpFunc *gen_op_out_DX_T0[3] = { + gen_op_outb_DX_T0, + gen_op_outw_DX_T0, + gen_op_outl_DX_T0, +}; + +static GenOpFunc *gen_op_in[3] = { + gen_op_inb_T0_T1, + gen_op_inw_T0_T1, + gen_op_inl_T0_T1, +}; + +static GenOpFunc *gen_op_out[3] = { + gen_op_outb_T0_T1, + gen_op_outw_T0_T1, + gen_op_outl_T0_T1, +}; + +static GenOpFunc *gen_check_io_T0[3] = { + gen_op_check_iob_T0, + gen_op_check_iow_T0, + gen_op_check_iol_T0, +}; + +static GenOpFunc *gen_check_io_DX[3] = { + gen_op_check_iob_DX, + gen_op_check_iow_DX, + gen_op_check_iol_DX, +}; + +static void gen_check_io(DisasContext *s, int ot, int use_dx, target_ulong cur_eip) +{ + if (s->pe && (s->cpl > s->iopl || s->vm86)) { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(cur_eip); + if (use_dx) + gen_check_io_DX[ot](); + else + gen_check_io_T0[ot](); + } +} + +static inline void gen_movs(DisasContext *s, int ot) +{ + gen_string_movl_A0_ESI(s); + gen_op_ld_T0_A0[ot + s->mem_index](); + gen_string_movl_A0_EDI(s); + gen_op_st_T0_A0[ot + s->mem_index](); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_ESI_T0(); + gen_op_addq_EDI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_ESI_T0(); + gen_op_addl_EDI_T0(); + } else { + gen_op_addw_ESI_T0(); + gen_op_addw_EDI_T0(); + } +} + +static inline void gen_update_cc_op(DisasContext *s) +{ + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } +} + +/* XXX: does not work with gdbstub "ice" single step - not a + serious problem */ +static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) +{ + int l1, l2; + + l1 = gen_new_label(); + l2 = gen_new_label(); + gen_op_jnz_ecx[s->aflag](l1); + gen_set_label(l2); + gen_jmp_tb(s, next_eip, 1); + gen_set_label(l1); + return l2; +} + +static inline void gen_stos(DisasContext *s, int ot) +{ + gen_op_mov_TN_reg[OT_LONG][0][R_EAX](); + gen_string_movl_A0_EDI(s); + gen_op_st_T0_A0[ot + s->mem_index](); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_EDI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_EDI_T0(); + } else { + gen_op_addw_EDI_T0(); + } +} + +static inline void gen_lods(DisasContext *s, int ot) +{ + gen_string_movl_A0_ESI(s); + gen_op_ld_T0_A0[ot + s->mem_index](); + gen_op_mov_reg_T0[ot][R_EAX](); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_ESI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_ESI_T0(); + } else { + gen_op_addw_ESI_T0(); + } +} + +static inline void gen_scas(DisasContext *s, int ot) +{ + gen_op_mov_TN_reg[OT_LONG][0][R_EAX](); + gen_string_movl_A0_EDI(s); + gen_op_ld_T1_A0[ot + s->mem_index](); + gen_op_cmpl_T0_T1_cc(); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_EDI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_EDI_T0(); + } else { + gen_op_addw_EDI_T0(); + } +} + +static inline void gen_cmps(DisasContext *s, int ot) +{ + gen_string_movl_A0_ESI(s); + gen_op_ld_T0_A0[ot + s->mem_index](); + gen_string_movl_A0_EDI(s); + 
gen_op_ld_T1_A0[ot + s->mem_index](); + gen_op_cmpl_T0_T1_cc(); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_ESI_T0(); + gen_op_addq_EDI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_ESI_T0(); + gen_op_addl_EDI_T0(); + } else { + gen_op_addw_ESI_T0(); + gen_op_addw_EDI_T0(); + } +} + +static inline void gen_ins(DisasContext *s, int ot) +{ + gen_string_movl_A0_EDI(s); + gen_op_movl_T0_0(); + gen_op_st_T0_A0[ot + s->mem_index](); + gen_op_in_DX_T0[ot](); + gen_op_st_T0_A0[ot + s->mem_index](); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_EDI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_EDI_T0(); + } else { + gen_op_addw_EDI_T0(); + } +} + +static inline void gen_outs(DisasContext *s, int ot) +{ + gen_string_movl_A0_ESI(s); + gen_op_ld_T0_A0[ot + s->mem_index](); + gen_op_out_DX_T0[ot](); + gen_op_movl_T0_Dshift[ot](); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_ESI_T0(); + } else +#endif + if (s->aflag) { + gen_op_addl_ESI_T0(); + } else { + gen_op_addw_ESI_T0(); + } +} + +/* same method as Valgrind : we generate jumps to current or next + instruction */ +#define GEN_REPZ(op) \ +static inline void gen_repz_ ## op(DisasContext *s, int ot, \ + target_ulong cur_eip, target_ulong next_eip) \ +{ \ + int l2;\ + gen_update_cc_op(s); \ + l2 = gen_jz_ecx_string(s, next_eip); \ + gen_ ## op(s, ot); \ + gen_op_dec_ECX[s->aflag](); \ + /* a loop would cause two single step exceptions if ECX = 1 \ + before rep string_insn */ \ + if (!s->jmp_opt) \ + gen_op_jz_ecx[s->aflag](l2); \ + gen_jmp(s, cur_eip); \ +} + +#define GEN_REPZ2(op) \ +static inline void gen_repz_ ## op(DisasContext *s, int ot, \ + target_ulong cur_eip, \ + target_ulong next_eip, \ + int nz) \ +{ \ + int l2;\ + gen_update_cc_op(s); \ + l2 = gen_jz_ecx_string(s, next_eip); \ + gen_ ## op(s, ot); \ + gen_op_dec_ECX[s->aflag](); \ + gen_op_set_cc_op(CC_OP_SUBB + ot); \ + gen_op_string_jnz_sub[nz][ot](l2);\ + if (!s->jmp_opt) \ + gen_op_jz_ecx[s->aflag](l2); \ + gen_jmp(s, cur_eip); \ +} + +GEN_REPZ(movs) +GEN_REPZ(stos) +GEN_REPZ(lods) +GEN_REPZ(ins) +GEN_REPZ(outs) +GEN_REPZ2(scas) +GEN_REPZ2(cmps) + +enum { + JCC_O, + JCC_B, + JCC_Z, + JCC_BE, + JCC_S, + JCC_P, + JCC_L, + JCC_LE, +}; + +static GenOpFunc1 *gen_jcc_sub[4][8] = { + [OT_BYTE] = { + NULL, + gen_op_jb_subb, + gen_op_jz_subb, + gen_op_jbe_subb, + gen_op_js_subb, + NULL, + gen_op_jl_subb, + gen_op_jle_subb, + }, + [OT_WORD] = { + NULL, + gen_op_jb_subw, + gen_op_jz_subw, + gen_op_jbe_subw, + gen_op_js_subw, + NULL, + gen_op_jl_subw, + gen_op_jle_subw, + }, + [OT_LONG] = { + NULL, + gen_op_jb_subl, + gen_op_jz_subl, + gen_op_jbe_subl, + gen_op_js_subl, + NULL, + gen_op_jl_subl, + gen_op_jle_subl, + }, +#ifdef TARGET_X86_64 + [OT_QUAD] = { + NULL, + BUGGY_64(gen_op_jb_subq), + gen_op_jz_subq, + BUGGY_64(gen_op_jbe_subq), + gen_op_js_subq, + NULL, + BUGGY_64(gen_op_jl_subq), + BUGGY_64(gen_op_jle_subq), + }, +#endif +}; +static GenOpFunc1 *gen_op_loop[3][4] = { + [0] = { + gen_op_loopnzw, + gen_op_loopzw, + gen_op_jnz_ecxw, + }, + [1] = { + gen_op_loopnzl, + gen_op_loopzl, + gen_op_jnz_ecxl, + }, +#ifdef TARGET_X86_64 + [2] = { + gen_op_loopnzq, + gen_op_loopzq, + gen_op_jnz_ecxq, + }, +#endif +}; + +static GenOpFunc *gen_setcc_slow[8] = { + gen_op_seto_T0_cc, + gen_op_setb_T0_cc, + gen_op_setz_T0_cc, + gen_op_setbe_T0_cc, + gen_op_sets_T0_cc, + gen_op_setp_T0_cc, + gen_op_setl_T0_cc, + gen_op_setle_T0_cc, +}; + +static GenOpFunc *gen_setcc_sub[4][8] = { + 
[OT_BYTE] = { + NULL, + gen_op_setb_T0_subb, + gen_op_setz_T0_subb, + gen_op_setbe_T0_subb, + gen_op_sets_T0_subb, + NULL, + gen_op_setl_T0_subb, + gen_op_setle_T0_subb, + }, + [OT_WORD] = { + NULL, + gen_op_setb_T0_subw, + gen_op_setz_T0_subw, + gen_op_setbe_T0_subw, + gen_op_sets_T0_subw, + NULL, + gen_op_setl_T0_subw, + gen_op_setle_T0_subw, + }, + [OT_LONG] = { + NULL, + gen_op_setb_T0_subl, + gen_op_setz_T0_subl, + gen_op_setbe_T0_subl, + gen_op_sets_T0_subl, + NULL, + gen_op_setl_T0_subl, + gen_op_setle_T0_subl, + }, +#ifdef TARGET_X86_64 + [OT_QUAD] = { + NULL, + gen_op_setb_T0_subq, + gen_op_setz_T0_subq, + gen_op_setbe_T0_subq, + gen_op_sets_T0_subq, + NULL, + gen_op_setl_T0_subq, + gen_op_setle_T0_subq, + }, +#endif +}; + +static GenOpFunc *gen_op_fp_arith_ST0_FT0[8] = { + gen_op_fadd_ST0_FT0, + gen_op_fmul_ST0_FT0, + gen_op_fcom_ST0_FT0, + gen_op_fcom_ST0_FT0, + gen_op_fsub_ST0_FT0, + gen_op_fsubr_ST0_FT0, + gen_op_fdiv_ST0_FT0, + gen_op_fdivr_ST0_FT0, +}; + +/* NOTE the exception in "r" op ordering */ +static GenOpFunc1 *gen_op_fp_arith_STN_ST0[8] = { + gen_op_fadd_STN_ST0, + gen_op_fmul_STN_ST0, + NULL, + NULL, + gen_op_fsubr_STN_ST0, + gen_op_fsub_STN_ST0, + gen_op_fdivr_STN_ST0, + gen_op_fdiv_STN_ST0, +}; + +/* if d == OR_TMP0, it means memory operand (address in A0) */ +static void gen_op(DisasContext *s1, int op, int ot, int d) +{ + GenOpFunc *gen_update_cc; + + if (d != OR_TMP0) { + gen_op_mov_TN_reg[ot][0][d](); + } else { + gen_op_ld_T0_A0[ot + s1->mem_index](); + } + switch(op) { + case OP_ADCL: + case OP_SBBL: + if (s1->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s1->cc_op); + if (d != OR_TMP0) { + gen_op_arithc_T0_T1_cc[ot][op - OP_ADCL](); + gen_op_mov_reg_T0[ot][d](); + } else { + gen_op_arithc_mem_T0_T1_cc[ot + s1->mem_index][op - OP_ADCL](); + } + s1->cc_op = CC_OP_DYNAMIC; + goto the_end; + case OP_ADDL: + gen_op_addl_T0_T1(); + s1->cc_op = CC_OP_ADDB + ot; + gen_update_cc = gen_op_update2_cc; + break; + case OP_SUBL: + gen_op_subl_T0_T1(); + s1->cc_op = CC_OP_SUBB + ot; + gen_update_cc = gen_op_update2_cc; + break; + default: + case OP_ANDL: + case OP_ORL: + case OP_XORL: + gen_op_arith_T0_T1_cc[op](); + s1->cc_op = CC_OP_LOGICB + ot; + gen_update_cc = gen_op_update1_cc; + break; + case OP_CMPL: + gen_op_cmpl_T0_T1_cc(); + s1->cc_op = CC_OP_SUBB + ot; + gen_update_cc = NULL; + break; + } + if (op != OP_CMPL) { + if (d != OR_TMP0) + gen_op_mov_reg_T0[ot][d](); + else + gen_op_st_T0_A0[ot + s1->mem_index](); + } + /* the flags update must happen after the memory write (precise + exception support) */ + if (gen_update_cc) + gen_update_cc(); + the_end: ; +} + +/* if d == OR_TMP0, it means memory operand (address in A0) */ +static void gen_inc(DisasContext *s1, int ot, int d, int c) +{ + if (d != OR_TMP0) + gen_op_mov_TN_reg[ot][0][d](); + else + gen_op_ld_T0_A0[ot + s1->mem_index](); + if (s1->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s1->cc_op); + if (c > 0) { + gen_op_incl_T0(); + s1->cc_op = CC_OP_INCB + ot; + } else { + gen_op_decl_T0(); + s1->cc_op = CC_OP_DECB + ot; + } + if (d != OR_TMP0) + gen_op_mov_reg_T0[ot][d](); + else + gen_op_st_T0_A0[ot + s1->mem_index](); + gen_op_update_inc_cc(); +} + +static void gen_shift(DisasContext *s1, int op, int ot, int d, int s) +{ + if (d != OR_TMP0) + gen_op_mov_TN_reg[ot][0][d](); + else + gen_op_ld_T0_A0[ot + s1->mem_index](); + if (s != OR_TMP1) + gen_op_mov_TN_reg[ot][1][s](); + /* for zero counts, flags are not updated, so must do it dynamically */ + if (s1->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s1->cc_op); + 
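+    /* the shift micro ops leave the flags untouched when the count is
+       zero, which is why cc_op is forced dynamic below */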
+ if (d != OR_TMP0) + gen_op_shift_T0_T1_cc[ot][op](); + else + gen_op_shift_mem_T0_T1_cc[ot + s1->mem_index][op](); + if (d != OR_TMP0) + gen_op_mov_reg_T0[ot][d](); + s1->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ +} + +static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c) +{ + /* currently not optimized */ + gen_op_movl_T1_im(c); + gen_shift(s1, op, ot, d, OR_TMP1); +} + +static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr) +{ + target_long disp; + int havesib; + int base; + int index; + int scale; + int opreg; + int mod, rm, code, override, must_add_seg; + + override = s->override; + must_add_seg = s->addseg; + if (override >= 0) + must_add_seg = 1; + mod = (modrm >> 6) & 3; + rm = modrm & 7; + + if (s->aflag) { + + havesib = 0; + base = rm; + index = 0; + scale = 0; + + if (base == 4) { + havesib = 1; + code = ldub_code(s->pc++); + scale = (code >> 6) & 3; + index = ((code >> 3) & 7) | REX_X(s); + base = (code & 7); + } + base |= REX_B(s); + + switch (mod) { + case 0: + if ((base & 7) == 5) { + base = -1; + disp = (int32_t)ldl_code(s->pc); + s->pc += 4; + if (CODE64(s) && !havesib) { + disp += s->pc + s->rip_offset; + } + } else { + disp = 0; + } + break; + case 1: + disp = (int8_t)ldub_code(s->pc++); + break; + default: + case 2: + disp = ldl_code(s->pc); + s->pc += 4; + break; + } + + if (base >= 0) { + /* for correct popl handling with esp */ + if (base == 4 && s->popl_esp_hack) + disp += s->popl_esp_hack; +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_movq_A0_reg[base](); + if (disp != 0) { + if ((int32_t)disp == disp) + gen_op_addq_A0_im(disp); + else + gen_op_addq_A0_im64(disp >> 32, disp); + } + } else +#endif + { + gen_op_movl_A0_reg[base](); + if (disp != 0) + gen_op_addl_A0_im(disp); + } + } else { +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + if ((int32_t)disp == disp) + gen_op_movq_A0_im(disp); + else + gen_op_movq_A0_im64(disp >> 32, disp); + } else +#endif + { + gen_op_movl_A0_im(disp); + } + } + /* XXX: index == 4 is always invalid */ + if (havesib && (index != 4 || scale != 0)) { +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_A0_reg_sN[scale][index](); + } else +#endif + { + gen_op_addl_A0_reg_sN[scale][index](); + } + } + if (must_add_seg) { + if (override < 0) { + if (base == R_EBP || base == R_ESP) + override = R_SS; + else + override = R_DS; + } +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base)); + } else +#endif + { + gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); + } + } + } else { + switch (mod) { + case 0: + if (rm == 6) { + disp = lduw_code(s->pc); + s->pc += 2; + gen_op_movl_A0_im(disp); + rm = 0; /* avoid SS override */ + goto no_rm; + } else { + disp = 0; + } + break; + case 1: + disp = (int8_t)ldub_code(s->pc++); + break; + default: + case 2: + disp = lduw_code(s->pc); + s->pc += 2; + break; + } + switch(rm) { + case 0: + gen_op_movl_A0_reg[R_EBX](); + gen_op_addl_A0_reg_sN[0][R_ESI](); + break; + case 1: + gen_op_movl_A0_reg[R_EBX](); + gen_op_addl_A0_reg_sN[0][R_EDI](); + break; + case 2: + gen_op_movl_A0_reg[R_EBP](); + gen_op_addl_A0_reg_sN[0][R_ESI](); + break; + case 3: + gen_op_movl_A0_reg[R_EBP](); + gen_op_addl_A0_reg_sN[0][R_EDI](); + break; + case 4: + gen_op_movl_A0_reg[R_ESI](); + break; + case 5: + gen_op_movl_A0_reg[R_EDI](); + break; + case 6: + gen_op_movl_A0_reg[R_EBP](); + break; + default: + case 7: + gen_op_movl_A0_reg[R_EBX](); + break; + } + if (disp != 0) + 
gen_op_addl_A0_im(disp); + gen_op_andl_A0_ffff(); + no_rm: + if (must_add_seg) { + if (override < 0) { + if (rm == 2 || rm == 3 || rm == 6) + override = R_SS; + else + override = R_DS; + } + gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); + } + } + + opreg = OR_A0; + disp = 0; + *reg_ptr = opreg; + *offset_ptr = disp; +} + +/* used for LEA and MOV AX, mem */ +static void gen_add_A0_ds_seg(DisasContext *s) +{ + int override, must_add_seg; + must_add_seg = s->addseg; + override = R_DS; + if (s->override >= 0) { + override = s->override; + must_add_seg = 1; + } else { + override = R_DS; + } + if (must_add_seg) { +#ifdef TARGET_X86_64 + if (CODE64(s)) { + gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base)); + } else +#endif + { + gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base)); + } + } +} + +/* generate modrm memory load or store of 'reg'. TMP0 is used if reg != + OR_TMP0 */ +static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store) +{ + int mod, rm, opreg, disp; + + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod == 3) { + if (is_store) { + if (reg != OR_TMP0) + gen_op_mov_TN_reg[ot][0][reg](); + gen_op_mov_reg_T0[ot][rm](); + } else { + gen_op_mov_TN_reg[ot][0][rm](); + if (reg != OR_TMP0) + gen_op_mov_reg_T0[ot][reg](); + } + } else { + gen_lea_modrm(s, modrm, &opreg, &disp); + if (is_store) { + if (reg != OR_TMP0) + gen_op_mov_TN_reg[ot][0][reg](); + gen_op_st_T0_A0[ot + s->mem_index](); + } else { + gen_op_ld_T0_A0[ot + s->mem_index](); + if (reg != OR_TMP0) + gen_op_mov_reg_T0[ot][reg](); + } + } +} + +static inline uint32_t insn_get(DisasContext *s, int ot) +{ + uint32_t ret; + + switch(ot) { + case OT_BYTE: + ret = ldub_code(s->pc); + s->pc++; + break; + case OT_WORD: + ret = lduw_code(s->pc); + s->pc += 2; + break; + default: + case OT_LONG: + ret = ldl_code(s->pc); + s->pc += 4; + break; + } + return ret; +} + +static inline int insn_const_size(unsigned int ot) +{ + if (ot <= OT_LONG) + return 1 << ot; + else + return 4; +} + +static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) +{ + TranslationBlock *tb; + target_ulong pc; + + pc = s->cs_base + eip; + tb = s->tb; + /* NOTE: we handle the case where the TB spans two pages here */ + if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) || + (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { + /* jump to same page: we can use a direct jump */ + if (tb_num == 0) + gen_op_goto_tb0(TBPARAM(tb)); + else + gen_op_goto_tb1(TBPARAM(tb)); + gen_jmp_im(eip); + gen_op_movl_T0_im((long)tb + tb_num); + gen_op_exit_tb(); + } else { + /* jump to another page: currently not optimized */ + gen_jmp_im(eip); + gen_eob(s); + } +} + +static inline void gen_jcc(DisasContext *s, int b, + target_ulong val, target_ulong next_eip) +{ + TranslationBlock *tb; + int inv, jcc_op; + GenOpFunc1 *func; + target_ulong tmp; + int l1, l2; + + inv = b & 1; + jcc_op = (b >> 1) & 7; + + if (s->jmp_opt) { + switch(s->cc_op) { + /* we optimize the cmp/jcc case */ + case CC_OP_SUBB: + case CC_OP_SUBW: + case CC_OP_SUBL: + case CC_OP_SUBQ: + func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op]; + break; + + /* some jumps are easy to compute */ + case CC_OP_ADDB: + case CC_OP_ADDW: + case CC_OP_ADDL: + case CC_OP_ADDQ: + + case CC_OP_ADCB: + case CC_OP_ADCW: + case CC_OP_ADCL: + case CC_OP_ADCQ: + + case CC_OP_SBBB: + case CC_OP_SBBW: + case CC_OP_SBBL: + case CC_OP_SBBQ: + + case CC_OP_LOGICB: + case CC_OP_LOGICW: + case CC_OP_LOGICL: + case CC_OP_LOGICQ: 
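+        /* for these cc_ops CC_DST holds the result, so the JCC_Z and
+           JCC_S tests below need no full eflags computation */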
+ + case CC_OP_INCB: + case CC_OP_INCW: + case CC_OP_INCL: + case CC_OP_INCQ: + + case CC_OP_DECB: + case CC_OP_DECW: + case CC_OP_DECL: + case CC_OP_DECQ: + + case CC_OP_SHLB: + case CC_OP_SHLW: + case CC_OP_SHLL: + case CC_OP_SHLQ: + + case CC_OP_SARB: + case CC_OP_SARW: + case CC_OP_SARL: + case CC_OP_SARQ: + switch(jcc_op) { + case JCC_Z: + func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op]; + break; + case JCC_S: + func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op]; + break; + default: + func = NULL; + break; + } + break; + default: + func = NULL; + break; + } + + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + + if (!func) { + gen_setcc_slow[jcc_op](); + func = gen_op_jnz_T0_label; + } + + if (inv) { + tmp = val; + val = next_eip; + next_eip = tmp; + } + tb = s->tb; + + l1 = gen_new_label(); + func(l1); + + gen_goto_tb(s, 0, next_eip); + + gen_set_label(l1); + gen_goto_tb(s, 1, val); + + s->is_jmp = 3; + } else { + + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + gen_setcc_slow[jcc_op](); + if (inv) { + tmp = val; + val = next_eip; + next_eip = tmp; + } + l1 = gen_new_label(); + l2 = gen_new_label(); + gen_op_jnz_T0_label(l1); + gen_jmp_im(next_eip); + gen_op_jmp_label(l2); + gen_set_label(l1); + gen_jmp_im(val); + gen_set_label(l2); + gen_eob(s); + } +} + +static void gen_setcc(DisasContext *s, int b) +{ + int inv, jcc_op; + GenOpFunc *func; + + inv = b & 1; + jcc_op = (b >> 1) & 7; + switch(s->cc_op) { + /* we optimize the cmp/jcc case */ + case CC_OP_SUBB: + case CC_OP_SUBW: + case CC_OP_SUBL: + case CC_OP_SUBQ: + func = gen_setcc_sub[s->cc_op - CC_OP_SUBB][jcc_op]; + if (!func) + goto slow_jcc; + break; + + /* some jumps are easy to compute */ + case CC_OP_ADDB: + case CC_OP_ADDW: + case CC_OP_ADDL: + case CC_OP_ADDQ: + + case CC_OP_LOGICB: + case CC_OP_LOGICW: + case CC_OP_LOGICL: + case CC_OP_LOGICQ: + + case CC_OP_INCB: + case CC_OP_INCW: + case CC_OP_INCL: + case CC_OP_INCQ: + + case CC_OP_DECB: + case CC_OP_DECW: + case CC_OP_DECL: + case CC_OP_DECQ: + + case CC_OP_SHLB: + case CC_OP_SHLW: + case CC_OP_SHLL: + case CC_OP_SHLQ: + switch(jcc_op) { + case JCC_Z: + func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op]; + break; + case JCC_S: + func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op]; + break; + default: + goto slow_jcc; + } + break; + default: + slow_jcc: + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + func = gen_setcc_slow[jcc_op]; + break; + } + func(); + if (inv) { + gen_op_xor_T0_1(); + } +} + +/* move T0 to seg_reg and compute if the CPU state may change. Never + call this function with seg_reg == R_CS */ +static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip) +{ + if (s->pe && !s->vm86) { + /* XXX: optimize by finding processor state dynamically */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(cur_eip); + gen_op_movl_seg_T0(seg_reg); + /* abort translation because the addseg value may change or + because ss32 may change. 
For R_SS, translation must always
+           stop, as special handling is needed to disable hardware
+           interrupts for the next instruction */
+        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
+            s->is_jmp = 3;
+    } else {
+        gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
+        if (seg_reg == R_SS)
+            s->is_jmp = 3;
+    }
+}
+
+static inline void gen_stack_update(DisasContext *s, int addend)
+{
+#ifdef TARGET_X86_64
+    if (CODE64(s)) {
+        if (addend == 8)
+            gen_op_addq_ESP_8();
+        else
+            gen_op_addq_ESP_im(addend);
+    } else
+#endif
+    if (s->ss32) {
+        if (addend == 2)
+            gen_op_addl_ESP_2();
+        else if (addend == 4)
+            gen_op_addl_ESP_4();
+        else
+            gen_op_addl_ESP_im(addend);
+    } else {
+        if (addend == 2)
+            gen_op_addw_ESP_2();
+        else if (addend == 4)
+            gen_op_addw_ESP_4();
+        else
+            gen_op_addw_ESP_im(addend);
+    }
+}
+
+/* generate a push. It depends on ss32, addseg and dflag */
+static void gen_push_T0(DisasContext *s)
+{
+#ifdef TARGET_X86_64
+    if (CODE64(s)) {
+        gen_op_movq_A0_reg[R_ESP]();
+        if (s->dflag) {
+            gen_op_subq_A0_8();
+            gen_op_st_T0_A0[OT_QUAD + s->mem_index]();
+        } else {
+            gen_op_subq_A0_2();
+            gen_op_st_T0_A0[OT_WORD + s->mem_index]();
+        }
+        gen_op_movq_ESP_A0();
+    } else
+#endif
+    {
+        gen_op_movl_A0_reg[R_ESP]();
+        if (!s->dflag)
+            gen_op_subl_A0_2();
+        else
+            gen_op_subl_A0_4();
+        if (s->ss32) {
+            if (s->addseg) {
+                gen_op_movl_T1_A0();
+                gen_op_addl_A0_SS();
+            }
+        } else {
+            gen_op_andl_A0_ffff();
+            gen_op_movl_T1_A0();
+            gen_op_addl_A0_SS();
+        }
+        gen_op_st_T0_A0[s->dflag + 1 + s->mem_index]();
+        if (s->ss32 && !s->addseg)
+            gen_op_movl_ESP_A0();
+        else
+            gen_op_mov_reg_T1[s->ss32 + 1][R_ESP]();
+    }
+}
+
+/* generate a push. It depends on ss32, addseg and dflag */
+/* slower version for T1, only used for call Ev */
+static void gen_push_T1(DisasContext *s)
+{
+#ifdef TARGET_X86_64
+    if (CODE64(s)) {
+        gen_op_movq_A0_reg[R_ESP]();
+        if (s->dflag) {
+            gen_op_subq_A0_8();
+            gen_op_st_T1_A0[OT_QUAD + s->mem_index]();
+        } else {
+            gen_op_subq_A0_2();
+            /* store T1, not T0: this function pushes T1 */
+            gen_op_st_T1_A0[OT_WORD + s->mem_index]();
+        }
+        gen_op_movq_ESP_A0();
+    } else
+#endif
+    {
+        gen_op_movl_A0_reg[R_ESP]();
+        if (!s->dflag)
+            gen_op_subl_A0_2();
+        else
+            gen_op_subl_A0_4();
+        if (s->ss32) {
+            if (s->addseg) {
+                gen_op_addl_A0_SS();
+            }
+        } else {
+            gen_op_andl_A0_ffff();
+            gen_op_addl_A0_SS();
+        }
+        gen_op_st_T1_A0[s->dflag + 1 + s->mem_index]();
+
+        if (s->ss32 && !s->addseg)
+            gen_op_movl_ESP_A0();
+        else
+            gen_stack_update(s, (-2) << s->dflag);
+    }
+}
+
+/* two step pop is necessary for precise exceptions */
+static void gen_pop_T0(DisasContext *s)
+{
+#ifdef TARGET_X86_64
+    if (CODE64(s)) {
+        gen_op_movq_A0_reg[R_ESP]();
+        gen_op_ld_T0_A0[(s->dflag ?
OT_QUAD : OT_WORD) + s->mem_index](); + } else +#endif + { + gen_op_movl_A0_reg[R_ESP](); + if (s->ss32) { + if (s->addseg) + gen_op_addl_A0_SS(); + } else { + gen_op_andl_A0_ffff(); + gen_op_addl_A0_SS(); + } + gen_op_ld_T0_A0[s->dflag + 1 + s->mem_index](); + } +} + +static void gen_pop_update(DisasContext *s) +{ +#ifdef TARGET_X86_64 + if (CODE64(s) && s->dflag) { + gen_stack_update(s, 8); + } else +#endif + { + gen_stack_update(s, 2 << s->dflag); + } +} + +static void gen_stack_A0(DisasContext *s) +{ + gen_op_movl_A0_ESP(); + if (!s->ss32) + gen_op_andl_A0_ffff(); + gen_op_movl_T1_A0(); + if (s->addseg) + gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); +} + +/* NOTE: wrap around in 16 bit not fully handled */ +static void gen_pusha(DisasContext *s) +{ + int i; + gen_op_movl_A0_ESP(); + gen_op_addl_A0_im(-16 << s->dflag); + if (!s->ss32) + gen_op_andl_A0_ffff(); + gen_op_movl_T1_A0(); + if (s->addseg) + gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); + for(i = 0;i < 8; i++) { + gen_op_mov_TN_reg[OT_LONG][0][7 - i](); + gen_op_st_T0_A0[OT_WORD + s->dflag + s->mem_index](); + gen_op_addl_A0_im(2 << s->dflag); + } + gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP](); +} + +/* NOTE: wrap around in 16 bit not fully handled */ +static void gen_popa(DisasContext *s) +{ + int i; + gen_op_movl_A0_ESP(); + if (!s->ss32) + gen_op_andl_A0_ffff(); + gen_op_movl_T1_A0(); + gen_op_addl_T1_im(16 << s->dflag); + if (s->addseg) + gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base)); + for(i = 0;i < 8; i++) { + /* ESP is not reloaded */ + if (i != 3) { + gen_op_ld_T0_A0[OT_WORD + s->dflag + s->mem_index](); + gen_op_mov_reg_T0[OT_WORD + s->dflag][7 - i](); + } + gen_op_addl_A0_im(2 << s->dflag); + } + gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP](); +} + +static void gen_enter(DisasContext *s, int esp_addend, int level) +{ + int ot, opsize; + + level &= 0x1f; +#ifdef TARGET_X86_64 + if (CODE64(s)) { + ot = s->dflag ? 
OT_QUAD : OT_WORD;
+        opsize = 1 << ot;
+
+        gen_op_movl_A0_ESP();
+        gen_op_addq_A0_im(-opsize);
+        gen_op_movl_T1_A0();
+
+        /* push bp */
+        gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+        gen_op_st_T0_A0[ot + s->mem_index]();
+        if (level) {
+            gen_op_enter64_level(level, (ot == OT_QUAD));
+        }
+        gen_op_mov_reg_T1[ot][R_EBP]();
+        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
+        gen_op_mov_reg_T1[OT_QUAD][R_ESP]();
+    } else
+#endif
+    {
+        ot = s->dflag + OT_WORD;
+        opsize = 2 << s->dflag;
+
+        gen_op_movl_A0_ESP();
+        gen_op_addl_A0_im(-opsize);
+        if (!s->ss32)
+            gen_op_andl_A0_ffff();
+        gen_op_movl_T1_A0();
+        if (s->addseg)
+            gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+        /* push bp */
+        gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+        gen_op_st_T0_A0[ot + s->mem_index]();
+        if (level) {
+            gen_op_enter_level(level, s->dflag);
+        }
+        gen_op_mov_reg_T1[ot][R_EBP]();
+        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
+        gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
+    }
+}
+
+static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
+{
+    if (s->cc_op != CC_OP_DYNAMIC)
+        gen_op_set_cc_op(s->cc_op);
+    gen_jmp_im(cur_eip);
+    gen_op_raise_exception(trapno);
+    s->is_jmp = 3;
+}
+
+/* an interrupt is different from an exception because of the
+   privilege checks */
+static void gen_interrupt(DisasContext *s, int intno,
+                          target_ulong cur_eip, target_ulong next_eip)
+{
+    if (s->cc_op != CC_OP_DYNAMIC)
+        gen_op_set_cc_op(s->cc_op);
+    gen_jmp_im(cur_eip);
+    gen_op_raise_interrupt(intno, (int)(next_eip - cur_eip));
+    s->is_jmp = 3;
+}
+
+static void gen_debug(DisasContext *s, target_ulong cur_eip)
+{
+    if (s->cc_op != CC_OP_DYNAMIC)
+        gen_op_set_cc_op(s->cc_op);
+    gen_jmp_im(cur_eip);
+    gen_op_debug();
+    s->is_jmp = 3;
+}
+
+/* generate a generic end of block. Trace exception is also generated
+   if needed */
+static void gen_eob(DisasContext *s)
+{
+    if (s->cc_op != CC_OP_DYNAMIC)
+        gen_op_set_cc_op(s->cc_op);
+    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
+        gen_op_reset_inhibit_irq();
+    }
+    if (s->singlestep_enabled) {
+        gen_op_debug();
+    } else if (s->tf) {
+        gen_op_raise_exception(EXCP01_SSTP);
+    } else {
+        gen_op_movl_T0_0();
+        gen_op_exit_tb();
+    }
+    s->is_jmp = 3;
+}
+
+/* generate a jump to eip.
No segment change must happen before as a + direct call to the next block may occur */ +static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) +{ + if (s->jmp_opt) { + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + gen_goto_tb(s, tb_num, eip); + s->is_jmp = 3; + } else { + gen_jmp_im(eip); + gen_eob(s); + } +} + +static void gen_jmp(DisasContext *s, target_ulong eip) +{ + gen_jmp_tb(s, eip, 0); +} + +static void gen_movtl_T0_im(target_ulong val) +{ +#ifdef TARGET_X86_64 + if ((int32_t)val == val) { + gen_op_movl_T0_im(val); + } else { + gen_op_movq_T0_im64(val >> 32, val); + } +#else + gen_op_movl_T0_im(val); +#endif +} + +static void gen_movtl_T1_im(target_ulong val) +{ +#ifdef TARGET_X86_64 + if ((int32_t)val == val) { + gen_op_movl_T1_im(val); + } else { + gen_op_movq_T1_im64(val >> 32, val); + } +#else + gen_op_movl_T1_im(val); +#endif +} + +static void gen_add_A0_im(DisasContext *s, int val) +{ +#ifdef TARGET_X86_64 + if (CODE64(s)) + gen_op_addq_A0_im(val); + else +#endif + gen_op_addl_A0_im(val); +} + +static GenOpFunc1 *gen_ldq_env_A0[3] = { + gen_op_ldq_raw_env_A0, +#ifndef CONFIG_USER_ONLY + gen_op_ldq_kernel_env_A0, + gen_op_ldq_user_env_A0, +#endif +}; + +static GenOpFunc1 *gen_stq_env_A0[3] = { + gen_op_stq_raw_env_A0, +#ifndef CONFIG_USER_ONLY + gen_op_stq_kernel_env_A0, + gen_op_stq_user_env_A0, +#endif +}; + +static GenOpFunc1 *gen_ldo_env_A0[3] = { + gen_op_ldo_raw_env_A0, +#ifndef CONFIG_USER_ONLY + gen_op_ldo_kernel_env_A0, + gen_op_ldo_user_env_A0, +#endif +}; + +static GenOpFunc1 *gen_sto_env_A0[3] = { + gen_op_sto_raw_env_A0, +#ifndef CONFIG_USER_ONLY + gen_op_sto_kernel_env_A0, + gen_op_sto_user_env_A0, +#endif +}; + +#define SSE_SPECIAL ((GenOpFunc2 *)1) + +#define MMX_OP2(x) { gen_op_ ## x ## _mmx, gen_op_ ## x ## _xmm } +#define SSE_FOP(x) { gen_op_ ## x ## ps, gen_op_ ## x ## pd, \ + gen_op_ ## x ## ss, gen_op_ ## x ## sd, } + +static GenOpFunc2 *sse_op_table1[256][4] = { + /* pure SSE operations */ + [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ + [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ + [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ + [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ + [0x14] = { gen_op_punpckldq_xmm, gen_op_punpcklqdq_xmm }, + [0x15] = { gen_op_punpckhdq_xmm, gen_op_punpckhqdq_xmm }, + [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ + [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ + + [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ + [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ + [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ + [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */ + [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ + [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */ + [0x2e] = { gen_op_ucomiss, gen_op_ucomisd }, + [0x2f] = { gen_op_comiss, gen_op_comisd }, + [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ + [0x51] = SSE_FOP(sqrt), + [0x52] = { gen_op_rsqrtps, NULL, gen_op_rsqrtss, NULL }, + [0x53] = { gen_op_rcpps, NULL, gen_op_rcpss, NULL }, + [0x54] = { gen_op_pand_xmm, gen_op_pand_xmm }, /* andps, 
andpd */
+    [0x55] = { gen_op_pandn_xmm, gen_op_pandn_xmm }, /* andnps, andnpd */
+    [0x56] = { gen_op_por_xmm, gen_op_por_xmm }, /* orps, orpd */
+    [0x57] = { gen_op_pxor_xmm, gen_op_pxor_xmm }, /* xorps, xorpd */
+    [0x58] = SSE_FOP(add),
+    [0x59] = SSE_FOP(mul),
+    [0x5a] = { gen_op_cvtps2pd, gen_op_cvtpd2ps,
+               gen_op_cvtss2sd, gen_op_cvtsd2ss },
+    [0x5b] = { gen_op_cvtdq2ps, gen_op_cvtps2dq, gen_op_cvttps2dq },
+    [0x5c] = SSE_FOP(sub),
+    [0x5d] = SSE_FOP(min),
+    [0x5e] = SSE_FOP(div),
+    [0x5f] = SSE_FOP(max),
+
+    [0xc2] = SSE_FOP(cmpeq),
+    [0xc6] = { (GenOpFunc2 *)gen_op_shufps, (GenOpFunc2 *)gen_op_shufpd },
+
+    /* MMX ops and their SSE extensions */
+    [0x60] = MMX_OP2(punpcklbw),
+    [0x61] = MMX_OP2(punpcklwd),
+    [0x62] = MMX_OP2(punpckldq),
+    [0x63] = MMX_OP2(packsswb),
+    [0x64] = MMX_OP2(pcmpgtb),
+    [0x65] = MMX_OP2(pcmpgtw),
+    [0x66] = MMX_OP2(pcmpgtl),
+    [0x67] = MMX_OP2(packuswb),
+    [0x68] = MMX_OP2(punpckhbw),
+    [0x69] = MMX_OP2(punpckhwd),
+    [0x6a] = MMX_OP2(punpckhdq),
+    [0x6b] = MMX_OP2(packssdw),
+    [0x6c] = { NULL, gen_op_punpcklqdq_xmm },
+    [0x6d] = { NULL, gen_op_punpckhqdq_xmm },
+    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
+    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+    [0x70] = { (GenOpFunc2 *)gen_op_pshufw_mmx,
+               (GenOpFunc2 *)gen_op_pshufd_xmm,
+               (GenOpFunc2 *)gen_op_pshufhw_xmm,
+               (GenOpFunc2 *)gen_op_pshuflw_xmm },
+    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
+    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
+    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
+    [0x74] = MMX_OP2(pcmpeqb),
+    [0x75] = MMX_OP2(pcmpeqw),
+    [0x76] = MMX_OP2(pcmpeql),
+    [0x77] = { SSE_SPECIAL }, /* emms */
+    [0x7c] = { NULL, gen_op_haddpd, NULL, gen_op_haddps },
+    [0x7d] = { NULL, gen_op_hsubpd, NULL, gen_op_hsubps },
+    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
+    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
+    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
+    [0xd0] = { NULL, gen_op_addsubpd, NULL, gen_op_addsubps },
+    [0xd1] = MMX_OP2(psrlw),
+    [0xd2] = MMX_OP2(psrld),
+    [0xd3] = MMX_OP2(psrlq),
+    [0xd4] = MMX_OP2(paddq),
+    [0xd5] = MMX_OP2(pmullw),
+    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
+    [0xd8] = MMX_OP2(psubusb),
+    [0xd9] = MMX_OP2(psubusw),
+    [0xda] = MMX_OP2(pminub),
+    [0xdb] = MMX_OP2(pand),
+    [0xdc] = MMX_OP2(paddusb),
+    [0xdd] = MMX_OP2(paddusw),
+    [0xde] = MMX_OP2(pmaxub),
+    [0xdf] = MMX_OP2(pandn),
+    [0xe0] = MMX_OP2(pavgb),
+    [0xe1] = MMX_OP2(psraw),
+    [0xe2] = MMX_OP2(psrad),
+    [0xe3] = MMX_OP2(pavgw),
+    [0xe4] = MMX_OP2(pmulhuw),
+    [0xe5] = MMX_OP2(pmulhw),
+    [0xe6] = { NULL, gen_op_cvttpd2dq, gen_op_cvtdq2pd, gen_op_cvtpd2dq },
+    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
+    [0xe8] = MMX_OP2(psubsb),
+    [0xe9] = MMX_OP2(psubsw),
+    [0xea] = MMX_OP2(pminsw),
+    [0xeb] = MMX_OP2(por),
+    [0xec] = MMX_OP2(paddsb),
+    [0xed] = MMX_OP2(paddsw),
+    [0xee] = MMX_OP2(pmaxsw),
+    [0xef] = MMX_OP2(pxor),
+    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
+    [0xf1] = MMX_OP2(psllw),
+    [0xf2] = MMX_OP2(pslld),
+    [0xf3] = MMX_OP2(psllq),
+    [0xf4] = MMX_OP2(pmuludq),
+    [0xf5] = MMX_OP2(pmaddwd),
+    [0xf6] = MMX_OP2(psadbw),
+    [0xf7] = MMX_OP2(maskmov),
+    [0xf8] = MMX_OP2(psubb),
+    [0xf9] = MMX_OP2(psubw),
+    [0xfa] = MMX_OP2(psubl),
+    [0xfb] = MMX_OP2(psubq),
+    [0xfc] = MMX_OP2(paddb),
+    [0xfd] = MMX_OP2(paddw),
+    [0xfe] = MMX_OP2(paddl),
+};
+
+static GenOpFunc2 *sse_op_table2[3 * 8][2] = {
+    [0 + 2] = MMX_OP2(psrlw),
+    [0 + 4] = MMX_OP2(psraw),
+    [0 + 6] = MMX_OP2(psllw),
+    [8 + 2] = MMX_OP2(psrld),
+    [8 + 4] = MMX_OP2(psrad),
+    [8 + 6] = MMX_OP2(pslld),
+    [16 + 2] = MMX_OP2(psrlq),
+    [16 + 3] = { NULL, gen_op_psrldq_xmm },
+    [16 + 6] = MMX_OP2(psllq),
+    [16 + 7] = { NULL, gen_op_pslldq_xmm },
+};
+
+static GenOpFunc1 *sse_op_table3[4 * 3] = {
+    gen_op_cvtsi2ss,
+    gen_op_cvtsi2sd,
+    X86_64_ONLY(gen_op_cvtsq2ss),
+    X86_64_ONLY(gen_op_cvtsq2sd),
+
+    gen_op_cvttss2si,
+    gen_op_cvttsd2si,
+    X86_64_ONLY(gen_op_cvttss2sq),
+    X86_64_ONLY(gen_op_cvttsd2sq),
+
+    gen_op_cvtss2si,
+    gen_op_cvtsd2si,
+    X86_64_ONLY(gen_op_cvtss2sq),
+    X86_64_ONLY(gen_op_cvtsd2sq),
+};
+
+static GenOpFunc2 *sse_op_table4[8][4] = {
+    SSE_FOP(cmpeq),
+    SSE_FOP(cmplt),
+    SSE_FOP(cmple),
+    SSE_FOP(cmpunord),
+    SSE_FOP(cmpneq),
+    SSE_FOP(cmpnlt),
+    SSE_FOP(cmpnle),
+    SSE_FOP(cmpord),
+};
+
+static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
+{
+    int b1, op1_offset, op2_offset, is_xmm, val, ot;
+    int modrm, mod, rm, reg, reg_addr, offset_addr;
+    GenOpFunc2 *sse_op2;
+    GenOpFunc3 *sse_op3;
+
+    b &= 0xff;
+    if (s->prefix & PREFIX_DATA)
+        b1 = 1;
+    else if (s->prefix & PREFIX_REPZ)
+        b1 = 2;
+    else if (s->prefix & PREFIX_REPNZ)
+        b1 = 3;
+    else
+        b1 = 0;
+    sse_op2 = sse_op_table1[b][b1];
+    if (!sse_op2)
+        goto illegal_op;
+    if (b <= 0x5f || b == 0xc6 || b == 0xc2) {
+        is_xmm = 1;
+    } else {
+        if (b1 == 0) {
+            /* MMX case */
+            is_xmm = 0;
+        } else {
+            is_xmm = 1;
+        }
+    }
+    /* simple MMX/SSE operation */
+    if (s->flags & HF_TS_MASK) {
+        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+        return;
+    }
+    if (s->flags & HF_EM_MASK) {
+    illegal_op:
+        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+        return;
+    }
+    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
+        goto illegal_op;
+    if (b == 0x77) {
+        /* emms */
+        gen_op_emms();
+        return;
+    }
+    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
+       the static cpu state) */
+    if (!is_xmm) {
+        gen_op_enter_mmx();
+    }
+
+    modrm = ldub_code(s->pc++);
+    reg = ((modrm >> 3) & 7);
+    if (is_xmm)
+        reg |= rex_r;
+    mod = (modrm >> 6) & 3;
+    if (sse_op2 == SSE_SPECIAL) {
+        b |= (b1 << 8);
+        switch(b) {
+        case 0x0e7: /* movntq */
+            if (mod == 3)
+                goto illegal_op;
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));
+            break;
+        case 0x1e7: /* movntdq */
+        case 0x02b: /* movntps */
+        case 0x12b: /* movntpd */
+            if (mod == 3)
+                goto illegal_op;
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            gen_sto_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+            break;
+        case 0x3f0: /* lddqu */
+            /* lddqu is a 128 bit load, not a store, so it gets its own
+               case with the load direction */
+            if (mod == 3)
+                goto illegal_op;
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+            break;
+        case 0x6e: /* movd mm, ea */
+            gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+            gen_op_movl_mm_T0_mmx(offsetof(CPUX86State,fpregs[reg].mmx));
+            break;
+        case 0x16e: /* movd xmm, ea */
+            gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+            gen_op_movl_mm_T0_xmm(offsetof(CPUX86State,xmm_regs[reg]));
+            break;
+        case 0x6f: /* movq mm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));
+            } else {
+                rm = (modrm & 7);
+                gen_op_movq(offsetof(CPUX86State,fpregs[reg].mmx),
+                            offsetof(CPUX86State,fpregs[rm].mmx));
+            }
+            break;
+        case 0x010: /* movups */
+        case 0x110: /* movupd */
+        case 0x028: /* movaps */
+        case 0x128: /* movapd */
+        case 0x16f: /* movdqa xmm, ea */
+        case 0x26f: /* movdqu xmm, ea */
+            if (mod != 3) {
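+                /* memory source: fetch the full 128 bit operand */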
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
+                            offsetof(CPUX86State,xmm_regs[rm]));
+            }
+            break;
+        case 0x210: /* movss xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+                gen_op_movl_T0_0();
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
+            }
+            break;
+        case 0x310: /* movsd xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+                gen_op_movl_T0_0();
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+            }
+            break;
+        case 0x012: /* movlps */
+        case 0x112: /* movlpd */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            } else {
+                /* movhlps */
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+            }
+            break;
+        case 0x212: /* movsldup */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
+            }
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
+                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
+                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
+            break;
+        case 0x312: /* movddup */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+            }
+            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
+                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            break;
+        case 0x016: /* movhps */
+        case 0x116: /* movhpd */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+            } else {
+                /* movlhps */
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+            }
+            break;
+        case 0x216: /* movshdup */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldo_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
+            }
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
+                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
+            gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
+                        offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
+            break;
+        case 0x7e: /* movd ea, mm */
+            gen_op_movl_T0_mm_mmx(offsetof(CPUX86State,fpregs[reg].mmx));
+            gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+            break;
+        case 0x17e: /* movd ea, xmm */
+            gen_op_movl_T0_mm_xmm(offsetof(CPUX86State,xmm_regs[reg]));
+            gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+            break;
+        case 0x27e: /* movq xmm, ea */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+            }
+            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+            break;
+        case 0x7f: /* movq ea, mm */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,fpregs[reg].mmx));
+            } else {
+                rm = (modrm & 7);
+                gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
+                            offsetof(CPUX86State,fpregs[reg].mmx));
+            }
+            break;
+        case 0x011: /* movups */
+        case 0x111: /* movupd */
+        case 0x029: /* movaps */
+        case 0x129: /* movapd */
+        case 0x17f: /* movdqa ea, xmm */
+        case 0x27f: /* movdqu ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_sto_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg]));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
+                            offsetof(CPUX86State,xmm_regs[reg]));
+            }
+            break;
+        case 0x211: /* movss ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_op_movl_T0_env(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+                gen_op_st_T0_A0[OT_LONG + s->mem_index]();
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
+                            offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
+            }
+            break;
+        case 0x311: /* movsd ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            }
+            break;
+        case 0x013: /* movlps */
+        case 0x113: /* movlpd */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            } else {
+                goto illegal_op;
+            }
+            break;
+        case 0x017: /* movhps */
+        case 0x117: /* movhpd */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+            } else {
+                goto illegal_op;
+            }
+            break;
+        case 0x71: /* shift mm, im */
+        case 0x72:
+        case 0x73:
+        case 0x171: /* shift xmm, im */
+        case 0x172:
+        case 0x173:
+            val = ldub_code(s->pc++);
+            if (is_xmm) {
+                gen_op_movl_T0_im(val);
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+                gen_op_movl_T0_0();
+                gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(1)));
+                op1_offset = offsetof(CPUX86State,xmm_t0);
+            } else {
+                gen_op_movl_T0_im(val);
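+                /* build the 64 bit shift count in mmx_t0: low 32 bits
+                   hold the immediate, high 32 bits are zero */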
+                gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+                gen_op_movl_T0_0();
+                gen_op_movl_env_T0(offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+                op1_offset = offsetof(CPUX86State,mmx_t0);
+            }
+            sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
+            if (!sse_op2)
+                goto illegal_op;
+            if (is_xmm) {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            } else {
+                rm = (modrm & 7);
+                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+            }
+            sse_op2(op2_offset, op1_offset);
+            break;
+        case 0x050: /* movmskps */
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_movmskps(offsetof(CPUX86State,xmm_regs[rm]));
+            gen_op_mov_reg_T0[OT_LONG][reg]();
+            break;
+        case 0x150: /* movmskpd */
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_movmskpd(offsetof(CPUX86State,xmm_regs[rm]));
+            gen_op_mov_reg_T0[OT_LONG][reg]();
+            break;
+        case 0x02a: /* cvtpi2ps */
+        case 0x12a: /* cvtpi2pd */
+            gen_op_enter_mmx();
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                op2_offset = offsetof(CPUX86State,mmx_t0);
+                gen_ldq_env_A0[s->mem_index >> 2](op2_offset);
+            } else {
+                rm = (modrm & 7);
+                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+            }
+            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+            switch(b >> 8) {
+            case 0x0:
+                gen_op_cvtpi2ps(op1_offset, op2_offset);
+                break;
+            default:
+            case 0x1:
+                gen_op_cvtpi2pd(op1_offset, op2_offset);
+                break;
+            }
+            break;
+        case 0x22a: /* cvtsi2ss */
+        case 0x32a: /* cvtsi2sd */
+            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
+            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+            sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)](op1_offset);
+            break;
+        case 0x02c: /* cvttps2pi */
+        case 0x12c: /* cvttpd2pi */
+        case 0x02d: /* cvtps2pi */
+        case 0x12d: /* cvtpd2pi */
+            gen_op_enter_mmx();
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                op2_offset = offsetof(CPUX86State,xmm_t0);
+                gen_ldo_env_A0[s->mem_index >> 2](op2_offset);
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            }
+            op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
+            switch(b) {
+            case 0x02c:
+                gen_op_cvttps2pi(op1_offset, op2_offset);
+                break;
+            case 0x12c:
+                gen_op_cvttpd2pi(op1_offset, op2_offset);
+                break;
+            case 0x02d:
+                gen_op_cvtps2pi(op1_offset, op2_offset);
+                break;
+            case 0x12d:
+                gen_op_cvtpd2pi(op1_offset, op2_offset);
+                break;
+            }
+            break;
+        case 0x22c: /* cvttss2si */
+        case 0x32c: /* cvttsd2si */
+        case 0x22d: /* cvtss2si */
+        case 0x32d: /* cvtsd2si */
+            ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
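+            /* with REX.W the destination register is 64 bit wide */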
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                if ((b >> 8) & 1) {
+                    gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
+                } else {
+                    gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+                    gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+                }
+                op2_offset = offsetof(CPUX86State,xmm_t0);
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+            }
+            sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
+                          (b & 1) * 4](op2_offset);
+            gen_op_mov_reg_T0[ot][reg]();
+            break;
+        case 0xc4: /* pinsrw */
+        case 0x1c4:
+            s->rip_offset = 1;
+            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+            val = ldub_code(s->pc++);
+            if (b1) {
+                val &= 7;
+                gen_op_pinsrw_xmm(offsetof(CPUX86State,xmm_regs[reg]), val);
+            } else {
+                val &= 3;
+                gen_op_pinsrw_mmx(offsetof(CPUX86State,fpregs[reg].mmx), val);
+            }
+            break;
+        case 0xc5: /* pextrw */
+        case 0x1c5:
+            if (mod != 3)
+                goto illegal_op;
+            val = ldub_code(s->pc++);
+            if (b1) {
+                val &= 7;
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_pextrw_xmm(offsetof(CPUX86State,xmm_regs[rm]), val);
+            } else {
+                val &= 3;
+                rm = (modrm & 7);
+                gen_op_pextrw_mmx(offsetof(CPUX86State,fpregs[rm].mmx), val);
+            }
+            reg = ((modrm >> 3) & 7) | rex_r;
+            gen_op_mov_reg_T0[OT_LONG][reg]();
+            break;
+        case 0x1d6: /* movq ea, xmm */
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_stq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+            } else {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
+                            offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
+                gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
+            }
+            break;
+        case 0x2d6: /* movq2dq */
+            gen_op_enter_mmx();
+            rm = (modrm & 7);
+            gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
+                        offsetof(CPUX86State,fpregs[rm].mmx));
+            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
+            break;
+        case 0x3d6: /* movdq2q */
+            gen_op_enter_mmx();
+            rm = (modrm & 7) | REX_B(s);
+            gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
+                        offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
+            break;
+        case 0xd7: /* pmovmskb */
+        case 0x1d7:
+            if (mod != 3)
+                goto illegal_op;
+            if (b1) {
+                rm = (modrm & 7) | REX_B(s);
+                gen_op_pmovmskb_xmm(offsetof(CPUX86State,xmm_regs[rm]));
+            } else {
+                rm = (modrm & 7);
+                gen_op_pmovmskb_mmx(offsetof(CPUX86State,fpregs[rm].mmx));
+            }
+            reg = ((modrm >> 3) & 7) | rex_r;
+            gen_op_mov_reg_T0[OT_LONG][reg]();
+            break;
+        default:
+            goto illegal_op;
+        }
+    } else {
+        /* generic MMX or SSE operation */
+        switch(b) {
+        case 0xf7:
+            /* maskmov: we must prepare A0 */
+            if (mod != 3)
+                goto illegal_op;
+#ifdef TARGET_X86_64
+            if (s->aflag == 2) {
+                gen_op_movq_A0_reg[R_EDI]();
+            } else
+#endif
+            {
+                gen_op_movl_A0_reg[R_EDI]();
+                if (s->aflag == 0)
+                    gen_op_andl_A0_ffff();
+            }
+            gen_add_A0_ds_seg(s);
+            break;
+        case 0x70: /* pshufx insn */
+        case 0xc6: /* pshufx insn */
+        case 0xc2: /* compare insns */
+            s->rip_offset = 1;
+            break;
+        default:
+            break;
+        }
+        if (is_xmm) {
+            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                op2_offset = offsetof(CPUX86State,xmm_t0);
+                if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
+                                b == 0xc2)) {
+                    /* specific case for SSE single instructions */
+                    if (b1 == 2) {
+                        /* 32 bit access */
+                        gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+                        gen_op_movl_env_T0(offsetof(CPUX86State,xmm_t0.XMM_L(0)));
+                    } else {
+                        /* 64 bit access */
gen_ldq_env_A0[s->mem_index >> 2](offsetof(CPUX86State,xmm_t0.XMM_D(0))); + } + } else { + gen_ldo_env_A0[s->mem_index >> 2](op2_offset); + } + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + } else { + op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_ldq_env_A0[s->mem_index >> 2](op2_offset); + } else { + rm = (modrm & 7); + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + } + switch(b) { + case 0x70: /* pshufx insn */ + case 0xc6: /* pshufx insn */ + val = ldub_code(s->pc++); + sse_op3 = (GenOpFunc3 *)sse_op2; + sse_op3(op1_offset, op2_offset, val); + break; + case 0xc2: + /* compare insns */ + val = ldub_code(s->pc++); + if (val >= 8) + goto illegal_op; + sse_op2 = sse_op_table4[val][b1]; + sse_op2(op1_offset, op2_offset); + break; + default: + sse_op2(op1_offset, op2_offset); + break; + } + if (b == 0x2e || b == 0x2f) { + s->cc_op = CC_OP_EFLAGS; + } + } +} + + +/* convert one instruction. s->is_jmp is set if the translation must + be stopped. Return the next pc value */ +static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) +{ + int b, prefixes, aflag, dflag; + int shift, ot; + int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val; + target_ulong next_eip, tval; + int rex_w, rex_r; + + s->pc = pc_start; + prefixes = 0; + aflag = s->code32; + dflag = s->code32; + s->override = -1; + rex_w = -1; + rex_r = 0; +#ifdef TARGET_X86_64 + s->rex_x = 0; + s->rex_b = 0; + x86_64_hregs = 0; +#endif + s->rip_offset = 0; /* for relative ip address */ + next_byte: + b = ldub_code(s->pc); + s->pc++; + /* check prefixes */ +#ifdef TARGET_X86_64 + if (CODE64(s)) { + switch (b) { + case 0xf3: + prefixes |= PREFIX_REPZ; + goto next_byte; + case 0xf2: + prefixes |= PREFIX_REPNZ; + goto next_byte; + case 0xf0: + prefixes |= PREFIX_LOCK; + goto next_byte; + case 0x2e: + s->override = R_CS; + goto next_byte; + case 0x36: + s->override = R_SS; + goto next_byte; + case 0x3e: + s->override = R_DS; + goto next_byte; + case 0x26: + s->override = R_ES; + goto next_byte; + case 0x64: + s->override = R_FS; + goto next_byte; + case 0x65: + s->override = R_GS; + goto next_byte; + case 0x66: + prefixes |= PREFIX_DATA; + goto next_byte; + case 0x67: + prefixes |= PREFIX_ADR; + goto next_byte; + case 0x40 ... 
0x4f: + /* REX prefix */ + rex_w = (b >> 3) & 1; + rex_r = (b & 0x4) << 1; + s->rex_x = (b & 0x2) << 2; + REX_B(s) = (b & 0x1) << 3; + x86_64_hregs = 1; /* select uniform byte register addressing */ + goto next_byte; + } + if (rex_w == 1) { + /* 0x66 is ignored if rex.w is set */ + dflag = 2; + } else { + if (prefixes & PREFIX_DATA) + dflag ^= 1; + } + if (!(prefixes & PREFIX_ADR)) + aflag = 2; + } else +#endif + { + switch (b) { + case 0xf3: + prefixes |= PREFIX_REPZ; + goto next_byte; + case 0xf2: + prefixes |= PREFIX_REPNZ; + goto next_byte; + case 0xf0: + prefixes |= PREFIX_LOCK; + goto next_byte; + case 0x2e: + s->override = R_CS; + goto next_byte; + case 0x36: + s->override = R_SS; + goto next_byte; + case 0x3e: + s->override = R_DS; + goto next_byte; + case 0x26: + s->override = R_ES; + goto next_byte; + case 0x64: + s->override = R_FS; + goto next_byte; + case 0x65: + s->override = R_GS; + goto next_byte; + case 0x66: + prefixes |= PREFIX_DATA; + goto next_byte; + case 0x67: + prefixes |= PREFIX_ADR; + goto next_byte; + } + if (prefixes & PREFIX_DATA) + dflag ^= 1; + if (prefixes & PREFIX_ADR) + aflag ^= 1; + } + + s->prefix = prefixes; + s->aflag = aflag; + s->dflag = dflag; + + /* lock generation */ + if (prefixes & PREFIX_LOCK) + gen_op_lock(); + + /* now check op code */ + reswitch: + switch(b) { + case 0x0f: + /**************************/ + /* extended op code */ + b = ldub_code(s->pc++) | 0x100; + goto reswitch; + + /**************************/ + /* arith & logic */ + case 0x00 ... 0x05: + case 0x08 ... 0x0d: + case 0x10 ... 0x15: + case 0x18 ... 0x1d: + case 0x20 ... 0x25: + case 0x28 ... 0x2d: + case 0x30 ... 0x35: + case 0x38 ... 0x3d: + { + int op, f, val; + op = (b >> 3) & 7; + f = (b >> 1) & 3; + + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + + switch(f) { + case 0: /* OP Ev, Gv */ + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + opreg = OR_TMP0; + } else if (op == OP_XORL && rm == reg) { + xor_zero: + /* xor reg, reg optimisation */ + gen_op_movl_T0_0(); + s->cc_op = CC_OP_LOGICB + ot; + gen_op_mov_reg_T0[ot][reg](); + gen_op_update1_cc(); + break; + } else { + opreg = rm; + } + gen_op_mov_TN_reg[ot][1][reg](); + gen_op(s, op, ot, opreg); + break; + case 1: /* OP Gv, Ev */ + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T1_A0[ot + s->mem_index](); + } else if (op == OP_XORL && rm == reg) { + goto xor_zero; + } else { + gen_op_mov_TN_reg[ot][1][rm](); + } + gen_op(s, op, ot, reg); + break; + case 2: /* OP A, Iv */ + val = insn_get(s, ot); + gen_op_movl_T1_im(val); + gen_op(s, op, ot, OR_EAX); + break; + } + } + break; + + case 0x80: /* GRP1 */ + case 0x81: + case 0x82: + case 0x83: + { + int val; + + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + + if (mod != 3) { + if (b == 0x83) + s->rip_offset = 1; + else + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + opreg = OR_TMP0; + } else { + opreg = rm; + } + + switch(b) { + default: + case 0x80: + case 0x81: + case 0x82: + val = insn_get(s, ot); + break; + case 0x83: + val = (int8_t)insn_get(s, OT_BYTE); + break; + } + 
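/* for 0x83 the byte immediate was sign-extended above; T1 now
+               holds the immediate and gen_op() emits the ALU operation
+               selected by the modrm reg field */ +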
            gen_op_movl_T1_im(val);
+            gen_op(s, op, ot, opreg);
+        }
+        break;
+
+        /**************************/
+        /* inc, dec, and other misc arith */
+    case 0x40 ... 0x47: /* inc Gv */
+        ot = dflag ? OT_LONG : OT_WORD;
+        gen_inc(s, ot, OR_EAX + (b & 7), 1);
+        break;
+    case 0x48 ... 0x4f: /* dec Gv */
+        ot = dflag ? OT_LONG : OT_WORD;
+        gen_inc(s, ot, OR_EAX + (b & 7), -1);
+        break;
+    case 0xf6: /* GRP3 */
+    case 0xf7:
+        if ((b & 1) == 0)
+            ot = OT_BYTE;
+        else
+            ot = dflag + OT_WORD;
+
+        modrm = ldub_code(s->pc++);
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        op = (modrm >> 3) & 7;
+        if (mod != 3) {
+            if (op == 0)
+                s->rip_offset = insn_const_size(ot);
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            gen_op_ld_T0_A0[ot + s->mem_index]();
+        } else {
+            gen_op_mov_TN_reg[ot][0][rm]();
+        }
+
+        switch(op) {
+        case 0: /* test */
+            val = insn_get(s, ot);
+            gen_op_movl_T1_im(val);
+            gen_op_testl_T0_T1_cc();
+            s->cc_op = CC_OP_LOGICB + ot;
+            break;
+        case 2: /* not */
+            gen_op_notl_T0();
+            if (mod != 3) {
+                gen_op_st_T0_A0[ot + s->mem_index]();
+            } else {
+                gen_op_mov_reg_T0[ot][rm]();
+            }
+            break;
+        case 3: /* neg */
+            gen_op_negl_T0();
+            if (mod != 3) {
+                gen_op_st_T0_A0[ot + s->mem_index]();
+            } else {
+                gen_op_mov_reg_T0[ot][rm]();
+            }
+            gen_op_update_neg_cc();
+            s->cc_op = CC_OP_SUBB + ot;
+            break;
+        case 4: /* mul */
+            switch(ot) {
+            case OT_BYTE:
+                gen_op_mulb_AL_T0();
+                s->cc_op = CC_OP_MULB;
+                break;
+            case OT_WORD:
+                gen_op_mulw_AX_T0();
+                s->cc_op = CC_OP_MULW;
+                break;
+            default:
+            case OT_LONG:
+                gen_op_mull_EAX_T0();
+                s->cc_op = CC_OP_MULL;
+                break;
+#ifdef TARGET_X86_64
+            case OT_QUAD:
+                gen_op_mulq_EAX_T0();
+                s->cc_op = CC_OP_MULQ;
+                break;
+#endif
+            }
+            break;
+        case 5: /* imul */
+            switch(ot) {
+            case OT_BYTE:
+                gen_op_imulb_AL_T0();
+                s->cc_op = CC_OP_MULB;
+                break;
+            case OT_WORD:
+                gen_op_imulw_AX_T0();
+                s->cc_op = CC_OP_MULW;
+                break;
+            default:
+            case OT_LONG:
+                gen_op_imull_EAX_T0();
+                s->cc_op = CC_OP_MULL;
+                break;
+#ifdef TARGET_X86_64
+            case OT_QUAD:
+                gen_op_imulq_EAX_T0();
+                s->cc_op = CC_OP_MULQ;
+                break;
+#endif
+            }
+            break;
+        case 6: /* div */
+            switch(ot) {
+            case OT_BYTE:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_divb_AL_T0();
+                break;
+            case OT_WORD:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_divw_AX_T0();
+                break;
+            default:
+            case OT_LONG:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_divl_EAX_T0();
+                break;
+#ifdef TARGET_X86_64
+            case OT_QUAD:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_divq_EAX_T0();
+                break;
+#endif
+            }
+            break;
+        case 7: /* idiv */
+            switch(ot) {
+            case OT_BYTE:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_idivb_AL_T0();
+                break;
+            case OT_WORD:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_idivw_AX_T0();
+                break;
+            default:
+            case OT_LONG:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_idivl_EAX_T0();
+                break;
+#ifdef TARGET_X86_64
+            case OT_QUAD:
+                gen_jmp_im(pc_start - s->cs_base);
+                gen_op_idivq_EAX_T0();
+                break;
+#endif
+            }
+            break;
+        default:
+            goto illegal_op;
+        }
+        break;
+
+    case 0xfe: /* GRP4 */
+    case 0xff: /* GRP5 */
+        if ((b & 1) == 0)
+            ot = OT_BYTE;
+        else
+            ot = dflag + OT_WORD;
+
+        modrm = ldub_code(s->pc++);
+        mod = (modrm >> 6) & 3;
+        rm = (modrm & 7) | REX_B(s);
+        op = (modrm >> 3) & 7;
+        if (op >= 2 && b == 0xfe) {
+            goto illegal_op;
+        }
+        if (CODE64(s)) {
+            if (op == 2 || op == 4) {
+                /* operand size for jumps is 64 bit */
+                ot = OT_QUAD;
+            } else if (op == 3 || op == 5) {
+                /* for far calls (lcall/ljmp), the operand is 16 or 32 bit,
+                   even in long mode */
+                ot = dflag ? 
OT_LONG : OT_WORD; + } else if (op == 6) { + /* default push size is 64 bit */ + ot = dflag ? OT_QUAD : OT_WORD; + } + } + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (op >= 2 && op != 3 && op != 5) + gen_op_ld_T0_A0[ot + s->mem_index](); + } else { + gen_op_mov_TN_reg[ot][0][rm](); + } + + switch(op) { + case 0: /* inc Ev */ + if (mod != 3) + opreg = OR_TMP0; + else + opreg = rm; + gen_inc(s, ot, opreg, 1); + break; + case 1: /* dec Ev */ + if (mod != 3) + opreg = OR_TMP0; + else + opreg = rm; + gen_inc(s, ot, opreg, -1); + break; + case 2: /* call Ev */ + /* XXX: optimize if memory (no 'and' is necessary) */ + if (s->dflag == 0) + gen_op_andl_T0_ffff(); + next_eip = s->pc - s->cs_base; + gen_movtl_T1_im(next_eip); + gen_push_T1(s); + gen_op_jmp_T0(); + gen_eob(s); + break; + case 3: /* lcall Ev */ + gen_op_ld_T1_A0[ot + s->mem_index](); + gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); + gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); + do_lcall: + if (s->pe && !s->vm86) { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(pc_start - s->cs_base); + gen_op_lcall_protected_T0_T1(dflag, s->pc - pc_start); + } else { + gen_op_lcall_real_T0_T1(dflag, s->pc - s->cs_base); + } + gen_eob(s); + break; + case 4: /* jmp Ev */ + if (s->dflag == 0) + gen_op_andl_T0_ffff(); + gen_op_jmp_T0(); + gen_eob(s); + break; + case 5: /* ljmp Ev */ + gen_op_ld_T1_A0[ot + s->mem_index](); + gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); + gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); + do_ljmp: + if (s->pe && !s->vm86) { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(pc_start - s->cs_base); + gen_op_ljmp_protected_T0_T1(s->pc - pc_start); + } else { + gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS])); + gen_op_movl_T0_T1(); + gen_op_jmp_T0(); + } + gen_eob(s); + break; + case 6: /* push Ev */ + gen_push_T0(s); + break; + default: + goto illegal_op; + } + break; + + case 0x84: /* test Ev, Gv */ + case 0x85: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); + gen_op_mov_TN_reg[ot][1][reg](); + gen_op_testl_T0_T1_cc(); + s->cc_op = CC_OP_LOGICB + ot; + break; + + case 0xa8: /* test eAX, Iv */ + case 0xa9: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + val = insn_get(s, ot); + + gen_op_mov_TN_reg[ot][0][OR_EAX](); + gen_op_movl_T1_im(val); + gen_op_testl_T0_T1_cc(); + s->cc_op = CC_OP_LOGICB + ot; + break; + + case 0x98: /* CWDE/CBW */ +#ifdef TARGET_X86_64 + if (dflag == 2) { + gen_op_movslq_RAX_EAX(); + } else +#endif + if (dflag == 1) + gen_op_movswl_EAX_AX(); + else + gen_op_movsbw_AX_AL(); + break; + case 0x99: /* CDQ/CWD */ +#ifdef TARGET_X86_64 + if (dflag == 2) { + gen_op_movsqo_RDX_RAX(); + } else +#endif + if (dflag == 1) + gen_op_movslq_EDX_EAX(); + else + gen_op_movswl_DX_AX(); + break; + case 0x1af: /* imul Gv, Ev */ + case 0x69: /* imul Gv, Ev, I */ + case 0x6b: + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + if (b == 0x69) + s->rip_offset = insn_const_size(ot); + else if (b == 0x6b) + s->rip_offset = 1; + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); + if (b == 0x69) { + val = insn_get(s, ot); + gen_op_movl_T1_im(val); + } else if (b == 0x6b) { + val = (int8_t)insn_get(s, OT_BYTE); + gen_op_movl_T1_im(val); + } else { + gen_op_mov_TN_reg[ot][1][reg](); + } + +#ifdef TARGET_X86_64 + if 
(ot == OT_QUAD) { + gen_op_imulq_T0_T1(); + } else +#endif + if (ot == OT_LONG) { + gen_op_imull_T0_T1(); + } else { + gen_op_imulw_T0_T1(); + } + gen_op_mov_reg_T0[ot][reg](); + s->cc_op = CC_OP_MULB + ot; + break; + case 0x1c0: + case 0x1c1: /* xadd Ev, Gv */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_TN_reg[ot][0][reg](); + gen_op_mov_TN_reg[ot][1][rm](); + gen_op_addl_T0_T1(); + gen_op_mov_reg_T1[ot][reg](); + gen_op_mov_reg_T0[ot][rm](); + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_mov_TN_reg[ot][0][reg](); + gen_op_ld_T1_A0[ot + s->mem_index](); + gen_op_addl_T0_T1(); + gen_op_st_T0_A0[ot + s->mem_index](); + gen_op_mov_reg_T1[ot][reg](); + } + gen_op_update2_cc(); + s->cc_op = CC_OP_ADDB + ot; + break; + case 0x1b0: + case 0x1b1: /* cmpxchg Ev, Gv */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + gen_op_mov_TN_reg[ot][1][reg](); + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_TN_reg[ot][0][rm](); + gen_op_cmpxchg_T0_T1_EAX_cc[ot](); + gen_op_mov_reg_T0[ot][rm](); + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T0_A0[ot + s->mem_index](); + gen_op_cmpxchg_mem_T0_T1_EAX_cc[ot + s->mem_index](); + } + s->cc_op = CC_OP_SUBB + ot; + break; + case 0x1c7: /* cmpxchg8b */ + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_cmpxchg8b(); + s->cc_op = CC_OP_EFLAGS; + break; + + /**************************/ + /* push/pop */ + case 0x50 ... 0x57: /* push */ + gen_op_mov_TN_reg[OT_LONG][0][(b & 7) | REX_B(s)](); + gen_push_T0(s); + break; + case 0x58 ... 0x5f: /* pop */ + if (CODE64(s)) { + ot = dflag ? OT_QUAD : OT_WORD; + } else { + ot = dflag + OT_WORD; + } + gen_pop_T0(s); + /* NOTE: order is important for pop %sp */ + gen_pop_update(s); + gen_op_mov_reg_T0[ot][(b & 7) | REX_B(s)](); + break; + case 0x60: /* pusha */ + if (CODE64(s)) + goto illegal_op; + gen_pusha(s); + break; + case 0x61: /* popa */ + if (CODE64(s)) + goto illegal_op; + gen_popa(s); + break; + case 0x68: /* push Iv */ + case 0x6a: + if (CODE64(s)) { + ot = dflag ? OT_QUAD : OT_WORD; + } else { + ot = dflag + OT_WORD; + } + if (b == 0x68) + val = insn_get(s, ot); + else + val = (int8_t)insn_get(s, OT_BYTE); + gen_op_movl_T0_im(val); + gen_push_T0(s); + break; + case 0x8f: /* pop Ev */ + if (CODE64(s)) { + ot = dflag ? 
OT_QUAD : OT_WORD; + } else { + ot = dflag + OT_WORD; + } + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + gen_pop_T0(s); + if (mod == 3) { + /* NOTE: order is important for pop %sp */ + gen_pop_update(s); + rm = (modrm & 7) | REX_B(s); + gen_op_mov_reg_T0[ot][rm](); + } else { + /* NOTE: order is important too for MMU exceptions */ + s->popl_esp_hack = 1 << ot; + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); + s->popl_esp_hack = 0; + gen_pop_update(s); + } + break; + case 0xc8: /* enter */ + { + int level; + val = lduw_code(s->pc); + s->pc += 2; + level = ldub_code(s->pc++); + gen_enter(s, val, level); + } + break; + case 0xc9: /* leave */ + /* XXX: exception not precise (ESP is updated before potential exception) */ + if (CODE64(s)) { + gen_op_mov_TN_reg[OT_QUAD][0][R_EBP](); + gen_op_mov_reg_T0[OT_QUAD][R_ESP](); + } else if (s->ss32) { + gen_op_mov_TN_reg[OT_LONG][0][R_EBP](); + gen_op_mov_reg_T0[OT_LONG][R_ESP](); + } else { + gen_op_mov_TN_reg[OT_WORD][0][R_EBP](); + gen_op_mov_reg_T0[OT_WORD][R_ESP](); + } + gen_pop_T0(s); + if (CODE64(s)) { + ot = dflag ? OT_QUAD : OT_WORD; + } else { + ot = dflag + OT_WORD; + } + gen_op_mov_reg_T0[ot][R_EBP](); + gen_pop_update(s); + break; + case 0x06: /* push es */ + case 0x0e: /* push cs */ + case 0x16: /* push ss */ + case 0x1e: /* push ds */ + if (CODE64(s)) + goto illegal_op; + gen_op_movl_T0_seg(b >> 3); + gen_push_T0(s); + break; + case 0x1a0: /* push fs */ + case 0x1a8: /* push gs */ + gen_op_movl_T0_seg((b >> 3) & 7); + gen_push_T0(s); + break; + case 0x07: /* pop es */ + case 0x17: /* pop ss */ + case 0x1f: /* pop ds */ + if (CODE64(s)) + goto illegal_op; + reg = b >> 3; + gen_pop_T0(s); + gen_movl_seg_T0(s, reg, pc_start - s->cs_base); + gen_pop_update(s); + if (reg == R_SS) { + /* if reg == SS, inhibit interrupts/trace. 
*/ + /* If several instructions disable interrupts, only the + _first_ does it */ + if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) + gen_op_set_inhibit_irq(); + s->tf = 0; + } + if (s->is_jmp) { + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x1a1: /* pop fs */ + case 0x1a9: /* pop gs */ + gen_pop_T0(s); + gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base); + gen_pop_update(s); + if (s->is_jmp) { + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + break; + + /**************************/ + /* mov */ + case 0x88: + case 0x89: /* mov Gv, Ev */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + /* generate a generic store */ + gen_ldst_modrm(s, modrm, ot, reg, 1); + break; + case 0xc6: + case 0xc7: /* mov Ev, Iv */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + if (mod != 3) { + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + } + val = insn_get(s, ot); + gen_op_movl_T0_im(val); + if (mod != 3) + gen_op_st_T0_A0[ot + s->mem_index](); + else + gen_op_mov_reg_T0[ot][(modrm & 7) | REX_B(s)](); + break; + case 0x8a: + case 0x8b: /* mov Ev, Gv */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = OT_WORD + dflag; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); + gen_op_mov_reg_T0[ot][reg](); + break; + case 0x8e: /* mov seg, Gv */ + modrm = ldub_code(s->pc++); + reg = (modrm >> 3) & 7; + if (reg >= 6 || reg == R_CS) + goto illegal_op; + gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); + gen_movl_seg_T0(s, reg, pc_start - s->cs_base); + if (reg == R_SS) { + /* if reg == SS, inhibit interrupts/trace */ + /* If several instructions disable interrupts, only the + _first_ does it */ + if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) + gen_op_set_inhibit_irq(); + s->tf = 0; + } + if (s->is_jmp) { + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x8c: /* mov Gv, seg */ + modrm = ldub_code(s->pc++); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + if (reg >= 6) + goto illegal_op; + gen_op_movl_T0_seg(reg); + if (mod == 3) + ot = OT_WORD + dflag; + else + ot = OT_WORD; + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); + break; + + case 0x1b6: /* movzbS Gv, Eb */ + case 0x1b7: /* movzwS Gv, Eb */ + case 0x1be: /* movsbS Gv, Eb */ + case 0x1bf: /* movswS Gv, Eb */ + { + int d_ot; + /* d_ot is the size of destination */ + d_ot = dflag + OT_WORD; + /* ot is the size of source */ + ot = (b & 1) + OT_BYTE; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + + if (mod == 3) { + gen_op_mov_TN_reg[ot][0][rm](); + switch(ot | (b & 8)) { + case OT_BYTE: + gen_op_movzbl_T0_T0(); + break; + case OT_BYTE | 8: + gen_op_movsbl_T0_T0(); + break; + case OT_WORD: + gen_op_movzwl_T0_T0(); + break; + default: + case OT_WORD | 8: + gen_op_movswl_T0_T0(); + break; + } + gen_op_mov_reg_T0[d_ot][reg](); + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (b & 8) { + gen_op_lds_T0_A0[ot + s->mem_index](); + } else { + gen_op_ldu_T0_A0[ot + s->mem_index](); + } + gen_op_mov_reg_T0[d_ot][reg](); + } + } + break; + + case 0x8d: /* lea */ + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + reg = ((modrm >> 3) & 7) | rex_r; + /* we must ensure that no segment is added */ + s->override = -1; + 
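/* addseg is saved and cleared around gen_lea_modrm() so that lea
+           yields the raw effective address: no segment base may be added */ +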
val = s->addseg; + s->addseg = 0; + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + s->addseg = val; + gen_op_mov_reg_A0[ot - OT_WORD][reg](); + break; + + case 0xa0: /* mov EAX, Ov */ + case 0xa1: + case 0xa2: /* mov Ov, EAX */ + case 0xa3: + { + target_ulong offset_addr; + + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + offset_addr = ldq_code(s->pc); + s->pc += 8; + if (offset_addr == (int32_t)offset_addr) + gen_op_movq_A0_im(offset_addr); + else + gen_op_movq_A0_im64(offset_addr >> 32, offset_addr); + } else +#endif + { + if (s->aflag) { + offset_addr = insn_get(s, OT_LONG); + } else { + offset_addr = insn_get(s, OT_WORD); + } + gen_op_movl_A0_im(offset_addr); + } + gen_add_A0_ds_seg(s); + if ((b & 2) == 0) { + gen_op_ld_T0_A0[ot + s->mem_index](); + gen_op_mov_reg_T0[ot][R_EAX](); + } else { + gen_op_mov_TN_reg[ot][0][R_EAX](); + gen_op_st_T0_A0[ot + s->mem_index](); + } + } + break; + case 0xd7: /* xlat */ +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_movq_A0_reg[R_EBX](); + gen_op_addq_A0_AL(); + } else +#endif + { + gen_op_movl_A0_reg[R_EBX](); + gen_op_addl_A0_AL(); + if (s->aflag == 0) + gen_op_andl_A0_ffff(); + } + gen_add_A0_ds_seg(s); + gen_op_ldu_T0_A0[OT_BYTE + s->mem_index](); + gen_op_mov_reg_T0[OT_BYTE][R_EAX](); + break; + case 0xb0 ... 0xb7: /* mov R, Ib */ + val = insn_get(s, OT_BYTE); + gen_op_movl_T0_im(val); + gen_op_mov_reg_T0[OT_BYTE][(b & 7) | REX_B(s)](); + break; + case 0xb8 ... 0xbf: /* mov R, Iv */ +#ifdef TARGET_X86_64 + if (dflag == 2) { + uint64_t tmp; + /* 64 bit case */ + tmp = ldq_code(s->pc); + s->pc += 8; + reg = (b & 7) | REX_B(s); + gen_movtl_T0_im(tmp); + gen_op_mov_reg_T0[OT_QUAD][reg](); + } else +#endif + { + ot = dflag ? OT_LONG : OT_WORD; + val = insn_get(s, ot); + reg = (b & 7) | REX_B(s); + gen_op_movl_T0_im(val); + gen_op_mov_reg_T0[ot][reg](); + } + break; + + case 0x91 ... 0x97: /* xchg R, EAX */ + ot = dflag + OT_WORD; + reg = (b & 7) | REX_B(s); + rm = R_EAX; + goto do_xchg_reg; + case 0x86: + case 0x87: /* xchg Ev, Gv */ + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + do_xchg_reg: + gen_op_mov_TN_reg[ot][0][reg](); + gen_op_mov_TN_reg[ot][1][rm](); + gen_op_mov_reg_T0[ot][rm](); + gen_op_mov_reg_T1[ot][reg](); + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_mov_TN_reg[ot][0][reg](); + /* for xchg, lock is implicit */ + if (!(prefixes & PREFIX_LOCK)) + gen_op_lock(); + gen_op_ld_T1_A0[ot + s->mem_index](); + gen_op_st_T0_A0[ot + s->mem_index](); + if (!(prefixes & PREFIX_LOCK)) + gen_op_unlock(); + gen_op_mov_reg_T1[ot][reg](); + } + break; + case 0xc4: /* les Gv */ + if (CODE64(s)) + goto illegal_op; + op = R_ES; + goto do_lxx; + case 0xc5: /* lds Gv */ + if (CODE64(s)) + goto illegal_op; + op = R_DS; + goto do_lxx; + case 0x1b2: /* lss Gv */ + op = R_SS; + goto do_lxx; + case 0x1b4: /* lfs Gv */ + op = R_FS; + goto do_lxx; + case 0x1b5: /* lgs Gv */ + op = R_GS; + do_lxx: + ot = dflag ? 
OT_LONG : OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T1_A0[ot + s->mem_index](); + gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); + /* load the segment first to handle exceptions properly */ + gen_op_ldu_T0_A0[OT_WORD + s->mem_index](); + gen_movl_seg_T0(s, op, pc_start - s->cs_base); + /* then put the data */ + gen_op_mov_reg_T1[ot][reg](); + if (s->is_jmp) { + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + break; + + /************************/ + /* shifts */ + case 0xc0: + case 0xc1: + /* shift Ev,Ib */ + shift = 2; + grp2: + { + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + + if (mod != 3) { + if (shift == 2) { + s->rip_offset = 1; + } + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + opreg = OR_TMP0; + } else { + opreg = (modrm & 7) | REX_B(s); + } + + /* simpler op */ + if (shift == 0) { + gen_shift(s, op, ot, opreg, OR_ECX); + } else { + if (shift == 2) { + shift = ldub_code(s->pc++); + } + gen_shifti(s, op, ot, opreg, shift); + } + } + break; + case 0xd0: + case 0xd1: + /* shift Ev,1 */ + shift = 1; + goto grp2; + case 0xd2: + case 0xd3: + /* shift Ev,cl */ + shift = 0; + goto grp2; + + case 0x1a4: /* shld imm */ + op = 0; + shift = 1; + goto do_shiftd; + case 0x1a5: /* shld cl */ + op = 0; + shift = 0; + goto do_shiftd; + case 0x1ac: /* shrd imm */ + op = 1; + shift = 1; + goto do_shiftd; + case 0x1ad: /* shrd cl */ + op = 1; + shift = 0; + do_shiftd: + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T0_A0[ot + s->mem_index](); + } else { + gen_op_mov_TN_reg[ot][0][rm](); + } + gen_op_mov_TN_reg[ot][1][reg](); + + if (shift) { + val = ldub_code(s->pc++); + if (ot == OT_QUAD) + val &= 0x3f; + else + val &= 0x1f; + if (val) { + if (mod == 3) + gen_op_shiftd_T0_T1_im_cc[ot][op](val); + else + gen_op_shiftd_mem_T0_T1_im_cc[ot + s->mem_index][op](val); + if (op == 0 && ot != OT_WORD) + s->cc_op = CC_OP_SHLB + ot; + else + s->cc_op = CC_OP_SARB + ot; + } + } else { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + if (mod == 3) + gen_op_shiftd_T0_T1_ECX_cc[ot][op](); + else + gen_op_shiftd_mem_T0_T1_ECX_cc[ot + s->mem_index][op](); + s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ + } + if (mod == 3) { + gen_op_mov_reg_T0[ot][rm](); + } + break; + + /************************/ + /* floats */ + case 0xd8 ... 0xdf: + if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { + /* if CR0.EM or CR0.TS are set, generate an FPU exception */ + /* XXX: what to do if illegal op ? */ + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + rm = modrm & 7; + op = ((b & 7) << 3) | ((modrm >> 3) & 7); + if (mod != 3) { + /* memory op */ + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + switch(op) { + case 0x00 ... 0x07: /* fxxxs */ + case 0x10 ... 0x17: /* fixxxl */ + case 0x20 ... 0x27: /* fxxxl */ + case 0x30 ... 
0x37: /* fixxx */ + { + int op1; + op1 = op & 7; + + switch(op >> 4) { + case 0: + gen_op_flds_FT0_A0(); + break; + case 1: + gen_op_fildl_FT0_A0(); + break; + case 2: + gen_op_fldl_FT0_A0(); + break; + case 3: + default: + gen_op_fild_FT0_A0(); + break; + } + + gen_op_fp_arith_ST0_FT0[op1](); + if (op1 == 3) { + /* fcomp needs pop */ + gen_op_fpop(); + } + } + break; + case 0x08: /* flds */ + case 0x0a: /* fsts */ + case 0x0b: /* fstps */ + case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ + case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ + case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */ + switch(op & 7) { + case 0: + switch(op >> 4) { + case 0: + gen_op_flds_ST0_A0(); + break; + case 1: + gen_op_fildl_ST0_A0(); + break; + case 2: + gen_op_fldl_ST0_A0(); + break; + case 3: + default: + gen_op_fild_ST0_A0(); + break; + } + break; + case 1: + switch(op >> 4) { + case 1: + gen_op_fisttl_ST0_A0(); + break; + case 2: + gen_op_fisttll_ST0_A0(); + break; + case 3: + default: + gen_op_fistt_ST0_A0(); + } + gen_op_fpop(); + break; + default: + switch(op >> 4) { + case 0: + gen_op_fsts_ST0_A0(); + break; + case 1: + gen_op_fistl_ST0_A0(); + break; + case 2: + gen_op_fstl_ST0_A0(); + break; + case 3: + default: + gen_op_fist_ST0_A0(); + break; + } + if ((op & 7) == 3) + gen_op_fpop(); + break; + } + break; + case 0x0c: /* fldenv mem */ + gen_op_fldenv_A0(s->dflag); + break; + case 0x0d: /* fldcw mem */ + gen_op_fldcw_A0(); + break; + case 0x0e: /* fnstenv mem */ + gen_op_fnstenv_A0(s->dflag); + break; + case 0x0f: /* fnstcw mem */ + gen_op_fnstcw_A0(); + break; + case 0x1d: /* fldt mem */ + gen_op_fldt_ST0_A0(); + break; + case 0x1f: /* fstpt mem */ + gen_op_fstt_ST0_A0(); + gen_op_fpop(); + break; + case 0x2c: /* frstor mem */ + gen_op_frstor_A0(s->dflag); + break; + case 0x2e: /* fnsave mem */ + gen_op_fnsave_A0(s->dflag); + break; + case 0x2f: /* fnstsw mem */ + gen_op_fnstsw_A0(); + break; + case 0x3c: /* fbld */ + gen_op_fbld_ST0_A0(); + break; + case 0x3e: /* fbstp */ + gen_op_fbst_ST0_A0(); + gen_op_fpop(); + break; + case 0x3d: /* fildll */ + gen_op_fildll_ST0_A0(); + break; + case 0x3f: /* fistpll */ + gen_op_fistll_ST0_A0(); + gen_op_fpop(); + break; + default: + goto illegal_op; + } + } else { + /* register float ops */ + opreg = rm; + + switch(op) { + case 0x08: /* fld sti */ + gen_op_fpush(); + gen_op_fmov_ST0_STN((opreg + 1) & 7); + break; + case 0x09: /* fxchg sti */ + case 0x29: /* fxchg4 sti, undocumented op */ + case 0x39: /* fxchg7 sti, undocumented op */ + gen_op_fxchg_ST0_STN(opreg); + break; + case 0x0a: /* grp d9/2 */ + switch(rm) { + case 0: /* fnop */ + /* check exceptions (FreeBSD FPU probe) */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(pc_start - s->cs_base); + gen_op_fwait(); + break; + default: + goto illegal_op; + } + break; + case 0x0c: /* grp d9/4 */ + switch(rm) { + case 0: /* fchs */ + gen_op_fchs_ST0(); + break; + case 1: /* fabs */ + gen_op_fabs_ST0(); + break; + case 4: /* ftst */ + gen_op_fldz_FT0(); + gen_op_fcom_ST0_FT0(); + break; + case 5: /* fxam */ + gen_op_fxam_ST0(); + break; + default: + goto illegal_op; + } + break; + case 0x0d: /* grp d9/5 */ + { + switch(rm) { + case 0: + gen_op_fpush(); + gen_op_fld1_ST0(); + break; + case 1: + gen_op_fpush(); + gen_op_fldl2t_ST0(); + break; + case 2: + gen_op_fpush(); + gen_op_fldl2e_ST0(); + break; + case 3: + gen_op_fpush(); + gen_op_fldpi_ST0(); + break; + case 4: + gen_op_fpush(); + gen_op_fldlg2_ST0(); + break; + case 5: + gen_op_fpush(); + 
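/* fldln2; every d9/5 encoding pushes a free stack slot and then
+                    loads one of the seven x87 constants (fld1, fldl2t,
+                    fldl2e, fldpi, fldlg2, fldln2, fldz) */ +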
gen_op_fldln2_ST0(); + break; + case 6: + gen_op_fpush(); + gen_op_fldz_ST0(); + break; + default: + goto illegal_op; + } + } + break; + case 0x0e: /* grp d9/6 */ + switch(rm) { + case 0: /* f2xm1 */ + gen_op_f2xm1(); + break; + case 1: /* fyl2x */ + gen_op_fyl2x(); + break; + case 2: /* fptan */ + gen_op_fptan(); + break; + case 3: /* fpatan */ + gen_op_fpatan(); + break; + case 4: /* fxtract */ + gen_op_fxtract(); + break; + case 5: /* fprem1 */ + gen_op_fprem1(); + break; + case 6: /* fdecstp */ + gen_op_fdecstp(); + break; + default: + case 7: /* fincstp */ + gen_op_fincstp(); + break; + } + break; + case 0x0f: /* grp d9/7 */ + switch(rm) { + case 0: /* fprem */ + gen_op_fprem(); + break; + case 1: /* fyl2xp1 */ + gen_op_fyl2xp1(); + break; + case 2: /* fsqrt */ + gen_op_fsqrt(); + break; + case 3: /* fsincos */ + gen_op_fsincos(); + break; + case 5: /* fscale */ + gen_op_fscale(); + break; + case 4: /* frndint */ + gen_op_frndint(); + break; + case 6: /* fsin */ + gen_op_fsin(); + break; + default: + case 7: /* fcos */ + gen_op_fcos(); + break; + } + break; + case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ + case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ + case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ + { + int op1; + + op1 = op & 7; + if (op >= 0x20) { + gen_op_fp_arith_STN_ST0[op1](opreg); + if (op >= 0x30) + gen_op_fpop(); + } else { + gen_op_fmov_FT0_STN(opreg); + gen_op_fp_arith_ST0_FT0[op1](); + } + } + break; + case 0x02: /* fcom */ + case 0x22: /* fcom2, undocumented op */ + gen_op_fmov_FT0_STN(opreg); + gen_op_fcom_ST0_FT0(); + break; + case 0x03: /* fcomp */ + case 0x23: /* fcomp3, undocumented op */ + case 0x32: /* fcomp5, undocumented op */ + gen_op_fmov_FT0_STN(opreg); + gen_op_fcom_ST0_FT0(); + gen_op_fpop(); + break; + case 0x15: /* da/5 */ + switch(rm) { + case 1: /* fucompp */ + gen_op_fmov_FT0_STN(1); + gen_op_fucom_ST0_FT0(); + gen_op_fpop(); + gen_op_fpop(); + break; + default: + goto illegal_op; + } + break; + case 0x1c: + switch(rm) { + case 0: /* feni (287 only, just do nop here) */ + break; + case 1: /* fdisi (287 only, just do nop here) */ + break; + case 2: /* fclex */ + gen_op_fclex(); + break; + case 3: /* fninit */ + gen_op_fninit(); + break; + case 4: /* fsetpm (287 only, just do nop here) */ + break; + default: + goto illegal_op; + } + break; + case 0x1d: /* fucomi */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_fmov_FT0_STN(opreg); + gen_op_fucomi_ST0_FT0(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0x1e: /* fcomi */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_fmov_FT0_STN(opreg); + gen_op_fcomi_ST0_FT0(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0x28: /* ffree sti */ + gen_op_ffree_STN(opreg); + break; + case 0x2a: /* fst sti */ + gen_op_fmov_STN_ST0(opreg); + break; + case 0x2b: /* fstp sti */ + case 0x0b: /* fstp1 sti, undocumented op */ + case 0x3a: /* fstp8 sti, undocumented op */ + case 0x3b: /* fstp9 sti, undocumented op */ + gen_op_fmov_STN_ST0(opreg); + gen_op_fpop(); + break; + case 0x2c: /* fucom st(i) */ + gen_op_fmov_FT0_STN(opreg); + gen_op_fucom_ST0_FT0(); + break; + case 0x2d: /* fucomp st(i) */ + gen_op_fmov_FT0_STN(opreg); + gen_op_fucom_ST0_FT0(); + gen_op_fpop(); + break; + case 0x33: /* de/3 */ + switch(rm) { + case 1: /* fcompp */ + gen_op_fmov_FT0_STN(1); + gen_op_fcom_ST0_FT0(); + gen_op_fpop(); + gen_op_fpop(); + break; + default: + goto illegal_op; + } + break; + case 0x38: /* ffreep sti, undocumented op */ + 
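/* behaves like ffree st(i) followed by a pop of the fp stack */ +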
gen_op_ffree_STN(opreg); + gen_op_fpop(); + break; + case 0x3c: /* df/4 */ + switch(rm) { + case 0: + gen_op_fnstsw_EAX(); + break; + default: + goto illegal_op; + } + break; + case 0x3d: /* fucomip */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_fmov_FT0_STN(opreg); + gen_op_fucomi_ST0_FT0(); + gen_op_fpop(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0x3e: /* fcomip */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_fmov_FT0_STN(opreg); + gen_op_fcomi_ST0_FT0(); + gen_op_fpop(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0x10 ... 0x13: /* fcmovxx */ + case 0x18 ... 0x1b: + { + int op1; + const static uint8_t fcmov_cc[8] = { + (JCC_B << 1), + (JCC_Z << 1), + (JCC_BE << 1), + (JCC_P << 1), + }; + op1 = fcmov_cc[op & 3] | ((op >> 3) & 1); + gen_setcc(s, op1); + gen_op_fcmov_ST0_STN_T0(opreg); + } + break; + default: + goto illegal_op; + } + } +#ifdef USE_CODE_COPY + s->tb->cflags |= CF_TB_FP_USED; +#endif + break; + /************************/ + /* string ops */ + + case 0xa4: /* movsS */ + case 0xa5: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_movs(s, ot); + } + break; + + case 0xaa: /* stosS */ + case 0xab: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_stos(s, ot); + } + break; + case 0xac: /* lodsS */ + case 0xad: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_lods(s, ot); + } + break; + case 0xae: /* scasS */ + case 0xaf: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + if (prefixes & PREFIX_REPNZ) { + gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + } else if (prefixes & PREFIX_REPZ) { + gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + } else { + gen_scas(s, ot); + s->cc_op = CC_OP_SUBB + ot; + } + break; + + case 0xa6: /* cmpsS */ + case 0xa7: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag + OT_WORD; + if (prefixes & PREFIX_REPNZ) { + gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + } else if (prefixes & PREFIX_REPZ) { + gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + } else { + gen_cmps(s, ot); + s->cc_op = CC_OP_SUBB + ot; + } + break; + case 0x6c: /* insS */ + case 0x6d: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + gen_check_io(s, ot, 1, pc_start - s->cs_base); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_ins(s, ot); + } + break; + case 0x6e: /* outsS */ + case 0x6f: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + gen_check_io(s, ot, 1, pc_start - s->cs_base); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_outs(s, ot); + } + break; + + /************************/ + /* port I/O */ + case 0xe4: + case 0xe5: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? 
OT_LONG : OT_WORD; + val = ldub_code(s->pc++); + gen_op_movl_T0_im(val); + gen_check_io(s, ot, 0, pc_start - s->cs_base); + gen_op_in[ot](); + gen_op_mov_reg_T1[ot][R_EAX](); + break; + case 0xe6: + case 0xe7: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + val = ldub_code(s->pc++); + gen_op_movl_T0_im(val); + gen_check_io(s, ot, 0, pc_start - s->cs_base); + gen_op_mov_TN_reg[ot][1][R_EAX](); + gen_op_out[ot](); + break; + case 0xec: + case 0xed: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); + gen_op_andl_T0_ffff(); + gen_check_io(s, ot, 0, pc_start - s->cs_base); + gen_op_in[ot](); + gen_op_mov_reg_T1[ot][R_EAX](); + break; + case 0xee: + case 0xef: + if ((b & 1) == 0) + ot = OT_BYTE; + else + ot = dflag ? OT_LONG : OT_WORD; + gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); + gen_op_andl_T0_ffff(); + gen_check_io(s, ot, 0, pc_start - s->cs_base); + gen_op_mov_TN_reg[ot][1][R_EAX](); + gen_op_out[ot](); + break; + + /************************/ + /* control */ + case 0xc2: /* ret im */ + val = ldsw_code(s->pc); + s->pc += 2; + gen_pop_T0(s); + if (CODE64(s) && s->dflag) + s->dflag = 2; + gen_stack_update(s, val + (2 << s->dflag)); + if (s->dflag == 0) + gen_op_andl_T0_ffff(); + gen_op_jmp_T0(); + gen_eob(s); + break; + case 0xc3: /* ret */ + gen_pop_T0(s); + gen_pop_update(s); + if (s->dflag == 0) + gen_op_andl_T0_ffff(); + gen_op_jmp_T0(); + gen_eob(s); + break; + case 0xca: /* lret im */ + val = ldsw_code(s->pc); + s->pc += 2; + do_lret: + if (s->pe && !s->vm86) { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(pc_start - s->cs_base); + gen_op_lret_protected(s->dflag, val); + } else { + gen_stack_A0(s); + /* pop offset */ + gen_op_ld_T0_A0[1 + s->dflag + s->mem_index](); + if (s->dflag == 0) + gen_op_andl_T0_ffff(); + /* NOTE: keeping EIP updated is not a problem in case of + exception */ + gen_op_jmp_T0(); + /* pop selector */ + gen_op_addl_A0_im(2 << s->dflag); + gen_op_ld_T0_A0[1 + s->dflag + s->mem_index](); + gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS])); + /* add stack offset */ + gen_stack_update(s, val + (4 << s->dflag)); + } + gen_eob(s); + break; + case 0xcb: /* lret */ + val = 0; + goto do_lret; + case 0xcf: /* iret */ + if (!s->pe) { + /* real mode */ + gen_op_iret_real(s->dflag); + s->cc_op = CC_OP_EFLAGS; + } else if (s->vm86) { + if (s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_op_iret_real(s->dflag); + s->cc_op = CC_OP_EFLAGS; + } + } else { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(pc_start - s->cs_base); + gen_op_iret_protected(s->dflag, s->pc - s->cs_base); + s->cc_op = CC_OP_EFLAGS; + } + gen_eob(s); + break; + case 0xe8: /* call im */ + { + if (dflag) + tval = (int32_t)insn_get(s, OT_LONG); + else + tval = (int16_t)insn_get(s, OT_WORD); + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (s->dflag == 0) + tval &= 0xffff; + gen_movtl_T0_im(next_eip); + gen_push_T0(s); + gen_jmp(s, tval); + } + break; + case 0x9a: /* lcall im */ + { + unsigned int selector, offset; + + if (CODE64(s)) + goto illegal_op; + ot = dflag ? 
OT_LONG : OT_WORD; + offset = insn_get(s, ot); + selector = insn_get(s, OT_WORD); + + gen_op_movl_T0_im(selector); + gen_op_movl_T1_imu(offset); + } + goto do_lcall; + case 0xe9: /* jmp im */ + if (dflag) + tval = (int32_t)insn_get(s, OT_LONG); + else + tval = (int16_t)insn_get(s, OT_WORD); + tval += s->pc - s->cs_base; + if (s->dflag == 0) + tval &= 0xffff; + gen_jmp(s, tval); + break; + case 0xea: /* ljmp im */ + { + unsigned int selector, offset; + + if (CODE64(s)) + goto illegal_op; + ot = dflag ? OT_LONG : OT_WORD; + offset = insn_get(s, ot); + selector = insn_get(s, OT_WORD); + + gen_op_movl_T0_im(selector); + gen_op_movl_T1_imu(offset); + } + goto do_ljmp; + case 0xeb: /* jmp Jb */ + tval = (int8_t)insn_get(s, OT_BYTE); + tval += s->pc - s->cs_base; + if (s->dflag == 0) + tval &= 0xffff; + gen_jmp(s, tval); + break; + case 0x70 ... 0x7f: /* jcc Jb */ + tval = (int8_t)insn_get(s, OT_BYTE); + goto do_jcc; + case 0x180 ... 0x18f: /* jcc Jv */ + if (dflag) { + tval = (int32_t)insn_get(s, OT_LONG); + } else { + tval = (int16_t)insn_get(s, OT_WORD); + } + do_jcc: + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (s->dflag == 0) + tval &= 0xffff; + gen_jcc(s, b, tval, next_eip); + break; + + case 0x190 ... 0x19f: /* setcc Gv */ + modrm = ldub_code(s->pc++); + gen_setcc(s, b); + gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1); + break; + case 0x140 ... 0x14f: /* cmov Gv, Ev */ + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + gen_setcc(s, b); + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T1_A0[ot + s->mem_index](); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_TN_reg[ot][1][rm](); + } + gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg](); + break; + + /************************/ + /* flags */ + case 0x9c: /* pushf */ + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_movl_T0_eflags(); + gen_push_T0(s); + } + break; + case 0x9d: /* popf */ + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_pop_T0(s); + if (s->cpl == 0) { + if (s->dflag) { + gen_op_movl_eflags_T0_cpl0(); + } else { + gen_op_movw_eflags_T0_cpl0(); + } + } else { + if (s->cpl <= s->iopl) { + if (s->dflag) { + gen_op_movl_eflags_T0_io(); + } else { + gen_op_movw_eflags_T0_io(); + } + } else { + if (s->dflag) { + gen_op_movl_eflags_T0(); + } else { + gen_op_movw_eflags_T0(); + } + } + } + gen_pop_update(s); + s->cc_op = CC_OP_EFLAGS; + /* abort translation because TF flag may change */ + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x9e: /* sahf */ + if (CODE64(s)) + goto illegal_op; + gen_op_mov_TN_reg[OT_BYTE][0][R_AH](); + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_movb_eflags_T0(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0x9f: /* lahf */ + if (CODE64(s)) + goto illegal_op; + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_movl_T0_eflags(); + gen_op_mov_reg_T0[OT_BYTE][R_AH](); + break; + case 0xf5: /* cmc */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_cmc(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0xf8: /* clc */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_clc(); + s->cc_op = CC_OP_EFLAGS; + break; + case 0xf9: /* stc */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_stc(); + s->cc_op = 
CC_OP_EFLAGS; + break; + case 0xfc: /* cld */ + gen_op_cld(); + break; + case 0xfd: /* std */ + gen_op_std(); + break; + + /************************/ + /* bit operations */ + case 0x1ba: /* bt/bts/btr/btc Gv, im */ + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + op = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + s->rip_offset = 1; + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T0_A0[ot + s->mem_index](); + } else { + gen_op_mov_TN_reg[ot][0][rm](); + } + /* load shift */ + val = ldub_code(s->pc++); + gen_op_movl_T1_im(val); + if (op < 4) + goto illegal_op; + op -= 4; + gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); + s->cc_op = CC_OP_SARB + ot; + if (op != 0) { + if (mod != 3) + gen_op_st_T0_A0[ot + s->mem_index](); + else + gen_op_mov_reg_T0[ot][rm](); + gen_op_update_bt_cc(); + } + break; + case 0x1a3: /* bt Gv, Ev */ + op = 0; + goto do_btx; + case 0x1ab: /* bts */ + op = 1; + goto do_btx; + case 0x1b3: /* btr */ + op = 2; + goto do_btx; + case 0x1bb: /* btc */ + op = 3; + do_btx: + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + gen_op_mov_TN_reg[OT_LONG][1][reg](); + if (mod != 3) { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + /* specific case: we need to add a displacement */ + gen_op_add_bit_A0_T1[ot - OT_WORD](); + gen_op_ld_T0_A0[ot + s->mem_index](); + } else { + gen_op_mov_TN_reg[ot][0][rm](); + } + gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); + s->cc_op = CC_OP_SARB + ot; + if (op != 0) { + if (mod != 3) + gen_op_st_T0_A0[ot + s->mem_index](); + else + gen_op_mov_reg_T0[ot][rm](); + gen_op_update_bt_cc(); + } + break; + case 0x1bc: /* bsf */ + case 0x1bd: /* bsr */ + ot = dflag + OT_WORD; + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); + /* NOTE: in order to handle the 0 case, we must load the + result. 
           It could be optimized with a generated jump */
+        gen_op_mov_TN_reg[ot][1][reg]();
+        gen_op_bsx_T0_cc[ot - OT_WORD][b & 1]();
+        gen_op_mov_reg_T1[ot][reg]();
+        s->cc_op = CC_OP_LOGICB + ot;
+        break;
+        /************************/
+        /* bcd */
+    case 0x27: /* daa */
+        if (CODE64(s))
+            goto illegal_op;
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        gen_op_daa();
+        s->cc_op = CC_OP_EFLAGS;
+        break;
+    case 0x2f: /* das */
+        if (CODE64(s))
+            goto illegal_op;
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        gen_op_das();
+        s->cc_op = CC_OP_EFLAGS;
+        break;
+    case 0x37: /* aaa */
+        if (CODE64(s))
+            goto illegal_op;
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        gen_op_aaa();
+        s->cc_op = CC_OP_EFLAGS;
+        break;
+    case 0x3f: /* aas */
+        if (CODE64(s))
+            goto illegal_op;
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        gen_op_aas();
+        s->cc_op = CC_OP_EFLAGS;
+        break;
+    case 0xd4: /* aam */
+        if (CODE64(s))
+            goto illegal_op;
+        val = ldub_code(s->pc++);
+        gen_op_aam(val);
+        s->cc_op = CC_OP_LOGICB;
+        break;
+    case 0xd5: /* aad */
+        if (CODE64(s))
+            goto illegal_op;
+        val = ldub_code(s->pc++);
+        gen_op_aad(val);
+        s->cc_op = CC_OP_LOGICB;
+        break;
+        /************************/
+        /* misc */
+    case 0x90: /* nop */
+        /* XXX: xchg + rex handling */
+        /* XXX: correct lock test for all insn */
+        if (prefixes & PREFIX_LOCK)
+            goto illegal_op;
+        break;
+    case 0x9b: /* fwait */
+        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
+            (HF_MP_MASK | HF_TS_MASK)) {
+            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+        } else {
+            if (s->cc_op != CC_OP_DYNAMIC)
+                gen_op_set_cc_op(s->cc_op);
+            gen_jmp_im(pc_start - s->cs_base);
+            gen_op_fwait();
+        }
+        break;
+    case 0xcc: /* int3 */
+        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
+        break;
+    case 0xcd: /* int N */
+        val = ldub_code(s->pc++);
+        if (s->vm86 && s->iopl != 3) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
+        }
+        break;
+    case 0xce: /* into */
+        if (CODE64(s))
+            goto illegal_op;
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        gen_jmp_im(pc_start - s->cs_base);
+        gen_op_into(s->pc - pc_start);
+        break;
+    case 0xf1: /* icebp (undocumented, exits to external debugger) */
+#if 1
+        gen_debug(s, pc_start - s->cs_base);
+#else
+        /* start debug */
+        tb_flush(cpu_single_env);
+        cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
+#endif
+        break;
+    case 0xfa: /* cli */
+        if (!s->vm86) {
+            if (s->cpl <= s->iopl) {
+                gen_op_cli();
+            } else {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            }
+        } else {
+            if (s->iopl == 3) {
+                gen_op_cli();
+            } else {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            }
+        }
+        break;
+    case 0xfb: /* sti */
+        if (!s->vm86) {
+            if (s->cpl <= s->iopl) {
+            gen_sti:
+                gen_op_sti();
+                /* interrupts are enabled only for the first insn after sti */
+                /* If several instructions disable interrupts, only the
+                   _first_ does it */
+                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
+                    gen_op_set_inhibit_irq();
+                /* give a chance to handle pending irqs */
+                gen_jmp_im(s->pc - s->cs_base);
+                gen_eob(s);
+            } else {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            }
+        } else {
+            if (s->iopl == 3) {
+                goto gen_sti;
+            } else {
+                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+            }
+        }
+        break;
+    case 0x62: /* bound */
+        if (CODE64(s))
+            goto illegal_op;
+        ot = dflag ? 
OT_LONG : OT_WORD; + modrm = ldub_code(s->pc++); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_op_mov_TN_reg[ot][0][reg](); + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_jmp_im(pc_start - s->cs_base); + if (ot == OT_WORD) + gen_op_boundw(); + else + gen_op_boundl(); + break; + case 0x1c8 ... 0x1cf: /* bswap reg */ + reg = (b & 7) | REX_B(s); +#ifdef TARGET_X86_64 + if (dflag == 2) { + gen_op_mov_TN_reg[OT_QUAD][0][reg](); + gen_op_bswapq_T0(); + gen_op_mov_reg_T0[OT_QUAD][reg](); + } else +#endif + { + gen_op_mov_TN_reg[OT_LONG][0][reg](); + gen_op_bswapl_T0(); + gen_op_mov_reg_T0[OT_LONG][reg](); + } + break; + case 0xd6: /* salc */ + if (CODE64(s)) + goto illegal_op; + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_op_salc(); + break; + case 0xe0: /* loopnz */ + case 0xe1: /* loopz */ + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + /* FALL THRU */ + case 0xe2: /* loop */ + case 0xe3: /* jecxz */ + { + int l1, l2; + + tval = (int8_t)insn_get(s, OT_BYTE); + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (s->dflag == 0) + tval &= 0xffff; + + l1 = gen_new_label(); + l2 = gen_new_label(); + b &= 3; + if (b == 3) { + gen_op_jz_ecx[s->aflag](l1); + } else { + gen_op_dec_ECX[s->aflag](); + if (b <= 1) + gen_op_mov_T0_cc(); + gen_op_loop[s->aflag][b](l1); + } + + gen_jmp_im(next_eip); + gen_op_jmp_label(l2); + gen_set_label(l1); + gen_jmp_im(tval); + gen_set_label(l2); + gen_eob(s); + } + break; + case 0x130: /* wrmsr */ + case 0x132: /* rdmsr */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (b & 2) + gen_op_rdmsr(); + else + gen_op_wrmsr(); + } + break; + case 0x131: /* rdtsc */ + gen_jmp_im(pc_start - s->cs_base); + gen_op_rdtsc(); + break; + case 0x134: /* sysenter */ + if (CODE64(s)) + goto illegal_op; + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + gen_jmp_im(pc_start - s->cs_base); + gen_op_sysenter(); + gen_eob(s); + } + break; + case 0x135: /* sysexit */ + if (CODE64(s)) + goto illegal_op; + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + gen_jmp_im(pc_start - s->cs_base); + gen_op_sysexit(); + gen_eob(s); + } + break; +#ifdef TARGET_X86_64 + case 0x105: /* syscall */ + /* XXX: is it usable in real mode ? 
*/ + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + gen_jmp_im(pc_start - s->cs_base); + gen_op_syscall(s->pc - pc_start); + gen_eob(s); + break; + case 0x107: /* sysret */ + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + gen_jmp_im(pc_start - s->cs_base); + gen_op_sysret(s->dflag); + /* condition codes are modified only in long mode */ + if (s->lma) + s->cc_op = CC_OP_EFLAGS; + gen_eob(s); + } + break; +#endif + case 0x1a2: /* cpuid */ + gen_op_cpuid(); + break; + case 0xf4: /* hlt */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + gen_jmp_im(s->pc - s->cs_base); + gen_op_hlt(); + s->is_jmp = 3; + } + break; + case 0x100: + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + switch(op) { + case 0: /* sldt */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_op_movl_T0_env(offsetof(CPUX86State,ldt.selector)); + ot = OT_WORD; + if (mod == 3) + ot += s->dflag; + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); + break; + case 2: /* lldt */ + if (!s->pe || s->vm86) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); + gen_jmp_im(pc_start - s->cs_base); + gen_op_lldt_T0(); + } + break; + case 1: /* str */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_op_movl_T0_env(offsetof(CPUX86State,tr.selector)); + ot = OT_WORD; + if (mod == 3) + ot += s->dflag; + gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); + break; + case 3: /* ltr */ + if (!s->pe || s->vm86) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); + gen_jmp_im(pc_start - s->cs_base); + gen_op_ltr_T0(); + } + break; + case 4: /* verr */ + case 5: /* verw */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); + if (s->cc_op != CC_OP_DYNAMIC) + gen_op_set_cc_op(s->cc_op); + if (op == 4) + gen_op_verr(); + else + gen_op_verw(); + s->cc_op = CC_OP_EFLAGS; + break; + default: + goto illegal_op; + } + break; + case 0x101: + modrm = ldub_code(s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + rm = modrm & 7; + switch(op) { + case 0: /* sgdt */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_movl_T0_env(offsetof(CPUX86State, gdt.limit)); + gen_op_st_T0_A0[OT_WORD + s->mem_index](); + gen_add_A0_im(s, 2); + gen_op_movtl_T0_env(offsetof(CPUX86State, gdt.base)); + if (!s->dflag) + gen_op_andl_T0_im(0xffffff); + gen_op_st_T0_A0[CODE64(s) + OT_LONG + s->mem_index](); + break; + case 1: + if (mod == 3) { + switch (rm) { + case 0: /* monitor */ + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || + s->cpl != 0) + goto illegal_op; + gen_jmp_im(pc_start - s->cs_base); +#ifdef TARGET_X86_64 + if (s->aflag == 2) { + gen_op_movq_A0_reg[R_EBX](); + gen_op_addq_A0_AL(); + } else +#endif + { + gen_op_movl_A0_reg[R_EBX](); + gen_op_addl_A0_AL(); + if (s->aflag == 0) + gen_op_andl_A0_ffff(); + } + gen_add_A0_ds_seg(s); + gen_op_monitor(); + break; + case 1: /* mwait */ + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || + s->cpl != 0) + goto illegal_op; + if (s->cc_op != CC_OP_DYNAMIC) { + gen_op_set_cc_op(s->cc_op); + s->cc_op = CC_OP_DYNAMIC; + } + 
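/* EIP is synced to the insn following mwait and the TB is ended
+                    so the helper can put the CPU into its wait state */ +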
gen_jmp_im(s->pc - s->cs_base); + gen_op_mwait(); + gen_eob(s); + break; + default: + goto illegal_op; + } + } else { /* sidt */ + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_movl_T0_env(offsetof(CPUX86State, idt.limit)); + gen_op_st_T0_A0[OT_WORD + s->mem_index](); + gen_add_A0_im(s, 2); + gen_op_movtl_T0_env(offsetof(CPUX86State, idt.base)); + if (!s->dflag) + gen_op_andl_T0_im(0xffffff); + gen_op_st_T0_A0[CODE64(s) + OT_LONG + s->mem_index](); + } + break; + case 2: /* lgdt */ + case 3: /* lidt */ + if (mod == 3) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_ld_T1_A0[OT_WORD + s->mem_index](); + gen_add_A0_im(s, 2); + gen_op_ld_T0_A0[CODE64(s) + OT_LONG + s->mem_index](); + if (!s->dflag) + gen_op_andl_T0_im(0xffffff); + if (op == 2) { + gen_op_movtl_env_T0(offsetof(CPUX86State,gdt.base)); + gen_op_movl_env_T1(offsetof(CPUX86State,gdt.limit)); + } else { + gen_op_movtl_env_T0(offsetof(CPUX86State,idt.base)); + gen_op_movl_env_T1(offsetof(CPUX86State,idt.limit)); + } + } + break; + case 4: /* smsw */ + gen_op_movl_T0_env(offsetof(CPUX86State,cr[0])); + gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1); + break; + case 6: /* lmsw */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); + gen_op_lmsw_T0(); + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + break; + case 7: /* invlpg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + if (mod == 3) { +#ifdef TARGET_X86_64 + if (CODE64(s) && rm == 0) { + /* swapgs */ + gen_op_movtl_T0_env(offsetof(CPUX86State,segs[R_GS].base)); + gen_op_movtl_T1_env(offsetof(CPUX86State,kernelgsbase)); + gen_op_movtl_env_T1(offsetof(CPUX86State,segs[R_GS].base)); + gen_op_movtl_env_T0(offsetof(CPUX86State,kernelgsbase)); + } else +#endif + { + goto illegal_op; + } + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + gen_op_invlpg_A0(); + gen_jmp_im(s->pc - s->cs_base); + gen_eob(s); + } + } + break; + default: + goto illegal_op; + } + break; + case 0x108: /* invd */ + case 0x109: /* wbinvd */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + /* nothing to do */ + } + break; + case 0x63: /* arpl or movslS (x86_64) */ +#ifdef TARGET_X86_64 + if (CODE64(s)) { + int d_ot; + /* d_ot is the size of destination */ + d_ot = dflag + OT_WORD; + + modrm = ldub_code(s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + + if (mod == 3) { + gen_op_mov_TN_reg[OT_LONG][0][rm](); + /* sign extend */ + if (d_ot == OT_QUAD) + gen_op_movslq_T0_T0(); + gen_op_mov_reg_T0[d_ot][reg](); + } else { + gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (d_ot == OT_QUAD) { + gen_op_lds_T0_A0[OT_LONG + s->mem_index](); + } else { + gen_op_ld_T0_A0[OT_LONG + s->mem_index](); + } + gen_op_mov_reg_T0[d_ot][reg](); + } + } else +#endif + { + if (!s->pe || s->vm86) + goto illegal_op; + ot = dflag ? 
+            modrm = ldub_code(s->pc++);
+            reg = (modrm >> 3) & 7;
+            mod = (modrm >> 6) & 3;
+            rm = modrm & 7;
+            if (mod != 3) {
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+                gen_op_ld_T0_A0[ot + s->mem_index]();
+            } else {
+                gen_op_mov_TN_reg[ot][0][rm]();
+            }
+            if (s->cc_op != CC_OP_DYNAMIC)
+                gen_op_set_cc_op(s->cc_op);
+            gen_op_arpl();
+            s->cc_op = CC_OP_EFLAGS;
+            if (mod != 3) {
+                gen_op_st_T0_A0[ot + s->mem_index]();
+            } else {
+                gen_op_mov_reg_T0[ot][rm]();
+            }
+            gen_op_arpl_update();
+        }
+        break;
+    case 0x102: /* lar */
+    case 0x103: /* lsl */
+        if (!s->pe || s->vm86)
+            goto illegal_op;
+        ot = dflag ? OT_LONG : OT_WORD;
+        modrm = ldub_code(s->pc++);
+        reg = ((modrm >> 3) & 7) | rex_r;
+        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+        gen_op_mov_TN_reg[ot][1][reg]();
+        if (s->cc_op != CC_OP_DYNAMIC)
+            gen_op_set_cc_op(s->cc_op);
+        if (b == 0x102)
+            gen_op_lar();
+        else
+            gen_op_lsl();
+        s->cc_op = CC_OP_EFLAGS;
+        gen_op_mov_reg_T1[ot][reg]();
+        break;
+    case 0x118:
+        modrm = ldub_code(s->pc++);
+        mod = (modrm >> 6) & 3;
+        op = (modrm >> 3) & 7;
+        switch(op) {
+        case 0: /* prefetchnta */
+        case 1: /* prefetcht0 */
+        case 2: /* prefetcht1 */
+        case 3: /* prefetcht2 */
+            if (mod == 3)
+                goto illegal_op;
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            /* nothing more to do */
+            break;
+        default:
+            goto illegal_op;
+        }
+        break;
+    case 0x120: /* mov reg, crN */
+    case 0x122: /* mov crN, reg */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            modrm = ldub_code(s->pc++);
+            if ((modrm & 0xc0) != 0xc0)
+                goto illegal_op;
+            rm = (modrm & 7) | REX_B(s);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            if (CODE64(s))
+                ot = OT_QUAD;
+            else
+                ot = OT_LONG;
+            switch(reg) {
+            case 0:
+            case 2:
+            case 3:
+            case 4:
+            case 8:
+                if (b & 2) {
+                    gen_op_mov_TN_reg[ot][0][rm]();
+                    gen_op_movl_crN_T0(reg);
+                    gen_jmp_im(s->pc - s->cs_base);
+                    gen_eob(s);
+                } else {
+#if !defined(CONFIG_USER_ONLY)
+                    if (reg == 8)
+                        gen_op_movtl_T0_cr8();
+                    else
+#endif
+                        gen_op_movtl_T0_env(offsetof(CPUX86State,cr[reg]));
+                    gen_op_mov_reg_T0[ot][rm]();
+                }
+                break;
+            default:
+                goto illegal_op;
+            }
+        }
+        break;
+    case 0x121: /* mov reg, drN */
+    case 0x123: /* mov drN, reg */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            modrm = ldub_code(s->pc++);
+            if ((modrm & 0xc0) != 0xc0)
+                goto illegal_op;
+            rm = (modrm & 7) | REX_B(s);
+            reg = ((modrm >> 3) & 7) | rex_r;
+            if (CODE64(s))
+                ot = OT_QUAD;
+            else
+                ot = OT_LONG;
+            /* XXX: do it dynamically with CR4.DE bit */
+            if (reg == 4 || reg == 5 || reg >= 8)
+                goto illegal_op;
+            if (b & 2) {
+                gen_op_mov_TN_reg[ot][0][rm]();
+                gen_op_movl_drN_T0(reg);
+                gen_jmp_im(s->pc - s->cs_base);
+                gen_eob(s);
+            } else {
+                gen_op_movtl_T0_env(offsetof(CPUX86State,dr[reg]));
+                gen_op_mov_reg_T0[ot][rm]();
+            }
+        }
+        break;
+    case 0x106: /* clts */
+        if (s->cpl != 0) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else {
+            gen_op_clts();
+            /* abort block because static cpu state changed */
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+        }
+        break;
+    /* MMX/SSE/SSE2/PNI support */
+    case 0x1c3: /* MOVNTI reg, mem */
+        if (!(s->cpuid_features & CPUID_SSE2))
+            goto illegal_op;
+        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
+        modrm = ldub_code(s->pc++);
+        mod = (modrm >> 6) & 3;
+        if (mod == 3)
+            goto illegal_op;
+        reg = ((modrm >> 3) & 7) | rex_r;
+        /* generate a generic store */
+        gen_ldst_modrm(s, modrm, ot, reg, 1);
+        break;
+    case 0x1ae:
+        modrm = ldub_code(s->pc++);
+        mod = (modrm >> 6) & 3;
+        op = (modrm >> 3) & 7;
+        switch(op) {
+        case 0: /* fxsave */
+            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
+                (s->flags & HF_EM_MASK))
+                goto illegal_op;
+            if (s->flags & HF_TS_MASK) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            gen_op_fxsave_A0((s->dflag == 2));
+            break;
+        case 1: /* fxrstor */
+            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
+                (s->flags & HF_EM_MASK))
+                goto illegal_op;
+            if (s->flags & HF_TS_MASK) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            gen_op_fxrstor_A0((s->dflag == 2));
+            break;
+        case 2: /* ldmxcsr */
+        case 3: /* stmxcsr */
+            if (s->flags & HF_TS_MASK) {
+                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+                break;
+            }
+            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
+                mod == 3)
+                goto illegal_op;
+            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            if (op == 2) {
+                gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+                gen_op_movl_env_T0(offsetof(CPUX86State, mxcsr));
+            } else {
+                gen_op_movl_T0_env(offsetof(CPUX86State, mxcsr));
+                gen_op_st_T0_A0[OT_LONG + s->mem_index]();
+            }
+            break;
+        case 5: /* lfence */
+        case 6: /* mfence */
+            if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
+                goto illegal_op;
+            break;
+        case 7: /* sfence / clflush */
+            if ((modrm & 0xc7) == 0xc0) {
+                /* sfence */
+                if (!(s->cpuid_features & CPUID_SSE))
+                    goto illegal_op;
+            } else {
+                /* clflush */
+                if (!(s->cpuid_features & CPUID_CLFLUSH))
+                    goto illegal_op;
+                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+            }
+            break;
+        default:
+            goto illegal_op;
+        }
+        break;
+    case 0x10d: /* prefetch */
+        modrm = ldub_code(s->pc++);
+        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+        /* ignore for now */
+        break;
+    case 0x110 ... 0x117:
+    case 0x128 ... 0x12f:
+    case 0x150 ... 0x177:
+    case 0x17c ... 0x17f:
+    case 0x1c2:
+    case 0x1c4 ... 0x1c6:
+    case 0x1d0 ... 0x1fe:
+        gen_sse(s, b, pc_start, rex_r);
+        break;
+    default:
+        goto illegal_op;
+    }
+    /* lock generation */
+    if (s->prefix & PREFIX_LOCK)
+        gen_op_unlock();
+    return s->pc;
+ illegal_op:
+    if (s->prefix & PREFIX_LOCK)
+        gen_op_unlock();
+    /* XXX: ensure that no lock was generated */
+    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+    return s->pc;
+}
+
+#define CC_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)
+#define CC_OSZAP (CC_O | CC_S | CC_Z | CC_A | CC_P)
+
+/* flags read by an operation */
+static uint16_t opc_read_flags[NB_OPS] = {
+    [INDEX_op_aas] = CC_A,
+    [INDEX_op_aaa] = CC_A,
+    [INDEX_op_das] = CC_A | CC_C,
+    [INDEX_op_daa] = CC_A | CC_C,
+
+    /* subtle: due to the incl/decl implementation, C is used */
+    [INDEX_op_update_inc_cc] = CC_C,
+
+    [INDEX_op_into] = CC_O,
+
+    [INDEX_op_jb_subb] = CC_C,
+    [INDEX_op_jb_subw] = CC_C,
+    [INDEX_op_jb_subl] = CC_C,
+
+    [INDEX_op_jz_subb] = CC_Z,
+    [INDEX_op_jz_subw] = CC_Z,
+    [INDEX_op_jz_subl] = CC_Z,
+
+    [INDEX_op_jbe_subb] = CC_Z | CC_C,
+    [INDEX_op_jbe_subw] = CC_Z | CC_C,
+    [INDEX_op_jbe_subl] = CC_Z | CC_C,
+
+    [INDEX_op_js_subb] = CC_S,
+    [INDEX_op_js_subw] = CC_S,
+    [INDEX_op_js_subl] = CC_S,
+
+    [INDEX_op_jl_subb] = CC_O | CC_S,
+    [INDEX_op_jl_subw] = CC_O | CC_S,
+    [INDEX_op_jl_subl] = CC_O | CC_S,
+
+    [INDEX_op_jle_subb] = CC_O | CC_S | CC_Z,
+    [INDEX_op_jle_subw] = CC_O | CC_S | CC_Z,
+    [INDEX_op_jle_subl] = CC_O | CC_S | CC_Z,
+
+    [INDEX_op_loopnzw] = CC_Z,
+    [INDEX_op_loopnzl] = CC_Z,
+    [INDEX_op_loopzw] = CC_Z,
+    [INDEX_op_loopzl] = CC_Z,
+
+    [INDEX_op_seto_T0_cc] = CC_O,
+    [INDEX_op_setb_T0_cc] = CC_C,
+    [INDEX_op_setz_T0_cc] = CC_Z,
+    [INDEX_op_setbe_T0_cc] = CC_Z | CC_C,
+    [INDEX_op_sets_T0_cc] = CC_S,
+    [INDEX_op_setp_T0_cc] = CC_P,
+    [INDEX_op_setl_T0_cc] = CC_O | CC_S,
+    [INDEX_op_setle_T0_cc] = CC_O | CC_S | CC_Z,
+
+    [INDEX_op_setb_T0_subb] = CC_C,
+    [INDEX_op_setb_T0_subw] = CC_C,
+    [INDEX_op_setb_T0_subl] = CC_C,
+
+    [INDEX_op_setz_T0_subb] = CC_Z,
+    [INDEX_op_setz_T0_subw] = CC_Z,
+    [INDEX_op_setz_T0_subl] = CC_Z,
+
+    [INDEX_op_setbe_T0_subb] = CC_Z | CC_C,
+    [INDEX_op_setbe_T0_subw] = CC_Z | CC_C,
+    [INDEX_op_setbe_T0_subl] = CC_Z | CC_C,
+
+    [INDEX_op_sets_T0_subb] = CC_S,
+    [INDEX_op_sets_T0_subw] = CC_S,
+    [INDEX_op_sets_T0_subl] = CC_S,
+
+    [INDEX_op_setl_T0_subb] = CC_O | CC_S,
+    [INDEX_op_setl_T0_subw] = CC_O | CC_S,
+    [INDEX_op_setl_T0_subl] = CC_O | CC_S,
+
+    [INDEX_op_setle_T0_subb] = CC_O | CC_S | CC_Z,
+    [INDEX_op_setle_T0_subw] = CC_O | CC_S | CC_Z,
+    [INDEX_op_setle_T0_subl] = CC_O | CC_S | CC_Z,
+
+    [INDEX_op_movl_T0_eflags] = CC_OSZAPC,
+    [INDEX_op_cmc] = CC_C,
+    [INDEX_op_salc] = CC_C,
+
+    /* needed for correct flag optimisation before string ops */
+    [INDEX_op_jnz_ecxw] = CC_OSZAPC,
+    [INDEX_op_jnz_ecxl] = CC_OSZAPC,
+    [INDEX_op_jz_ecxw] = CC_OSZAPC,
+    [INDEX_op_jz_ecxl] = CC_OSZAPC,
+
+#ifdef TARGET_X86_64
+    [INDEX_op_jb_subq] = CC_C,
+    [INDEX_op_jz_subq] = CC_Z,
+    [INDEX_op_jbe_subq] = CC_Z | CC_C,
+    [INDEX_op_js_subq] = CC_S,
+    [INDEX_op_jl_subq] = CC_O | CC_S,
+    [INDEX_op_jle_subq] = CC_O | CC_S | CC_Z,
+
+    [INDEX_op_loopnzq] = CC_Z,
+    [INDEX_op_loopzq] = CC_Z,
+
+    [INDEX_op_setb_T0_subq] = CC_C,
+    [INDEX_op_setz_T0_subq] = CC_Z,
+    [INDEX_op_setbe_T0_subq] = CC_Z | CC_C,
+    [INDEX_op_sets_T0_subq] = CC_S,
+    [INDEX_op_setl_T0_subq] = CC_O | CC_S,
+    [INDEX_op_setle_T0_subq] = CC_O | CC_S | CC_Z,
+
+    [INDEX_op_jnz_ecxq] = CC_OSZAPC,
+    [INDEX_op_jz_ecxq] = CC_OSZAPC,
+#endif
+
+#define DEF_READF(SUFFIX)\
+    [INDEX_op_adcb ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_adcw ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_adcl ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    X86_64_DEF([INDEX_op_adcq ## SUFFIX ## _T0_T1_cc] = CC_C,)\
+    [INDEX_op_sbbb ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_sbbw ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_sbbl ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    X86_64_DEF([INDEX_op_sbbq ## SUFFIX ## _T0_T1_cc] = CC_C,)\
+\
+    [INDEX_op_rclb ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_rclw ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_rcll ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    X86_64_DEF([INDEX_op_rclq ## SUFFIX ## _T0_T1_cc] = CC_C,)\
+    [INDEX_op_rcrb ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_rcrw ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    [INDEX_op_rcrl ## SUFFIX ## _T0_T1_cc] = CC_C,\
+    X86_64_DEF([INDEX_op_rcrq ## SUFFIX ## _T0_T1_cc] = CC_C,)
+
+    DEF_READF( )
+    DEF_READF(_raw)
+#ifndef CONFIG_USER_ONLY
+    DEF_READF(_kernel)
+    DEF_READF(_user)
+#endif
+};
+
+/* flags written by an operation */
+static uint16_t opc_write_flags[NB_OPS] = {
+    [INDEX_op_update2_cc] = CC_OSZAPC,
+    [INDEX_op_update1_cc] = CC_OSZAPC,
+    [INDEX_op_cmpl_T0_T1_cc] = CC_OSZAPC,
+    [INDEX_op_update_neg_cc] = CC_OSZAPC,
+    /* subtle: due to the incl/decl implementation, C is used */
+    [INDEX_op_update_inc_cc] = CC_OSZAPC,
+    [INDEX_op_testl_T0_T1_cc] = CC_OSZAPC,
+
+    [INDEX_op_mulb_AL_T0] = CC_OSZAPC,
+    [INDEX_op_mulw_AX_T0] = CC_OSZAPC,
+    [INDEX_op_mull_EAX_T0] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_mulq_EAX_T0] = CC_OSZAPC,)
+    [INDEX_op_imulb_AL_T0] = CC_OSZAPC,
+    [INDEX_op_imulw_AX_T0] = CC_OSZAPC,
+    [INDEX_op_imull_EAX_T0] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_imulq_EAX_T0] = CC_OSZAPC,)
+    [INDEX_op_imulw_T0_T1] = CC_OSZAPC,
+    [INDEX_op_imull_T0_T1] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_imulq_T0_T1] = CC_OSZAPC,)
+
+    /* sse */
+    [INDEX_op_ucomiss] = CC_OSZAPC,
+    [INDEX_op_ucomisd] = CC_OSZAPC,
+    [INDEX_op_comiss] = CC_OSZAPC,
+    [INDEX_op_comisd] = CC_OSZAPC,
+
+    /* bcd */
+    [INDEX_op_aam] = CC_OSZAPC,
+    [INDEX_op_aad] = CC_OSZAPC,
+    [INDEX_op_aas] = CC_OSZAPC,
+    [INDEX_op_aaa] = CC_OSZAPC,
+    [INDEX_op_das] = CC_OSZAPC,
+    [INDEX_op_daa] = CC_OSZAPC,
+
+    [INDEX_op_movb_eflags_T0] = CC_S | CC_Z | CC_A | CC_P | CC_C,
+    [INDEX_op_movw_eflags_T0] = CC_OSZAPC,
+    [INDEX_op_movl_eflags_T0] = CC_OSZAPC,
+    [INDEX_op_movw_eflags_T0_io] = CC_OSZAPC,
+    [INDEX_op_movl_eflags_T0_io] = CC_OSZAPC,
+    [INDEX_op_movw_eflags_T0_cpl0] = CC_OSZAPC,
+    [INDEX_op_movl_eflags_T0_cpl0] = CC_OSZAPC,
+    [INDEX_op_clc] = CC_C,
+    [INDEX_op_stc] = CC_C,
+    [INDEX_op_cmc] = CC_C,
+
+    [INDEX_op_btw_T0_T1_cc] = CC_OSZAPC,
+    [INDEX_op_btl_T0_T1_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_btq_T0_T1_cc] = CC_OSZAPC,)
+    [INDEX_op_btsw_T0_T1_cc] = CC_OSZAPC,
+    [INDEX_op_btsl_T0_T1_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_btsq_T0_T1_cc] = CC_OSZAPC,)
+    [INDEX_op_btrw_T0_T1_cc] = CC_OSZAPC,
+    [INDEX_op_btrl_T0_T1_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_btrq_T0_T1_cc] = CC_OSZAPC,)
+    [INDEX_op_btcw_T0_T1_cc] = CC_OSZAPC,
+    [INDEX_op_btcl_T0_T1_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_btcq_T0_T1_cc] = CC_OSZAPC,)
+
+    [INDEX_op_bsfw_T0_cc] = CC_OSZAPC,
+    [INDEX_op_bsfl_T0_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_bsfq_T0_cc] = CC_OSZAPC,)
+    [INDEX_op_bsrw_T0_cc] = CC_OSZAPC,
+    [INDEX_op_bsrl_T0_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_bsrq_T0_cc] = CC_OSZAPC,)
+
+    [INDEX_op_cmpxchgb_T0_T1_EAX_cc] = CC_OSZAPC,
+    [INDEX_op_cmpxchgw_T0_T1_EAX_cc] = CC_OSZAPC,
+    [INDEX_op_cmpxchgl_T0_T1_EAX_cc] = CC_OSZAPC,
+    X86_64_DEF([INDEX_op_cmpxchgq_T0_T1_EAX_cc] = CC_OSZAPC,)
+
+    [INDEX_op_cmpxchg8b] = CC_Z,
+    [INDEX_op_lar] = CC_Z,
+    [INDEX_op_lsl] = CC_Z,
+    [INDEX_op_verr] = CC_Z,
+    [INDEX_op_verw] = CC_Z,
+    [INDEX_op_fcomi_ST0_FT0] = CC_Z | CC_P | CC_C,
+    [INDEX_op_fucomi_ST0_FT0] = CC_Z | CC_P | CC_C,
+
+#define DEF_WRITEF(SUFFIX)\
+    [INDEX_op_adcb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_adcw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_adcl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_adcq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\
+    [INDEX_op_sbbb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_sbbw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_sbbl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_sbbq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\
+\
+    [INDEX_op_rolb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rolw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_roll ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    X86_64_DEF([INDEX_op_rolq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\
+    [INDEX_op_rorb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rorw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rorl ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    X86_64_DEF([INDEX_op_rorq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\
+\
+    [INDEX_op_rclb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rclw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rcll ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    X86_64_DEF([INDEX_op_rclq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\
+    [INDEX_op_rcrb ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rcrw ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    [INDEX_op_rcrl ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,\
+    X86_64_DEF([INDEX_op_rcrq ## SUFFIX ## _T0_T1_cc] = CC_O | CC_C,)\
+\
+    [INDEX_op_shlb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_shlw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_shll ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_shlq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\
+\
+    [INDEX_op_shrb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_shrw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_shrl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_shrq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\
+\
+    [INDEX_op_sarb ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_sarw ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    [INDEX_op_sarl ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_sarq ## SUFFIX ## _T0_T1_cc] = CC_OSZAPC,)\
+\
+    [INDEX_op_shldw ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\
+    [INDEX_op_shldl ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_shldq ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,)\
+    [INDEX_op_shldw ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\
+    [INDEX_op_shldl ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_shldq ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,)\
+\
+    [INDEX_op_shrdw ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\
+    [INDEX_op_shrdl ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_shrdq ## SUFFIX ## _T0_T1_ECX_cc] = CC_OSZAPC,)\
+    [INDEX_op_shrdw ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\
+    [INDEX_op_shrdl ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_shrdq ## SUFFIX ## _T0_T1_im_cc] = CC_OSZAPC,)\
+\
+    [INDEX_op_cmpxchgb ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,\
+    [INDEX_op_cmpxchgw ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,\
+    [INDEX_op_cmpxchgl ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,\
+    X86_64_DEF([INDEX_op_cmpxchgq ## SUFFIX ## _T0_T1_EAX_cc] = CC_OSZAPC,)
+
+
+    DEF_WRITEF( )
+    DEF_WRITEF(_raw)
+#ifndef CONFIG_USER_ONLY
+    DEF_WRITEF(_kernel)
+    DEF_WRITEF(_user)
+#endif
+};
+
+/* simpler form of an operation if no flags need to be generated */
+static uint16_t opc_simpler[NB_OPS] = {
+    [INDEX_op_update2_cc] = INDEX_op_nop,
+    [INDEX_op_update1_cc] = INDEX_op_nop,
+    [INDEX_op_update_neg_cc] = INDEX_op_nop,
+#if 0
+    /* broken: CC_OP logic must be rewritten */
+    [INDEX_op_update_inc_cc] = INDEX_op_nop,
+#endif
+
+    [INDEX_op_shlb_T0_T1_cc] = INDEX_op_shlb_T0_T1,
+    [INDEX_op_shlw_T0_T1_cc] = INDEX_op_shlw_T0_T1,
+    [INDEX_op_shll_T0_T1_cc] = INDEX_op_shll_T0_T1,
+    X86_64_DEF([INDEX_op_shlq_T0_T1_cc] = INDEX_op_shlq_T0_T1,)
+
+    [INDEX_op_shrb_T0_T1_cc] = INDEX_op_shrb_T0_T1,
+    [INDEX_op_shrw_T0_T1_cc] = INDEX_op_shrw_T0_T1,
+    [INDEX_op_shrl_T0_T1_cc] = INDEX_op_shrl_T0_T1,
+    X86_64_DEF([INDEX_op_shrq_T0_T1_cc] = INDEX_op_shrq_T0_T1,)
+
+    [INDEX_op_sarb_T0_T1_cc] = INDEX_op_sarb_T0_T1,
+    [INDEX_op_sarw_T0_T1_cc] = INDEX_op_sarw_T0_T1,
+    [INDEX_op_sarl_T0_T1_cc] = INDEX_op_sarl_T0_T1,
+    X86_64_DEF([INDEX_op_sarq_T0_T1_cc] = INDEX_op_sarq_T0_T1,)
+
+#define DEF_SIMPLER(SUFFIX)\
+    [INDEX_op_rolb ## SUFFIX ## _T0_T1_cc] = INDEX_op_rolb ## SUFFIX ## _T0_T1,\
+    [INDEX_op_rolw ## SUFFIX ## _T0_T1_cc] = INDEX_op_rolw ## SUFFIX ## _T0_T1,\
+    [INDEX_op_roll ## SUFFIX ## _T0_T1_cc] = INDEX_op_roll ## SUFFIX ## _T0_T1,\
+    X86_64_DEF([INDEX_op_rolq ## SUFFIX ## _T0_T1_cc] = INDEX_op_rolq ## SUFFIX ## _T0_T1,)\
+\
+    [INDEX_op_rorb ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorb ## SUFFIX ## _T0_T1,\
+    [INDEX_op_rorw ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorw ## SUFFIX ## _T0_T1,\
+    [INDEX_op_rorl ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorl ## SUFFIX ## _T0_T1,\
+    X86_64_DEF([INDEX_op_rorq ## SUFFIX ## _T0_T1_cc] = INDEX_op_rorq ## SUFFIX ## _T0_T1,)
+
+    DEF_SIMPLER( )
+    DEF_SIMPLER(_raw)
+#ifndef CONFIG_USER_ONLY
+    DEF_SIMPLER(_kernel)
+    DEF_SIMPLER(_user)
+#endif
+};
+
+void optimize_flags_init(void)
+{
+    int i;
+    /* put default values in arrays */
+    for(i = 0; i < NB_OPS; i++) {
+        if (opc_simpler[i] == 0)
+            opc_simpler[i] = i;
+    }
+}
+
+/* CPU flags computation optimization: we move backward through the
+   generated code to see which flags are needed. The operation is
+   modified if suitable */
+static void optimize_flags(uint16_t *opc_buf, int opc_buf_len)
+{
+    uint16_t *opc_ptr;
+    int live_flags, write_flags, op;
+
+    opc_ptr = opc_buf + opc_buf_len;
+    /* live_flags contains the flags needed by the next instructions
+       in the code. At the end of the block, we consider that all the
+       flags are live. */
+    live_flags = CC_OSZAPC;
+    while (opc_ptr > opc_buf) {
+        op = *--opc_ptr;
+        /* if none of the flags written by the instruction is used,
+           then we can try to find a simpler instruction */
+        write_flags = opc_write_flags[op];
+        if ((live_flags & write_flags) == 0) {
+            *opc_ptr = opc_simpler[op];
+        }
+        /* compute the live flags before the instruction */
+        live_flags &= ~write_flags;
+        live_flags |= opc_read_flags[op];
+    }
+}
+
+/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
+   basic block 'tb'. If search_pc is TRUE, also generate PC
+   information for each intermediate instruction. */
+static inline int gen_intermediate_code_internal(CPUState *env,
+                                                 TranslationBlock *tb,
+                                                 int search_pc)
+{
+    DisasContext dc1, *dc = &dc1;
+    target_ulong pc_ptr;
+    uint16_t *gen_opc_end;
+    int flags, j, lj, cflags;
+    target_ulong pc_start;
+    target_ulong cs_base;
+
+    /* generate intermediate code */
+    pc_start = tb->pc;
+    cs_base = tb->cs_base;
+    flags = tb->flags;
+    cflags = tb->cflags;
+
+    dc->pe = (flags >> HF_PE_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tf = (flags >> TF_SHIFT) & 1;
+    dc->singlestep_enabled = env->singlestep_enabled;
+    dc->cc_op = CC_OP_DYNAMIC;
+    dc->cs_base = cs_base;
+    dc->tb = tb;
+    dc->popl_esp_hack = 0;
+    /* select memory access functions */
+    dc->mem_index = 0;
+    if (flags & HF_SOFTMMU_MASK) {
+        if (dc->cpl == 3)
+            dc->mem_index = 2 * 4;
+        else
+            dc->mem_index = 1 * 4;
+    }
+    dc->cpuid_features = env->cpuid_features;
+    dc->cpuid_ext_features = env->cpuid_ext_features;
+#ifdef TARGET_X86_64
+    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
+    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
+#endif
+    dc->flags = flags;
+    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
+                    (flags & HF_INHIBIT_IRQ_MASK)
+#ifndef CONFIG_SOFTMMU
+                    || (flags & HF_SOFTMMU_MASK)
+#endif
+                    );
+#if 0
+    /* check addseg logic */
+    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
+        printf("ERROR addseg\n");
+#endif
+
+    gen_opc_ptr = gen_opc_buf;
+    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+    gen_opparam_ptr = gen_opparam_buf;
+    nb_gen_labels = 0;
+
+    dc->is_jmp = DISAS_NEXT;
+    pc_ptr = pc_start;
+    lj = -1;
+
+    for(;;) {
+        if (env->nb_breakpoints > 0) {
+            for(j = 0; j < env->nb_breakpoints; j++) {
+                if (env->breakpoints[j] == pc_ptr) {
+                    gen_debug(dc, pc_ptr - dc->cs_base);
+                    break;
+                }
+            }
+        }
+        if (search_pc) {
+            j = gen_opc_ptr - gen_opc_buf;
+            if (lj < j) {
+                lj++;
+                while (lj < j)
+                    gen_opc_instr_start[lj++] = 0;
+            }
+            gen_opc_pc[lj] = pc_ptr;
+            gen_opc_cc_op[lj] = dc->cc_op;
+            gen_opc_instr_start[lj] = 1;
+        }
+        pc_ptr = disas_insn(dc, pc_ptr);
+        /* stop translation if indicated */
+        if (dc->is_jmp)
+            break;
+        /* in single step mode, we generate only one instruction and
+           generate an exception */
+        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+           the flag and abort the translation to give the irqs a
+           chance to happen */
+        if (dc->tf || dc->singlestep_enabled ||
+            (flags & HF_INHIBIT_IRQ_MASK) ||
+            (cflags & CF_SINGLE_INSN)) {
+            gen_jmp_im(pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+        /* if the translation gets too long, stop generation as well */
+        if (gen_opc_ptr >= gen_opc_end ||
+            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
+            gen_jmp_im(pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+    }
+    *gen_opc_ptr = INDEX_op_end;
+    /* make sure we fill the last values */
+    if (search_pc) {
+        j = gen_opc_ptr - gen_opc_buf;
+        lj++;
+        while (lj <= j)
+            gen_opc_instr_start[lj++] = 0;
+    }
+
+#ifdef DEBUG_DISAS
+    if (loglevel & CPU_LOG_TB_CPU) {
+        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
+    }
+    if (loglevel & CPU_LOG_TB_IN_ASM) {
+        int disas_flags;
+        fprintf(logfile, "----------------\n");
+        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
+#ifdef TARGET_X86_64
+        if (dc->code64)
+            disas_flags = 2;
+        else
+#endif
+            disas_flags = !dc->code32;
+        target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags);
+        fprintf(logfile, "\n");
+        if (loglevel & CPU_LOG_TB_OP) {
+            fprintf(logfile, "OP:\n");
+            dump_ops(gen_opc_buf, gen_opparam_buf);
+            fprintf(logfile, "\n");
+        }
+    }
+#endif
+
+    /* optimize flag computations */
+    optimize_flags(gen_opc_buf, gen_opc_ptr - gen_opc_buf);
+
+#ifdef DEBUG_DISAS
+    if (loglevel & CPU_LOG_TB_OP_OPT) {
+        fprintf(logfile, "AFTER FLAGS OPT:\n");
+        dump_ops(gen_opc_buf, gen_opparam_buf);
+        fprintf(logfile, "\n");
+    }
+#endif
+    if (!search_pc)
+        tb->size = pc_ptr - pc_start;
+    return 0;
+}
+
+int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+{
+    return gen_intermediate_code_internal(env, tb, 0);
+}
+
+int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+{
+    return gen_intermediate_code_internal(env, tb, 1);
+}
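
The optimize_flags() pass added above is a backward liveness analysis specialized to the x86 condition-code bits: walking the op stream from the end, it tracks which flags can still be read downstream and downgrades any flag-producing op whose written flags are all dead. The standalone sketch below illustrates the same traversal on an invented three-opcode stream; the opcode names and the read/write/simpler tables are stand-ins for illustration (they are not QEMU's), and only the loop body mirrors optimize_flags().

/* Minimal standalone sketch of the backward flags-liveness pass.
   The tiny "ISA" and its tables are invented; only the traversal
   logic follows optimize_flags() above. Build with any C99 compiler. */
#include <stdio.h>
#include <stdint.h>

#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800
#define CC_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)

enum { OP_NOP, OP_ADD_CC, OP_ADD, OP_JZ, NB_SKETCH_OPS };

static const uint16_t read_flags[NB_SKETCH_OPS] = {
    [OP_JZ] = CC_Z,                 /* conditional jump consumes ZF */
};
static const uint16_t write_flags[NB_SKETCH_OPS] = {
    [OP_ADD_CC] = CC_OSZAPC,        /* flag-generating add clobbers all */
};
static const uint16_t simpler[NB_SKETCH_OPS] = {
    /* identity mappings play the role of optimize_flags_init() */
    [OP_NOP] = OP_NOP, [OP_ADD_CC] = OP_ADD,
    [OP_ADD] = OP_ADD, [OP_JZ] = OP_JZ,
};

/* Same shape as optimize_flags(): walk backwards, track the live
   flag set, and downgrade an op when nothing it writes is ever read. */
static void optimize(uint16_t *buf, int len)
{
    int live = CC_OSZAPC;           /* conservatively live at block end */
    for (uint16_t *p = buf + len; p > buf; ) {
        int op = *--p;
        if ((live & write_flags[op]) == 0)
            *p = simpler[op];
        live &= ~write_flags[op];   /* flags this op regenerates */
        live |= read_flags[op];     /* flags this op needs */
    }
}

int main(void)
{
    /* add; add; jz: only the second add's flags reach the jump,
       so the first add loses its flag computation. */
    uint16_t buf[] = { OP_ADD_CC, OP_ADD_CC, OP_JZ };
    optimize(buf, 3);
    printf("%d %d %d\n", buf[0], buf[1], buf[2]);   /* prints: 2 1 3 */
    return 0;
}

Run on the three-op stream, this prints "2 1 3": the first OP_ADD_CC is downgraded to the flagless OP_ADD while the second is preserved, because at its position the conservative "everything live at block end" assumption still holds, exactly as the comment in optimize_flags() describes.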