author    The Android Open Source Project <initial-contribution@android.com>  2009-03-03 19:30:32 -0800
committer The Android Open Source Project <initial-contribution@android.com>  2009-03-03 19:30:32 -0800
commit    8b23a6c7e1aee255004dd19098d4c2462b61b849 (patch)
tree      7a4d682ba51f0ff0364c5ca2509f515bdaf96de9 /tcg
parent    f721e3ac031f892af46f255a47d7f54a91317b30 (diff)
auto import from //depot/cupcake/@135843
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/LICENSE                 3
-rw-r--r--  tcg/README                425
-rw-r--r--  tcg/TODO                   15
-rw-r--r--  tcg/arm/tcg-target.c     1584
-rw-r--r--  tcg/arm/tcg-target.h       76
-rw-r--r--  tcg/hppa/tcg-target.c     973
-rw-r--r--  tcg/hppa/tcg-target.h     204
-rw-r--r--  tcg/i386/tcg-target.c    1185
-rw-r--r--  tcg/i386/tcg-target.h      55
-rw-r--r--  tcg/ppc/tcg-target.c     1492
-rw-r--r--  tcg/ppc/tcg-target.h      105
-rw-r--r--  tcg/ppc64/tcg-target.c   1491
-rw-r--r--  tcg/ppc64/tcg-target.h    105
-rw-r--r--  tcg/sparc/tcg-target.c   1206
-rw-r--r--  tcg/sparc/tcg-target.h    122
-rw-r--r--  tcg/tcg-dyngen.c          431
-rw-r--r--  tcg/tcg-op.h             1713
-rw-r--r--  tcg/tcg-opc.h             238
-rw-r--r--  tcg/tcg-runtime.c          68
-rw-r--r--  tcg/tcg.c                2081
-rw-r--r--  tcg/tcg.h                 421
-rw-r--r--  tcg/x86_64/tcg-target.c  1307
-rw-r--r--  tcg/x86_64/tcg-target.h    77
23 files changed, 15377 insertions, 0 deletions
diff --git a/tcg/LICENSE b/tcg/LICENSE
new file mode 100644
index 0000000..be817fa
--- /dev/null
+++ b/tcg/LICENSE
@@ -0,0 +1,3 @@
+All the files in this directory and subdirectories are released under
+a BSD like license (see header in each file). No other license is
+accepted.
diff --git a/tcg/README b/tcg/README
new file mode 100644
index 0000000..b03432e
--- /dev/null
+++ b/tcg/README
@@ -0,0 +1,425 @@
+Tiny Code Generator - Fabrice Bellard.
+
+1) Introduction
+
+TCG (Tiny Code Generator) began as a generic backend for a C
+compiler. It was simplified to be used in QEMU. It also has its roots
+in the QOP code generator written by Paul Brook.
+
+2) Definitions
+
+The TCG "target" is the architecture for which we generate the
+code. It is of course not the same as the "target" of QEMU which is
+the emulated architecture. As TCG started as a generic C backend used
+for cross compiling, it is assumed that the TCG target is different
+from the host, although it is never the case for QEMU.
+
+A TCG "function" corresponds to a QEMU Translated Block (TB).
+
+A TCG "temporary" is a variable only live in a basic
+block. Temporaries are allocated explicitly in each function.
+
+A TCG "local temporary" is a variable only live in a function. Local
+temporaries are allocated explicitly in each function.
+
+A TCG "global" is a variable which is live in all the functions
+(equivalent of a C global variable). They are defined before the
+functions defined. A TCG global can be a memory location (e.g. a QEMU
+CPU register), a fixed host register (e.g. the QEMU CPU state pointer)
+or a memory location which is stored in a register outside QEMU TBs
+(not implemented yet).
+
+A TCG "basic block" corresponds to a list of instructions terminated
+by a branch instruction.
+
+3) Intermediate representation
+
+3.1) Introduction
+
+TCG instructions operate on variables which are temporaries, local
+temporaries or globals. TCG instructions and variables are strongly
+typed. Two types are supported: 32 bit integers and 64 bit
+integers. Pointers are defined as an alias to 32 bit or 64 bit
+integers depending on the TCG target word size.
+
+Each instruction has a fixed number of output variable operands, input
+variable operands and constant operands (operands which must always be
+constants).
+
+The notable exception is the call instruction which has a variable
+number of outputs and inputs.
+
+In the textual form, output operands usually come first, followed by
+input operands, followed by constant operands. The output type is
+included in the instruction name. Constants are prefixed with a '$'.
+
+add_i32 t0, t1, t2 (t0 <- t1 + t2)
+
+3.2) Assumptions
+
+* Basic blocks
+
+- Basic blocks end after branches (e.g. brcond_i32 instruction),
+ goto_tb and exit_tb instructions.
+- Basic blocks end before legacy dyngen operations.
+- Basic blocks start after the end of a previous basic block, at a
+ set_label instruction or after a legacy dyngen operation.
+
+After the end of a basic block, the content of temporaries is
+destroyed, but local temporaries and globals are preserved.
+
+* Floating point types are not supported yet
+
+* Pointers: depending on the TCG target, pointer size is 32 bit or 64
+ bit. The type TCG_TYPE_PTR is an alias to TCG_TYPE_I32 or
+ TCG_TYPE_I64.
+
+* Helpers:
+
+Using the tcg_gen_helper_x_y functions it is possible to call any
+function taking i32, i64 or pointer types. Before calling a helper, all
+globals are stored at their canonical location and it is assumed that
+the function can modify them. In the future, function modifiers will
+make it possible to declare that a helper does not read or write some
+globals.
+
+On some TCG targets (e.g. x86), several calling conventions are
+supported.
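+
+For example, a call to a helper taking two 32 bit values and returning
+one could be generated as follows (a sketch: 'helper_compute' is a
+hypothetical helper, and the tcg_gen_helper_1_2 variant name is assumed
+from the x_y naming scheme above):
+
+    /* t0 = helper_compute(t1, t2); all globals are stored at their
+       canonical location before the call */
+    tcg_gen_helper_1_2(helper_compute, t0, t1, t2);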
+
+* Branches:
+
+Use the instruction 'br' to jump to a label. Use 'jmp' to jump to an
+explicit address. Conditional branches can only jump to labels.
+
+3.3) Code Optimizations
+
+When generating instructions, you can count on at least the following
+optimizations:
+
+- Single instructions are simplified, e.g.
+
+ and_i32 t0, t0, $0xffffffff
+
+ is suppressed.
+
+- A liveness analysis is done at the basic block level. The
+ information is used to suppress moves from a dead variable to
+ another one. It is also used to remove instructions which compute
+ dead results. The latter is especially useful for condition code
+ optimization in QEMU.
+
+ In the following example:
+
+ add_i32 t0, t1, t2
+ add_i32 t0, t0, $1
+ mov_i32 t0, $1
+
+ only the last instruction is kept.
+
+3.4) Instruction Reference
+
+********* Function call
+
+* call <ret> <params> ptr
+
+call function 'ptr' (pointer type)
+
+<ret> optional 32 bit or 64 bit return value
+<params> optional 32 bit or 64 bit parameters
+
+********* Jumps/Labels
+
+* jmp t0
+
+Absolute jump to address t0 (pointer type).
+
+* set_label $label
+
+Define label 'label' at the current program point.
+
+* br $label
+
+Jump to label.
+
+* brcond_i32/i64 cond, t0, t1, label
+
+Conditional jump if t0 cond t1 is true. cond can be:
+ TCG_COND_EQ
+ TCG_COND_NE
+ TCG_COND_LT /* signed */
+ TCG_COND_GE /* signed */
+ TCG_COND_LE /* signed */
+ TCG_COND_GT /* signed */
+ TCG_COND_LTU /* unsigned */
+ TCG_COND_GEU /* unsigned */
+ TCG_COND_LEU /* unsigned */
+ TCG_COND_GTU /* unsigned */
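+
+For example, an unsigned "t0 < t1" branch could be generated like this
+(a sketch; the gen_new_label()/gen_set_label() helper names are assumed
+from the QEMU sources of this period):
+
+    int l = gen_new_label();
+    tcg_gen_brcond_i32(TCG_COND_LTU, t0, t1, l); /* taken if t0 <u t1 */
+    ...                                          /* fall-through path */
+    gen_set_label(l);                            /* branch target */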
+
+********* Arithmetic
+
+* add_i32/i64 t0, t1, t2
+
+t0=t1+t2
+
+* sub_i32/i64 t0, t1, t2
+
+t0=t1-t2
+
+* neg_i32/i64 t0, t1
+
+t0=-t1 (two's complement)
+
+* mul_i32/i64 t0, t1, t2
+
+t0=t1*t2
+
+* div_i32/i64 t0, t1, t2
+
+t0=t1/t2 (signed). Undefined behavior if division by zero or overflow.
+
+* divu_i32/i64 t0, t1, t2
+
+t0=t1/t2 (unsigned). Undefined behavior if division by zero.
+
+* rem_i32/i64 t0, t1, t2
+
+t0=t1%t2 (signed). Undefined behavior if division by zero or overflow.
+
+* remu_i32/i64 t0, t1, t2
+
+t0=t1%t2 (unsigned). Undefined behavior if division by zero.
+
+********* Logical
+
+* and_i32/i64 t0, t1, t2
+
+t0=t1&t2
+
+* or_i32/i64 t0, t1, t2
+
+t0=t1|t2
+
+* xor_i32/i64 t0, t1, t2
+
+t0=t1^t2
+
+* not_i32/i64 t0, t1
+
+t0=~t1
+
+********* Shifts
+
+* shl_i32/i64 t0, t1, t2
+
+t0=t1 << t2. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+
+* shr_i32/i64 t0, t1, t2
+
+t0=t1 >> t2 (unsigned). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+
+* sar_i32/i64 t0, t1, t2
+
+t0=t1 >> t2 (signed). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+
+********* Misc
+
+* mov_i32/i64 t0, t1
+
+t0 = t1
+
+Move t1 to t0 (both operands must have the same type).
+
+* ext8s_i32/i64 t0, t1
+ext8u_i32/i64 t0, t1
+ext16s_i32/i64 t0, t1
+ext16u_i32/i64 t0, t1
+ext32s_i64 t0, t1
+ext32u_i64 t0, t1
+
+8, 16 or 32 bit sign/zero extension (both operands must have the same type)
+
+* bswap16_i32 t0, t1
+
+16 bit byte swap on a 32 bit value. The two high order bytes must be set
+to zero.
+
+* bswap_i32 t0, t1
+
+32 bit byte swap
+
+* bswap_i64 t0, t1
+
+64 bit byte swap
+
+* discard_i32/i64 t0
+
+Indicate that the value of t0 won't be used later. It is useful to
+force dead code elimination.
+
+********* Type conversions
+
+* ext_i32_i64 t0, t1
+Convert t1 (32 bit) to t0 (64 bit) with sign extension.
+
+* extu_i32_i64 t0, t1
+Convert t1 (32 bit) to t0 (64 bit) with zero extension.
+
+* trunc_i64_i32 t0, t1
+Truncate t1 (64 bit) to t0 (32 bit)
+
+********* Load/Store
+
+* ld_i32/i64 t0, t1, offset
+ld8s_i32/i64 t0, t1, offset
+ld8u_i32/i64 t0, t1, offset
+ld16s_i32/i64 t0, t1, offset
+ld16u_i32/i64 t0, t1, offset
+ld32s_i64 t0, t1, offset
+ld32u_i64 t0, t1, offset
+
+t0 = read(t1 + offset)
+Load 8, 16, 32 or 64 bits with or without sign extension from host memory.
+offset must be a constant.
+
+* st_i32/i64 t0, t1, offset
+st8_i32/i64 t0, t1, offset
+st16_i32/i64 t0, t1, offset
+st32_i64 t0, t1, offset
+
+write(t0, t1 + offset)
+Write 8, 16, 32 or 64 bits to host memory.
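+
+These are typically used to access fields of the CPU state structure,
+e.g. (a sketch; 'cpu_env' is assumed to be a global holding the CPU
+state pointer and 'regs' a hypothetical CPUState field):
+
+    /* t0 = env->regs[3], an ld_i32 with a constant offset */
+    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUState, regs[3]));
+    /* env->regs[3] = t0 */
+    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUState, regs[3]));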
+
+********* QEMU specific operations
+
+* exit_tb t0
+
+Exit the current TB and return the value t0 (word type).
+
+* goto_tb index
+
+Exit the current TB and jump to the TB index 'index' (constant) if the
+current TB was linked to this TB. Otherwise execute the next
+instructions.
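+
+A typical TB epilogue therefore chains the two operations (a sketch; by
+convention the value passed to exit_tb encodes the TB pointer plus the
+jump slot index so the execution loop can link the TBs):
+
+    goto_tb $0      (direct jump, taken once this TB has been linked)
+    exit_tb $tb0    (fallback: return to the execution loop)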
+
+* qemu_ld_i32/i64 t0, t1, flags
+qemu_ld8u_i32/i64 t0, t1, flags
+qemu_ld8s_i32/i64 t0, t1, flags
+qemu_ld16u_i32/i64 t0, t1, flags
+qemu_ld16s_i32/i64 t0, t1, flags
+qemu_ld32u_i64 t0, t1, flags
+qemu_ld32s_i64 t0, t1, flags
+
+Load data at the QEMU CPU address t1 into t0. t1 has the QEMU CPU
+address type. 'flags' contains the QEMU memory index (which selects,
+for example, user or kernel access).
+
+* qemu_st_i32/i64 t0, t1, flags
+qemu_st8_i32/i64 t0, t1, flags
+qemu_st16_i32/i64 t0, t1, flags
+qemu_st32_i64 t0, t1, flags
+
+Store the data t0 at the QEMU CPU address t1. t1 has the QEMU CPU
+address type. 'flags' contains the QEMU memory index (which selects,
+for example, user or kernel access).
+
+Note 1: Some shortcuts are defined when the last operand is known to be
+a constant (e.g. addi for add, movi for mov).
+
+Note 2: When using TCG, the opcodes must never be generated directly
+as some of them may not be available as "real" opcodes. Always use the
+function tcg_gen_xxx(args).
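+
+For example, instead of emitting movi_i32/add_i32 opcodes by hand, a
+target front end would write (a sketch; tcg_temp_new() taking a type
+argument matches the tcg.h of this version):
+
+    TCGv t0 = tcg_temp_new(TCG_TYPE_I32);
+    tcg_gen_movi_i32(t0, 0x100);    /* movi shortcut */
+    tcg_gen_addi_i32(t0, t0, 4);    /* addi shortcut; expanded to a real
+                                       add_i32 if the target lacks an
+                                       immediate form */
+    tcg_temp_free(t0);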
+
+4) Backend
+
+tcg-target.h contains the target specific definitions. tcg-target.c
+contains the target specific code.
+
+4.1) Assumptions
+
+The target word size (TCG_TARGET_REG_BITS) is expected to be 32 bit or
+64 bit. It is expected that the pointer has the same size as the word.
+
+On a 32 bit target, all 64 bit operations are converted to 32 bits. A
+few specific operations must be implemented to allow it (see add2_i32,
+sub2_i32, brcond2_i32).
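+
+For instance, a 64 bit addition on a 32 bit target is decomposed into a
+carry-propagating pair (a sketch of the expected semantics):
+
+    add2_i32 t0_low, t0_high, a_low, a_high, b_low, b_high
+
+which computes the 64 bit sum of (a_high:a_low) and (b_high:b_low) into
+(t0_high:t0_low), propagating the carry between the two halves.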
+
+Floating point operations are not supported in this version. A
+previous incarnation of the code generator had full support of them,
+but it is better to concentrate on integer operations first.
+
+On a 64 bit target, no assumption is made in TCG about the storage of
+the 32 bit values in 64 bit registers.
+
+4.2) Constraints
+
+GCC like constraints are used to define the constraints of every
+instruction. Memory constraints are not supported in this
+version. Aliases are specified in the input operands as for GCC.
+
+A target can define specific register or constant constraints. If an
+operation uses a constant input constraint which does not allow all
+constants, it must also accept registers in order to have a fallback.
+
+The movi_i32 and movi_i64 operations must accept any constants.
+
+The mov_i32 and mov_i64 operations must accept any registers of the
+same type.
+
+The ld/st instructions must accept signed 32 bit constant offsets. This
+can be implemented by reserving a specific register in which to compute
+the address if the offset is too big.
+
+The ld/st instructions must accept any destination (ld) or source (st)
+register.
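+
+Concretely, a backend describes each opcode with a TCGTargetOpDef
+entry, e.g. (in the style of the ARM backend below; the "ri" form is a
+sketch of what an immediate-capable add would look like):
+
+    { INDEX_op_add_i32, { "r", "r", "ri" } }, /* out: reg; in: reg,
+                                                 reg or immediate */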
+
+4.3) Function call assumptions
+
+- The only supported types for parameters and return value are: 32 and
+ 64 bit integers and pointer.
+- The stack grows downwards.
+- The first N parameters are passed in registers.
+- The next parameters are passed on the stack by storing them as words.
+- Some registers are clobbered during the call.
+- The function can return zero or one value in registers. On a 32 bit
+ target, functions must be able to return 2 values in registers for
+ a 64 bit return type.
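+
+For example, with the ARM backend in this commit
+(tcg_target_call_oarg_regs below is { r0, r1 }), a hypothetical helper
+
+    uint64_t helper_foo(uint32_t a);
+
+takes 'a' in r0 and returns its 64 bit result split across the r0/r1
+register pair, matching the last rule above.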
+
+5) Migration from dyngen to TCG
+
+TCG is backward compatible with QEMU "dyngen" operations. This means
+that TCG instructions can be freely mixed with dyngen operations. It
+is expected that QEMU targets will be progressively fully converted to
+TCG. Once a target is fully converted to TCG, it will be possible
+to apply more optimizations because more registers will be free for
+the generated code.
+
+The exception model is the same as the dyngen one.
+
+6) Recommended coding rules for best performance
+
+- Use globals to represent the parts of the QEMU CPU state which are
+ often modified, e.g. the integer registers and the condition
+ codes. TCG will be able to use host registers to store them (see
+ the sketch after this list).
+
+- Avoid globals stored in fixed registers. They must be used only to
+ store the pointer to the CPU state and possibly to store a pointer
+ to a register window. The other uses are to ensure backward
+ compatibility with dyngen while porting a new target to TCG.
+
+- Use temporaries. Use local temporaries only when really needed,
+ e.g. when you need to use a value after a jump. Local temporaries
+ introduce a performance hit in the current TCG implementation: their
+ content is saved to memory at the end of each basic block.
+
+- Free temporaries and local temporaries when they are no longer used
+ (tcg_temp_free). Since tcg_const_x() also creates a temporary, you
+ should free it after it is used. Freeing temporaries does not yield
+ better generated code, but it reduces the memory usage of TCG and
+ improves the speed of translation.
+
+- Don't hesitate to use helpers for complicated or seldom used target
+ instructions. There is little performance advantage in using TCG to
+ implement target instructions taking more than about twenty TCG
+ instructions.
+
+- Use the 'discard' instruction if you know that TCG won't be able to
+ prove that a given global is "dead" at a given program point. The
+ x86 target uses it to improve the condition codes optimisation.
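+
+As a sketch of the first and third rules above (the CPUState field
+names and the tcg_global_mem_new() signature are assumed from the QEMU
+sources of this period):
+
+    /* a global backed by a CPU state field: TCG may cache it in a
+       host register within a TB */
+    cpu_eax = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
+                                 offsetof(CPUState, regs[0]), "eax");
+
+    /* a plain temporary, only valid within the basic block */
+    TCGv t0 = tcg_temp_new(TCG_TYPE_I32);
+    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUState, regs[1]));
+    tcg_gen_add_i32(cpu_eax, cpu_eax, t0);
+    tcg_temp_free(t0);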
diff --git a/tcg/TODO b/tcg/TODO
new file mode 100644
index 0000000..5ca35e9
--- /dev/null
+++ b/tcg/TODO
@@ -0,0 +1,15 @@
+- Add new instructions such as: andnot, ror, rol, setcond, clz, ctz,
+ popcnt.
+
+- See if it is worth exporting mul2, mulu2, div2, divu2.
+
+- Support of globals saved in fixed registers between TBs.
+
+Ideas:
+
+- Move the slow part of the qemu_ld/st ops after the end of the TB.
+
+- Change exception syntax to get closer to QOP system (exception
+ parameters given with a specific instruction).
+
+- Add float and vector support.
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
new file mode 100644
index 0000000..dee1ebc
--- /dev/null
+++ b/tcg/arm/tcg-target.c
@@ -0,0 +1,1584 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Andrzej Zaborowski
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%r0",
+ "%r1",
+ "%r2",
+ "%r3",
+ "%r4",
+ "%r5",
+ "%r6",
+ "%r7",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+};
+
+int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+};
+
+const int tcg_target_call_iarg_regs[4] = {
+ TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
+};
+const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R0, TCG_REG_R1
+};
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ switch (type) {
+ case R_ARM_ABS32:
+ *(uint32_t *) code_ptr = value;
+ break;
+
+ case R_ARM_CALL:
+ case R_ARM_JUMP24:
+ default:
+ tcg_abort();
+
+ case R_ARM_PC24:
+ *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
+ (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
+ break;
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 4;
+}
+
+/* parse target specific constraints */
+int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'r':
+#ifndef CONFIG_SOFTMMU
+ case 'd':
+ case 'D':
+ case 'x':
+ case 'X':
+#endif
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ break;
+
+#ifdef CONFIG_SOFTMMU
+ /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
+ case 'x':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+
+ /* qemu_ld64 data_reg */
+ case 'd':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ /* r1 is still needed to load data_reg2, so don't use it. */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+
+ /* qemu_ld/st64 data_reg2 */
+ case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ /* r0, r1 and optionally r2 will be overwritten by the address
+ * and the low word of data, so don't use these. */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+# if TARGET_LONG_BITS == 64
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
+# endif
+ break;
+
+# if TARGET_LONG_BITS == 64
+ /* qemu_ld/st addr_reg2 */
+ case 'X':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ /* r0 will be overwritten by the low word of base, so don't use it. */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+# endif
+#endif
+
+ case '1':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ break;
+
+ case '2':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+
+ return 0;
+}
+
+/* Test if a constant matches the constraint.
+ * TODO: define constraints for:
+ *
+ * ldr/str offset: between -0xfff and 0xfff
+ * ldrh/strh offset: between -0xff and 0xff
+ * mov operand2: values represented with x << (2 * y), x < 0x100
+ * add, sub, eor...: ditto
+ */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else
+ return 0;
+}
+
+enum arm_data_opc_e {
+ ARITH_AND = 0x0,
+ ARITH_EOR = 0x1,
+ ARITH_SUB = 0x2,
+ ARITH_RSB = 0x3,
+ ARITH_ADD = 0x4,
+ ARITH_ADC = 0x5,
+ ARITH_SBC = 0x6,
+ ARITH_RSC = 0x7,
+ ARITH_TST = 0x8,
+ ARITH_CMP = 0xa,
+ ARITH_CMN = 0xb,
+ ARITH_ORR = 0xc,
+ ARITH_MOV = 0xd,
+ ARITH_BIC = 0xe,
+ ARITH_MVN = 0xf,
+};
+
+#define TO_CPSR(opc) \
+ ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
+
+#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
+#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
+#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
+#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
+#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
+#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
+#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
+#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
+
+enum arm_cond_code_e {
+ COND_EQ = 0x0,
+ COND_NE = 0x1,
+ COND_CS = 0x2, /* Unsigned greater or equal */
+ COND_CC = 0x3, /* Unsigned less than */
+ COND_MI = 0x4, /* Negative */
+ COND_PL = 0x5, /* Zero or greater */
+ COND_VS = 0x6, /* Overflow */
+ COND_VC = 0x7, /* No overflow */
+ COND_HI = 0x8, /* Unsigned greater than */
+ COND_LS = 0x9, /* Unsigned less or equal */
+ COND_GE = 0xa,
+ COND_LT = 0xb,
+ COND_GT = 0xc,
+ COND_LE = 0xd,
+ COND_AL = 0xe,
+};
+
+static const uint8_t tcg_cond_to_arm_cond[10] = {
+ [TCG_COND_EQ] = COND_EQ,
+ [TCG_COND_NE] = COND_NE,
+ [TCG_COND_LT] = COND_LT,
+ [TCG_COND_GE] = COND_GE,
+ [TCG_COND_LE] = COND_LE,
+ [TCG_COND_GT] = COND_GT,
+ /* unsigned */
+ [TCG_COND_LTU] = COND_CC,
+ [TCG_COND_GEU] = COND_CS,
+ [TCG_COND_LEU] = COND_LS,
+ [TCG_COND_GTU] = COND_HI,
+};
+
+static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
+{
+ tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+}
+
+static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
+{
+ tcg_out32(s, (cond << 28) | 0x0a000000 |
+ (((offset - 8) >> 2) & 0x00ffffff));
+}
+
+static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
+{
+#ifdef WORDS_BIGENDIAN
+ tcg_out8(s, (cond << 4) | 0x0a);
+ s->code_ptr += 3;
+#else
+ s->code_ptr += 3;
+ tcg_out8(s, (cond << 4) | 0x0a);
+#endif
+}
+
+static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
+{
+ tcg_out32(s, (cond << 28) | 0x0b000000 |
+ (((offset - 8) >> 2) & 0x00ffffff));
+}
+
+static inline void tcg_out_dat_reg(TCGContext *s,
+ int cond, int opc, int rd, int rn, int rm, int shift)
+{
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
+ (rn << 16) | (rd << 12) | shift | rm);
+}
+
+static inline void tcg_out_dat_reg2(TCGContext *s,
+ int cond, int opc0, int opc1, int rd0, int rd1,
+ int rn0, int rn1, int rm0, int rm1, int shift)
+{
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
+ (rn0 << 16) | (rd0 << 12) | shift | rm0);
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
+ (rn1 << 16) | (rd1 << 12) | shift | rm1);
+}
+
+static inline void tcg_out_dat_imm(TCGContext *s,
+ int cond, int opc, int rd, int rn, int im)
+{
+ tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
+ (rn << 16) | (rd << 12) | im);
+}
+
+static inline void tcg_out_movi32(TCGContext *s,
+ int cond, int rd, int32_t arg)
+{
+ int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
+
+ /* TODO: This is very suboptimal, we can easily have a constant
+ * pool somewhere after all the instructions. */
+
+ if (arg < 0 && arg > -0x100)
+ return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
+
+ if (offset < 0x100 && offset > -0x100)
+ return offset >= 0 ?
+ tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
+ tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
+
+ tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
+ if (arg & 0x0000ff00)
+ tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
+ ((arg >> 8) & 0xff) | 0xc00);
+ if (arg & 0x00ff0000)
+ tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
+ ((arg >> 16) & 0xff) | 0x800);
+ if (arg & 0xff000000)
+ tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
+ ((arg >> 24) & 0xff) | 0x400);
+}
+
+static inline void tcg_out_mul32(TCGContext *s,
+ int cond, int rd, int rs, int rm)
+{
+ if (rd != rm)
+ tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
+ (rs << 8) | 0x90 | rm);
+ else if (rd != rs)
+ tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
+ (rm << 8) | 0x90 | rs);
+ else {
+ tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
+ (rs << 8) | 0x90 | rm);
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ rd, 0, 8, SHIFT_IMM_LSL(0));
+ }
+}
+
+static inline void tcg_out_umull32(TCGContext *s,
+ int cond, int rd0, int rd1, int rs, int rm)
+{
+ if (rd0 != rm && rd1 != rm)
+ tcg_out32(s, (cond << 28) | 0x800090 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
+ else if (rd0 != rs && rd1 != rs)
+ tcg_out32(s, (cond << 28) | 0x800090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
+ else {
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
+ tcg_out32(s, (cond << 28) | 0x800098 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8));
+ }
+}
+
+static inline void tcg_out_smull32(TCGContext *s,
+ int cond, int rd0, int rd1, int rs, int rm)
+{
+ if (rd0 != rm && rd1 != rm)
+ tcg_out32(s, (cond << 28) | 0xc00090 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
+ else if (rd0 != rs && rd1 != rs)
+ tcg_out32(s, (cond << 28) | 0xc00090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
+ else {
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
+ tcg_out32(s, (cond << 28) | 0xc00098 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8));
+ }
+}
+
+static inline void tcg_out_ld32_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05900000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05100000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_st32_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05800000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05000000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_ld32_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07900000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st32_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07800000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+/* Register pre-increment with base writeback. */
+static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07b00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07a00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01d000b0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x015000b0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_st16u_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01c000b0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x014000b0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x019000b0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st16u_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x018000b0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01d000f0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x015000f0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_st16s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01c000f0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x014000f0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x019000f0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st16s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x018000f0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld8_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05d00000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05500000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_st8_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05c00000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05400000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_ld8_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07d00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st8_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07c00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01d000d0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x015000d0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_st8s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01c000d0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x014000d0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x019000d0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st8s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x018000d0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld32u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld32_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_st32(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_st32_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld16u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld16u_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld16s(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld16s_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_st16u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_st16u_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld8u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld8_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld8s(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld8s_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_st8u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_st8_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
+{
+ int32_t val;
+
+ val = addr - (tcg_target_long) s->code_ptr;
+ if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
+ tcg_out_b(s, cond, val);
+ else {
+#if 1
+ tcg_abort();
+#else
+ if (cond == COND_AL) {
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
+ } else {
+ tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
+ tcg_out_dat_reg(s, cond, ARITH_ADD,
+ 15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
+ }
+#endif
+ }
+}
+
+static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
+{
+ int32_t val;
+
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
+#endif
+
+ val = addr - (tcg_target_long) s->code_ptr;
+ if (val < 0x01fffffd && val > -0x01fffffd)
+ tcg_out_bl(s, cond, val);
+ else {
+#if 1
+ tcg_abort();
+#else
+ if (cond == COND_AL) {
+ tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
+ } else {
+ tcg_out_movi32(s, cond, TCG_REG_R9, addr);
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
+ tcg_out_bx(s, cond, TCG_REG_R9);
+ }
+#endif
+ }
+
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
+#endif
+}
+
+static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
+{
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
+#endif
+ /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
+ tcg_out_bx(s, cond, arg);
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
+#endif
+}
+
+static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value)
+ tcg_out_goto(s, cond, l->u.value);
+ else if (cond == COND_AL) {
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
+ s->code_ptr += 4;
+ } else {
+ /* Probably this should be preferred even for COND_AL... */
+ tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
+ tcg_out_b_noaddr(s, cond);
+ }
+}
+
+static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
+ void *helper_div, void *helper_rem, int shift)
+{
+ int div_reg = args[0];
+ int rem_reg = args[1];
+
+ /* stmdb sp!, { r0 - r3, ip, lr } */
+ /* (Note that we need an even number of registers as per EABI) */
+ tcg_out32(s, (cond << 28) | 0x092d500f);
+
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
+
+ tcg_out_call(s, cond, (uint32_t) helper_div);
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));
+
+ /* ldmia sp, { r0 - r3, ip, lr } */
+ tcg_out32(s, (cond << 28) | 0x089d500f);
+
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
+
+ tcg_out_call(s, cond, (uint32_t) helper_rem);
+
+ tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));
+
+ /* ldr r0, [sp], #4 */
+ if (rem_reg != 0 && div_reg != 0)
+ tcg_out32(s, (cond << 28) | 0x04bd0004);
+ /* ldr r1, [sp], #4 */
+ if (rem_reg != 1 && div_reg != 1)
+ tcg_out32(s, (cond << 28) | 0x04bd1004);
+ /* ldr r2, [sp], #4 */
+ if (rem_reg != 2 && div_reg != 2)
+ tcg_out32(s, (cond << 28) | 0x04bd2004);
+ /* ldr r3, [sp], #4 */
+ if (rem_reg != 3 && div_reg != 3)
+ tcg_out32(s, (cond << 28) | 0x04bd3004);
+ /* ldr ip, [sp], #4 */
+ if (rem_reg != 12 && div_reg != 12)
+ tcg_out32(s, (cond << 28) | 0x04bdc004);
+ /* ldr lr, [sp], #4 */
+ if (rem_reg != 14 && div_reg != 14)
+ tcg_out32(s, (cond << 28) | 0x04bde004);
+}
+
+#ifdef CONFIG_SOFTMMU
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
+
+static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
+ const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2;
+#ifdef CONFIG_SOFTMMU
+ int mem_index, s_bits;
+# if TARGET_LONG_BITS == 64
+ int addr_reg2;
+# endif
+ uint32_t *label_ptr;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+#ifdef CONFIG_SOFTMMU
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ /* Should generate something like the following:
+ * shr r8, addr_reg, #TARGET_PAGE_BITS
+ * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
+ * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
+ */
+# if CPU_TLB_BITS > 8
+# error
+# endif
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ tcg_out_dat_imm(s, COND_AL, ARITH_AND,
+ 0, 8, CPU_TLB_SIZE - 1);
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
+ 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+ /* In the
+ * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
+ * below, the offset is likely to exceed 12 bits if mem_index != 0 (and
+ * not to exceed them otherwise), so use an
+ * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+ * before.
+ */
+ if (mem_index)
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
+ (mem_index << (TLB_SHIFT & 1)) |
+ ((16 - (TLB_SHIFT >> 1)) << 8));
+ tcg_out_ld32_12(s, COND_AL, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_read));
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
+ 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ /* Check alignment. */
+ if (s_bits)
+ tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
+ 0, addr_reg, (1 << s_bits) - 1);
+# if TARGET_LONG_BITS == 64
+ /* XXX: possibly we could use a block data load or writeback in
+ * the first access. */
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
+ 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
+# endif
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addend));
+
+ switch (opc) {
+ case 0:
+ tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 0 | 4:
+ tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1:
+ tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1 | 4:
+ tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 2:
+ default:
+ tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 3:
+ tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
+ tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
+ break;
+ }
+
+ label_ptr = (void *) s->code_ptr;
+ tcg_out_b(s, COND_EQ, 8);
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
+# endif
+
+ /* TODO: move this code to where the constant pool will be */
+ if (addr_reg)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, addr_reg, SHIFT_IMM_LSL(0));
+# if TARGET_LONG_BITS == 32
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
+# else
+ if (addr_reg2 != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+# endif
+ tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
+ (tcg_target_long) s->code_ptr);
+
+ switch (opc) {
+ case 0 | 4:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, 0, SHIFT_IMM_LSL(24));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_ASR(24));
+ break;
+ case 1 | 4:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, 0, SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_ASR(16));
+ break;
+ case 0:
+ case 1:
+ case 2:
+ default:
+ if (data_reg)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_LSL(0));
+ break;
+ case 3:
+ if (data_reg != 0)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_LSL(0));
+ if (data_reg2 != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg2, 0, 1, SHIFT_IMM_LSL(0));
+ break;
+ }
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
+# endif
+
+ *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
+#else
+ switch (opc) {
+ case 0:
+ tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 0 | 4:
+ tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1:
+ tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1 | 4:
+ tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 2:
+ default:
+ tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 3:
+ /* TODO: use block load -
+ * check that data_reg2 > data_reg or the other way */
+ tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
+ tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
+ break;
+ }
+#endif
+}
+
+static inline void tcg_out_qemu_st(TCGContext *s, int cond,
+ const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2;
+#ifdef CONFIG_SOFTMMU
+ int mem_index, s_bits;
+# if TARGET_LONG_BITS == 64
+ int addr_reg2;
+# endif
+ uint32_t *label_ptr;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+#ifdef CONFIG_SOFTMMU
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ /* Should generate something like the following:
+ * shr r8, addr_reg, #TARGET_PAGE_BITS
+ * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
+ * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
+ */
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ tcg_out_dat_imm(s, COND_AL, ARITH_AND,
+ 0, 8, CPU_TLB_SIZE - 1);
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
+ 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+ /* In the
+ * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
+ * below, the offset is likely to exceed 12 bits if mem_index != 0 (and
+ * not to exceed them otherwise), so use an
+ * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+ * before.
+ */
+ if (mem_index)
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
+ (mem_index << (TLB_SHIFT & 1)) |
+ ((16 - (TLB_SHIFT >> 1)) << 8));
+ tcg_out_ld32_12(s, COND_AL, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_write));
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
+ 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ /* Check alignment. */
+ if (s_bits)
+ tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
+ 0, addr_reg, (1 << s_bits) - 1);
+# if TARGET_LONG_BITS == 64
+ /* XXX: possibly we could use a block data load or writeback in
+ * the first access. */
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_write)
+ + 4);
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
+ 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
+# endif
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addend));
+
+ switch (opc) {
+ case 0:
+ tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 0 | 4:
+ tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1:
+ tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1 | 4:
+ tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 2:
+ default:
+ tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 3:
+ tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
+ tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
+ break;
+ }
+
+ label_ptr = (void *) s->code_ptr;
+ tcg_out_b(s, COND_EQ, 8);
+
+ /* TODO: move this code to where the constant pool will be */
+ if (addr_reg)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, addr_reg, SHIFT_IMM_LSL(0));
+# if TARGET_LONG_BITS == 32
+ switch (opc) {
+ case 0:
+ tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+ break;
+ case 1:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, data_reg, SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, 1, SHIFT_IMM_LSR(16));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+ break;
+ case 2:
+ if (data_reg != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, data_reg, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+ break;
+ case 3:
+ if (data_reg != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, data_reg, SHIFT_IMM_LSL(0));
+ if (data_reg2 != 2)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg2, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ }
+# else
+ if (addr_reg2 != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+ switch (opc) {
+ case 0:
+ tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ case 1:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg, SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, 2, SHIFT_IMM_LSR(16));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ case 2:
+ if (data_reg != 2)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ case 3:
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
+ tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
+ if (data_reg != 2)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg, SHIFT_IMM_LSL(0));
+ if (data_reg2 != 3)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 3, 0, data_reg2, SHIFT_IMM_LSL(0));
+ break;
+ }
+# endif
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
+# endif
+
+ tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
+ (tcg_target_long) s->code_ptr);
+# if TARGET_LONG_BITS == 64
+ if (opc == 3)
+ tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
+# endif
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
+# endif
+
+ *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
+#else
+ switch (opc) {
+ case 0:
+ tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 0 | 4:
+ tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1:
+ tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1 | 4:
+ tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 2:
+ default:
+ tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 3:
+ /* TODO: use block store -
+ * check that data_reg2 > data_reg or the other way */
+ tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
+ tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
+ break;
+ }
+#endif
+}
+
+static uint8_t *tb_ret_addr;
+
+static inline void tcg_out_op(TCGContext *s, int opc,
+ const TCGArg *args, const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+#ifdef SAVE_LR
+ if (args[0] >> 8)
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
+ else
+ tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
+ if (args[0] >> 8)
+ tcg_out32(s, args[0]);
+#else
+ if (args[0] >> 8)
+ tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
+ else
+ tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
+ tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
+ if (args[0] >> 8)
+ tcg_out32(s, args[0]);
+#endif
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* Direct jump method */
+#if 1
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ tcg_out_b(s, COND_AL, 8);
+#else
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ tcg_out32(s, 0);
+#endif
+ } else {
+ /* Indirect jump method */
+#if 1
+ c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
+ if (c > 0xfff || c < -0xfff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_R0,
+ (tcg_target_long) (s->tb_next + args[0]));
+ tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
+ } else
+ tcg_out_ld32_12(s, COND_AL, 15, 15, c);
+#else
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
+ tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
+ tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
+#endif
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ if (const_args[0])
+ tcg_out_call(s, COND_AL, args[0]);
+ else
+ tcg_out_callr(s, COND_AL, args[0]);
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0])
+ tcg_out_goto(s, COND_AL, args[0]);
+ else
+ tcg_out_bx(s, COND_AL, args[0]);
+ break;
+ case INDEX_op_br:
+ tcg_out_goto_label(s, COND_AL, args[0]);
+ break;
+
+ case INDEX_op_ld8u_i32:
+ tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16u_i32:
+ tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i32:
+ tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st8_i32:
+ tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st16_i32:
+ tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i32:
+ tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_mov_i32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[1], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi32(s, COND_AL, args[0], args[1]);
+ break;
+ case INDEX_op_add_i32:
+ c = ARITH_ADD;
+ goto gen_arith;
+ case INDEX_op_sub_i32:
+ c = ARITH_SUB;
+ goto gen_arith;
+ case INDEX_op_and_i32:
+ c = ARITH_AND;
+ goto gen_arith;
+ case INDEX_op_or_i32:
+ c = ARITH_ORR;
+ goto gen_arith;
+ case INDEX_op_xor_i32:
+ c = ARITH_EOR;
+ /* Fall through. */
+ gen_arith:
+ tcg_out_dat_reg(s, COND_AL, c,
+ args[0], args[1], args[2], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_add2_i32:
+ tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
+ args[0], args[1], args[2], args[3],
+ args[4], args[5], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_sub2_i32:
+ tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
+ args[0], args[1], args[2], args[3],
+ args[4], args[5], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_neg_i32:
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
+ break;
+ case INDEX_op_mul_i32:
+ tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_mulu2_i32:
+ tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
+ break;
+ case INDEX_op_div2_i32:
+ tcg_out_div_helper(s, COND_AL, args,
+ tcg_helper_div_i64, tcg_helper_rem_i64,
+ SHIFT_IMM_ASR(31));
+ break;
+ case INDEX_op_divu2_i32:
+ tcg_out_div_helper(s, COND_AL, args,
+ tcg_helper_divu_i64, tcg_helper_remu_i64,
+ SHIFT_IMM_LSR(31));
+ break;
+ /* XXX: Perhaps args[2] & 0x1f is wrong */
+ case INDEX_op_shl_i32:
+ c = const_args[2] ?
+ SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
+ goto gen_shift32;
+ case INDEX_op_shr_i32:
+ c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
+ SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
+ goto gen_shift32;
+ case INDEX_op_sar_i32:
+ c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
+ SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
+ /* Fall through. */
+ gen_shift32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
+ break;
+
+ case INDEX_op_brcond_i32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
+ args[0], args[1], SHIFT_IMM_LSL(0));
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
+ break;
+ case INDEX_op_brcond2_i32:
+ /* The resulting conditions are:
+ * TCG_COND_EQ --> a0 == a2 && a1 == a3,
+ * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
+ * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
+ * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
+ * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
+ * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
+ */
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
+ args[1], args[3], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
+ args[0], args[2], SHIFT_IMM_LSL(0));
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, COND_AL, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, COND_AL, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, COND_AL, args, 2);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, COND_AL, args, 3);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, COND_AL, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, COND_AL, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, COND_AL, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, COND_AL, args, 3);
+ break;
+
+ case INDEX_op_ext8s_i32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[1], SHIFT_IMM_LSL(24));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[0], SHIFT_IMM_ASR(24));
+ break;
+ case INDEX_op_ext16s_i32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[1], SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[0], SHIFT_IMM_ASR(16));
+ break;
+
+ default:
+ tcg_abort();
+ }
+}
+
+static const TCGTargetOpDef arm_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ /* TODO: "r", "r", "ri" */
+ { INDEX_op_add_i32, { "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "r" } },
+ { INDEX_op_mul_i32, { "r", "r", "r" } },
+ { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
+ { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
+ { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
+ { INDEX_op_and_i32, { "r", "r", "r" } },
+ { INDEX_op_or_i32, { "r", "r", "r" } },
+ { INDEX_op_xor_i32, { "r", "r", "r" } },
+ { INDEX_op_neg_i32, { "r", "r" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "r" } },
+
+ /* TODO: "r", "r", "r", "r", "ri", "ri" */
+ { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld32u, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },
+
+ { INDEX_op_qemu_st8, { "x", "x", "X" } },
+ { INDEX_op_qemu_st16, { "x", "x", "X" } },
+ { INDEX_op_qemu_st32, { "x", "x", "X" } },
+ { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
+
+ { INDEX_op_ext8s_i32, { "r", "r" } },
+ { INDEX_op_ext16s_i32, { "r", "r" } },
+
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ /* fail safe: the TLB fast path relies on this entry size */
+ if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
+ tcg_abort();
+
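+ /* ((2 << TCG_REG_R14) - 1) makes r0-r14 allocatable; r8 is
+ * excluded here and marked reserved below. r0-r3, r12 (ip) and
+ * r14 (lr) are call-clobbered under the ARM calling convention. */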
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
+ ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ ((2 << TCG_REG_R3) - 1) |
+ (1 << TCG_REG_R12) | (1 << TCG_REG_R14));
+
+ tcg_regset_clear(s->reserved_regs);
+#ifdef SAVE_LR
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
+#endif
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
+
+ tcg_add_target_add_op_defs(arm_op_defs);
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ tcg_out_st32(s, COND_AL, arg, arg1, arg2);
+}
+
+void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val > 0) {
+ if (val < 0x100)
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
+ else
+ tcg_abort();
+ } else if (val < 0) {
+ if (val > -0x100)
+ tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
+ else
+ tcg_abort();
+ }
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ tcg_out_movi32(s, COND_AL, ret, arg);
+}
+
+void tcg_target_qemu_prologue(TCGContext *s)
+{
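+ /* The prologue saves the call-saved registers TCG uses plus lr,
+ * then jumps to the translated code whose address arrives in r0;
+ * exit_tb comes back to tb_ret_addr, where the epilogue restores
+ * the saved registers and returns by loading pc. */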
+ /* stmdb sp!, { r9 - r11, lr } */
+ tcg_out32(s, (COND_AL << 28) | 0x092d4e00);
+
+ tcg_out_bx(s, COND_AL, TCG_REG_R0);
+ tb_ret_addr = s->code_ptr;
+
+ /* ldmia sp!, { r9 - r11, pc } */
+ tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
+}
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
new file mode 100644
index 0000000..6c180af
--- /dev/null
+++ b/tcg/arm/tcg-target.h
@@ -0,0 +1,76 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ * Copyright (c) 2008 Andrzej Zaborowski
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_ARM 1
+
+#define TCG_TARGET_REG_BITS 32
+#undef TCG_TARGET_WORDS_BIGENDIAN
+#undef TCG_TARGET_HAS_div_i32
+#undef TCG_TARGET_HAS_div_i64
+#undef TCG_TARGET_HAS_bswap_i32
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_neg_i32
+#undef TCG_TARGET_HAS_neg_i64
+#undef TCG_TARGET_STACK_GROWSUP
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_TARGET_NB_REGS
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R13
+#define TCG_TARGET_STACK_ALIGN 8
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+enum {
+ /* Note: must be synced with dyngen-exec.h */
+ TCG_AREG0 = TCG_REG_R7,
+ TCG_AREG1 = TCG_REG_R4,
+ TCG_AREG2 = TCG_REG_R5,
+ TCG_AREG3 = TCG_REG_R6,
+};
+
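+/* swi 0x9f0002 is the Linux __ARM_NR_cacheflush syscall:
+ a1 = start, a2 = end, a3 = flags (0). */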
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ register unsigned long _beg __asm ("a1") = start;
+ register unsigned long _end __asm ("a2") = stop;
+ register unsigned long _flg __asm ("a3") = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+}
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
new file mode 100644
index 0000000..3affd26
--- /dev/null
+++ b/tcg/hppa/tcg-target.c
@@ -0,0 +1,973 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%r0",
+ "%r1",
+ "%rp",
+ "%r3",
+ "%r4",
+ "%r5",
+ "%r6",
+ "%r7",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+ "%r15",
+ "%r16",
+ "%r17",
+ "%r18",
+ "%r19",
+ "%r20",
+ "%r21",
+ "%r22",
+ "%r23",
+ "%r24",
+ "%r25",
+ "%r26",
+ "%dp",
+ "%ret0",
+ "%ret1",
+ "%sp",
+ "%r31",
+};
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+
+ TCG_REG_R17,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+};
+
+static const int tcg_target_call_iarg_regs[4] = {
+ TCG_REG_R26,
+ TCG_REG_R25,
+ TCG_REG_R24,
+ TCG_REG_R23,
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_RET0,
+ TCG_REG_RET1,
+};
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ switch (type) {
+ case R_PARISC_PCREL17F:
+ hppa_patch17f((uint32_t *)code_ptr, value, addend);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 4;
+}
+
+/* parse target specific constraints */
+int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'L': /* qemu_ld/st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+
+ /* TODO */
+
+ return 0;
+}
+
+#define INSN_OP(x) ((x) << 26)
+#define INSN_EXT3BR(x) ((x) << 13)
+#define INSN_EXT3SH(x) ((x) << 10)
+#define INSN_EXT4(x) ((x) << 6)
+#define INSN_EXT5(x) (x)
+#define INSN_EXT6(x) ((x) << 6)
+#define INSN_EXT7(x) ((x) << 6)
+#define INSN_EXT8A(x) ((x) << 6)
+#define INSN_EXT8B(x) ((x) << 5)
+#define INSN_T(x) (x)
+#define INSN_R1(x) ((x) << 16)
+#define INSN_R2(x) ((x) << 21)
+#define INSN_DEP_LEN(x) (32 - (x))
+#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
+#define INSN_SHDEP_P(x) ((x) << 5)
+#define INSN_COND(x) ((x) << 13)
+
+#define COND_NEVER 0
+#define COND_EQUAL 1
+#define COND_LT 2
+#define COND_LTEQ 3
+#define COND_LTU 4
+#define COND_LTUEQ 5
+#define COND_SV 6
+#define COND_OD 7
+
+
+/* Logical ADD */
+#define ARITH_ADD (INSN_OP(0x02) | INSN_EXT6(0x28))
+#define ARITH_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
+#define ARITH_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
+#define ARITH_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
+#define ARITH_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
+
+#define SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
+#define VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
+#define DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
+#define ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
+#define ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
+#define EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
+#define EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
+#define VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
+
+#define SUBI (INSN_OP(0x25))
+#define MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
+
+#define BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
+#define BLE_SR4 (INSN_OP(0x39) | (1 << 13))
+#define BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
+#define BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
+#define LDIL (INSN_OP(0x08))
+#define LDO (INSN_OP(0x0d))
+
+#define LDB (INSN_OP(0x10))
+#define LDH (INSN_OP(0x11))
+#define LDW (INSN_OP(0x12))
+#define LDWM (INSN_OP(0x13))
+
+#define STB (INSN_OP(0x18))
+#define STH (INSN_OP(0x19))
+#define STW (INSN_OP(0x1a))
+#define STWM (INSN_OP(0x1b))
+
+#define COMBT (INSN_OP(0x20))
+#define COMBF (INSN_OP(0x22))
+
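+/* PA-RISC "low sign extension": an immediate field keeps its sign
+ bit in the least significant bit, with the magnitude bits above it;
+ lowsignext() builds such a field at the given bit position. */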
+static int lowsignext(uint32_t val, int start, int length)
+{
+ return (((val << 1) & ~(~0 << length)) |
+ ((val >> (length - 1)) & 1)) << start;
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ /* PA1.1 defines COPY as OR r,0,t */
+ tcg_out32(s, ARITH_OR | INSN_T(ret) | INSN_R1(arg) | INSN_R2(TCG_REG_R0));
+
+ /* PA2.0 defines COPY as LDO 0(r),t
+ * but hppa-dis.c is unaware of this definition */
+ /* tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(arg) | reassemble_14(0)); */
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ if (arg == (arg & 0x1fff)) {
+ tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(TCG_REG_R0) |
+ reassemble_14(arg));
+ } else {
+ tcg_out32(s, LDIL | INSN_R2(ret) |
+ reassemble_21(lrsel((uint32_t)arg, 0)));
+ if (arg & 0x7ff)
+ tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(ret) |
+ reassemble_14(rrsel((uint32_t)arg, 0)));
+ }
+}
+
+static inline void tcg_out_ld_raw(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ tcg_out32(s, LDIL | INSN_R2(ret) |
+ reassemble_21(lrsel((uint32_t)arg, 0)));
+ tcg_out32(s, LDW | INSN_R1(ret) | INSN_R2(ret) |
+ reassemble_14(rrsel((uint32_t)arg, 0)));
+}
+
+static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ tcg_out_ld_raw(s, ret, arg);
+}
+
+static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset,
+ int op)
+{
+ if (offset == (offset & 0xfff))
+ tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) |
+ reassemble_14(offset));
+ else {
+ fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset);
+ tcg_abort();
+ }
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ fprintf(stderr, "unimplemented %s\n", __func__);
+ tcg_abort();
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ fprintf(stderr, "unimplemented %s\n", __func__);
+ tcg_abort();
+}
+
+static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
+{
+ tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
+}
+
+static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
+ tcg_target_long val, int op)
+{
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R20, val);
+ tcg_out_arith(s, t, r1, TCG_REG_R20, op);
+}
+
+static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
+}
+
+static inline void tcg_out_nop(TCGContext *s)
+{
+ tcg_out32(s, ARITH_OR | INSN_T(TCG_REG_R0) | INSN_R1(TCG_REG_R0) |
+ INSN_R2(TCG_REG_R0));
+}
+
+static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
+{
+ tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
+}
+
+static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
+{
+ tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
+}
+
+static inline void tcg_out_bswap16(TCGContext *s, int ret, int arg)
+{
+ if (ret != arg)
+ tcg_out_mov(s, ret, arg);
+ tcg_out32(s, DEP | INSN_R2(ret) | INSN_R1(ret) |
+ INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
+ tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(TCG_REG_R0) |
+ INSN_R2(ret) | INSN_SHDEP_CP(8));
+}
+
+static inline void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
+{
+ tcg_out32(s, SHD | INSN_T(temp) | INSN_R1(arg) |
+ INSN_R2(arg) | INSN_SHDEP_CP(16));
+ tcg_out32(s, DEP | INSN_R2(temp) | INSN_R1(temp) |
+ INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
+ tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(arg) |
+ INSN_R2(temp) | INSN_SHDEP_CP(8));
+}
+
+static inline void tcg_out_call(TCGContext *s, void *func)
+{
+ uint32_t val = (uint32_t)__canonicalize_funcptr_for_compare(func);
+ tcg_out32(s, LDIL | INSN_R2(TCG_REG_R20) |
+ reassemble_21(lrsel(val, 0)));
+ tcg_out32(s, BLE_SR4 | INSN_R2(TCG_REG_R20) |
+ reassemble_17(rrsel(val, 0) >> 2));
+ tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label3_ptr;
+#endif
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ r0 = TCG_REG_R26;
+ r1 = TCG_REG_R25;
+
+#if defined(CONFIG_SOFTMMU)
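+ /* Software TLB lookup: r1 becomes the scaled TLB index derived
+ * from the address, r0 the page-masked address (plus alignment
+ * bits); the entry's addr_read is compared against r0, and a
+ * miss falls through to the slow-path helper call below. */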
+ tcg_out_mov(s, r1, addr_reg);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+ tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
+ INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
+
+ tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
+ ARITH_AND);
+
+ tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
+ tcg_out_arithi(s, r1, r1,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_read),
+ ARITH_ADD);
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);
+
+#if TARGET_LONG_BITS == 32
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+#else
+ /* if not equal, jump to label3 */
+ label3_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);
+
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_nop(s); /* delay slot */
+
+ /* label3: */
+ *label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
+#endif
+
+#if TARGET_LONG_BITS == 32
+ tcg_out_mov(s, TCG_REG_R26, addr_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R25, mem_index);
+#else
+ tcg_out_mov(s, TCG_REG_R26, addr_reg);
+ tcg_out_mov(s, TCG_REG_R25, addr_reg2);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
+#endif
+
+ tcg_out_call(s, qemu_ld_helpers[s_bits]);
+
+ switch(opc) {
+ case 0 | 4:
+ tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
+ break;
+ case 1 | 4:
+ tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ default:
+ tcg_out_mov(s, data_reg, TCG_REG_RET0);
+ break;
+ case 3:
+ tcg_abort();
+ tcg_out_mov(s, data_reg, TCG_REG_RET0);
+ tcg_out_mov(s, data_reg2, TCG_REG_RET1);
+ break;
+ }
+
+ /* jump to label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);
+
+ /* label1: */
+ *label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);
+
+ tcg_out_arithi(s, TCG_REG_R20, r1,
+ offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_read),
+ ARITH_ADD);
+ tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
+ tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out_ldst(s, data_reg, r0, 0, LDB);
+ break;
+ case 0 | 4:
+ tcg_out_ldst(s, data_reg, r0, 0, LDB);
+ tcg_out_ext8s(s, data_reg, data_reg);
+ break;
+ case 1:
+ tcg_out_ldst(s, data_reg, r0, 0, LDH);
+ if (bswap)
+ tcg_out_bswap16(s, data_reg, data_reg);
+ break;
+ case 1 | 4:
+ tcg_out_ldst(s, data_reg, r0, 0, LDH);
+ if (bswap)
+ tcg_out_bswap16(s, data_reg, data_reg);
+ tcg_out_ext16s(s, data_reg, data_reg);
+ break;
+ case 2:
+ tcg_out_ldst(s, data_reg, r0, 0, LDW);
+ if (bswap)
+ tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
+ break;
+ case 3:
+ tcg_abort();
+ if (!bswap) {
+ tcg_out_ldst(s, data_reg, r0, 0, LDW);
+ tcg_out_ldst(s, data_reg2, r0, 4, LDW);
+ } else {
+ tcg_out_ldst(s, data_reg, r0, 4, LDW);
+ tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
+ tcg_out_ldst(s, data_reg2, r0, 0, LDW);
+ tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
+#endif
+}
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label3_ptr;
+#endif
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+
+ s_bits = opc;
+
+ r0 = TCG_REG_R26;
+ r1 = TCG_REG_R25;
+
+#if defined(CONFIG_SOFTMMU)
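+ /* Same TLB lookup as in tcg_out_qemu_ld above, but against
+ * addr_write. */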
+ tcg_out_mov(s, r1, addr_reg);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+ tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
+ INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
+
+ tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
+ ARITH_AND);
+
+ tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
+ tcg_out_arithi(s, r1, r1,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_write),
+ ARITH_ADD);
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);
+
+#if TARGET_LONG_BITS == 32
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+#else
+ /* if not equal, jump to label3 */
+ label3_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);
+
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_nop(s); /* delay slot */
+
+ /* label3: */
+ *label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
+#endif
+
+ tcg_out_mov(s, TCG_REG_R26, addr_reg);
+#if TARGET_LONG_BITS == 64
+ tcg_out_mov(s, TCG_REG_R25, addr_reg2);
+ if (opc == 3) {
+ tcg_abort();
+ tcg_out_mov(s, TCG_REG_R24, data_reg);
+ tcg_out_mov(s, TCG_REG_R23, data_reg2);
+ /* TODO: push mem_index */
+ tcg_abort();
+ } else {
+ switch(opc) {
+ case 0:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
+ break;
+ case 1:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
+ break;
+ case 2:
+ tcg_out_mov(s, TCG_REG_R24, data_reg);
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
+ }
+#else
+ if (opc == 3) {
+ tcg_abort();
+ tcg_out_mov(s, TCG_REG_R25, data_reg);
+ tcg_out_mov(s, TCG_REG_R24, data_reg2);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
+ } else {
+ switch(opc) {
+ case 0:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
+ break;
+ case 1:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
+ break;
+ case 2:
+ tcg_out_mov(s, TCG_REG_R25, data_reg);
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
+ }
+#endif
+ tcg_out_call(s, qemu_st_helpers[s_bits]);
+
+ /* jump to label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);
+
+ /* label1: */
+ *label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);
+
+ tcg_out_arithi(s, TCG_REG_R20, r1,
+ offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_write),
+ ARITH_ADD);
+ tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
+ tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out_ldst(s, data_reg, r0, 0, STB);
+ break;
+ case 1:
+ if (bswap) {
+ tcg_out_bswap16(s, TCG_REG_R20, data_reg);
+ data_reg = TCG_REG_R20;
+ }
+ tcg_out_ldst(s, data_reg, r0, 0, STH);
+ break;
+ case 2:
+ if (bswap) {
+ tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
+ data_reg = TCG_REG_R20;
+ }
+ tcg_out_ldst(s, data_reg, r0, 0, STW);
+ break;
+ case 3:
+ tcg_abort();
+ if (!bswap) {
+ tcg_out_ldst(s, data_reg, r0, 0, STW);
+ tcg_out_ldst(s, data_reg2, r0, 4, STW);
+ } else {
+ tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
+ tcg_out_ldst(s, TCG_REG_R20, r0, 4, STW);
+ tcg_out_bswap32(s, TCG_REG_R20, data_reg2, TCG_REG_R20);
+ tcg_out_ldst(s, TCG_REG_R20, r0, 0, STW);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
+#endif
+}
+
+static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, args[0]);
+ tcg_out32(s, BV_N | INSN_R2(TCG_REG_R18));
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+ fprintf(stderr, "goto_tb direct\n");
+ tcg_abort();
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R20, args[0]);
+ tcg_out32(s, BV_N | INSN_R2(TCG_REG_R20));
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ } else {
+ /* indirect jump method */
+ tcg_out_ld_ptr(s, TCG_REG_R20,
+ (tcg_target_long)(s->tb_next + args[0]));
+ tcg_out32(s, BV_N | INSN_R2(TCG_REG_R20));
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ tcg_out32(s, BLE_SR4 | INSN_R2(args[0]));
+ tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
+ break;
+ case INDEX_op_jmp:
+ fprintf(stderr, "unimplemented jmp\n");
+ tcg_abort();
+ break;
+ case INDEX_op_br:
+ fprintf(stderr, "unimplemented br\n");
+ tcg_abort();
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
+ break;
+
+ case INDEX_op_ld8u_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDB);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDB);
+ tcg_out_ext8s(s, args[0], args[0]);
+ break;
+ case INDEX_op_ld16u_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDH);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDH);
+ tcg_out_ext16s(s, args[0], args[0]);
+ break;
+ case INDEX_op_ld_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDW);
+ break;
+
+ case INDEX_op_st8_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], STB);
+ break;
+ case INDEX_op_st16_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], STH);
+ break;
+ case INDEX_op_st_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], STW);
+ break;
+
+ case INDEX_op_sub_i32:
+ c = ARITH_SUB;
+ goto gen_arith;
+ case INDEX_op_and_i32:
+ c = ARITH_AND;
+ goto gen_arith;
+ case INDEX_op_or_i32:
+ c = ARITH_OR;
+ goto gen_arith;
+ case INDEX_op_xor_i32:
+ c = ARITH_XOR;
+ goto gen_arith;
+ case INDEX_op_add_i32:
+ c = ARITH_ADD;
+ goto gen_arith;
+
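+ /* PA-RISC variable shifts take their count from the SAR control
+ * register (cr11), loaded with MTCTL; the shift itself is done
+ * with zvdep (shl), vshd against %r0 (shr) or vextrs (sar). */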
+ case INDEX_op_shl_i32:
+ tcg_out32(s, SUBI | INSN_R1(TCG_REG_R20) | INSN_R2(args[2]) |
+ lowsignext(0x1f, 0, 11));
+ tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R20));
+ tcg_out32(s, ZVDEP | INSN_R2(args[0]) | INSN_R1(args[1]) |
+ INSN_DEP_LEN(32));
+ break;
+ case INDEX_op_shr_i32:
+ tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(args[2]));
+ tcg_out32(s, VSHD | INSN_T(args[0]) | INSN_R1(TCG_REG_R0) |
+ INSN_R2(args[1]));
+ break;
+ case INDEX_op_sar_i32:
+ tcg_out32(s, SUBI | INSN_R1(TCG_REG_R20) | INSN_R2(args[2]) |
+ lowsignext(0x1f, 0, 11));
+ tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R20));
+ tcg_out32(s, VEXTRS | INSN_R1(args[0]) | INSN_R2(args[1]) |
+ INSN_DEP_LEN(32));
+ break;
+
+ case INDEX_op_mul_i32:
+ fprintf(stderr, "unimplemented mul\n");
+ tcg_abort();
+ break;
+ case INDEX_op_mulu2_i32:
+ fprintf(stderr, "unimplemented mulu2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_div2_i32:
+ fprintf(stderr, "unimplemented div2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_divu2_i32:
+ fprintf(stderr, "unimplemented divu2\n");
+ tcg_abort();
+ break;
+
+ case INDEX_op_brcond_i32:
+ fprintf(stderr, "unimplemented brcond\n");
+ tcg_abort();
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+
+ default:
+ fprintf(stderr, "unknown opcode 0x%x\n", opc);
+ tcg_abort();
+ }
+ return;
+
+gen_arith:
+ tcg_out_arith(s, args[0], args[1], args[2], c);
+}
+
+static const TCGTargetOpDef hppa_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+
+ { INDEX_op_call, { "r" } },
+ { INDEX_op_jmp, { "r" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "r" } },
+ { INDEX_op_and_i32, { "r", "r", "r" } },
+ { INDEX_op_or_i32, { "r", "r", "r" } },
+ { INDEX_op_xor_i32, { "r", "r", "r" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "r" } },
+ { INDEX_op_shr_i32, { "r", "r", "r" } },
+ { INDEX_op_sar_i32, { "r", "r", "r" } },
+
+ { INDEX_op_brcond_i32, { "r", "r" } },
+
+#if TARGET_LONG_BITS == 32
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L" } },
+#else
+ { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
+#endif
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R20) |
+ (1 << TCG_REG_R21) |
+ (1 << TCG_REG_R22) |
+ (1 << TCG_REG_R23) |
+ (1 << TCG_REG_R24) |
+ (1 << TCG_REG_R25) |
+ (1 << TCG_REG_R26));
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
+
+ tcg_add_target_add_op_defs(hppa_op_defs);
+}
diff --git a/tcg/hppa/tcg-target.h b/tcg/hppa/tcg-target.h
new file mode 100644
index 0000000..8e2693d
--- /dev/null
+++ b/tcg/hppa/tcg-target.h
@@ -0,0 +1,204 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#define TCG_TARGET_HPPA 1
+
+#if defined(_PA_RISC1_1)
+#define TCG_TARGET_REG_BITS 32
+#else
+#error unsupported
+#endif
+
+#define TCG_TARGET_WORDS_BIGENDIAN
+
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_RP,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_DP,
+ TCG_REG_RET0,
+ TCG_REG_RET1,
+ TCG_REG_SP,
+ TCG_REG_R31,
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_STACK_GROWSUP
+
+/* optional instructions */
+//#define TCG_TARGET_HAS_ext8s_i32
+//#define TCG_TARGET_HAS_ext16s_i32
+//#define TCG_TARGET_HAS_bswap16_i32
+//#define TCG_TARGET_HAS_bswap_i32
+
+/* Note: must be synced with dyngen-exec.h */
+#define TCG_AREG0 TCG_REG_R17
+#define TCG_AREG1 TCG_REG_R14
+#define TCG_AREG2 TCG_REG_R15
+#define TCG_AREG3 TCG_REG_R16
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ start &= ~31;
+ while (start <= stop)
+ {
+ asm volatile ("fdc 0(%0)\n"
+ "sync\n"
+ "fic 0(%%sr4, %0)\n"
+ "sync\n"
+ : : "r"(start) : "memory");
+ start += 32;
+ }
+}
+
+/* supplied by libgcc */
+extern void *__canonicalize_funcptr_for_compare(void *);
+
+/* Field selection types defined by hppa */
+#define rnd(x) (((x)+0x1000)&~0x1fff)
+/* lsel: select left 21 bits */
+#define lsel(v,a) (((v)+(a))>>11)
+/* rsel: select right 11 bits */
+#define rsel(v,a) (((v)+(a))&0x7ff)
+/* lrsel with rounding of addend to nearest 8k */
+#define lrsel(v,a) (((v)+rnd(a))>>11)
+/* rrsel with rounding of addend to nearest 8k */
+#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
+
+#define mask(x,sz) ((x) & ~((1<<(sz))-1))
+
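+/* PA-RISC scatters the bits of branch displacements and immediates
+ within the instruction word; the reassemble_* helpers permute a
+ contiguous value into the encoded layout. */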
+static inline int reassemble_12(int as12)
+{
+ return (((as12 & 0x800) >> 11) |
+ ((as12 & 0x400) >> 8) |
+ ((as12 & 0x3ff) << 3));
+}
+
+static inline int reassemble_14(int as14)
+{
+ return (((as14 & 0x1fff) << 1) |
+ ((as14 & 0x2000) >> 13));
+}
+
+static inline int reassemble_17(int as17)
+{
+ return (((as17 & 0x10000) >> 16) |
+ ((as17 & 0x0f800) << 5) |
+ ((as17 & 0x00400) >> 8) |
+ ((as17 & 0x003ff) << 3));
+}
+
+static inline int reassemble_21(int as21)
+{
+ return (((as21 & 0x100000) >> 20) |
+ ((as21 & 0x0ffe00) >> 8) |
+ ((as21 & 0x000180) << 7) |
+ ((as21 & 0x00007c) << 14) |
+ ((as21 & 0x000003) << 12));
+}
+
+static inline void hppa_patch21l(uint32_t *insn, int val, int addend)
+{
+ val = lrsel(val, addend);
+ *insn = mask(*insn, 21) | reassemble_21(val);
+}
+
+static inline void hppa_patch14r(uint32_t *insn, int val, int addend)
+{
+ val = rrsel(val, addend);
+ *insn = mask(*insn, 14) | reassemble_14(val);
+}
+
+static inline void hppa_patch17r(uint32_t *insn, int val, int addend)
+{
+ val = rrsel(val, addend);
+ *insn = (*insn & ~0x1f1ffd) | reassemble_17(val);
+}
+
+
+static inline void hppa_patch21l_dprel(uint32_t *insn, int val, int addend)
+{
+ register unsigned int dp asm("r27");
+ hppa_patch21l(insn, val - dp, addend);
+}
+
+static inline void hppa_patch14r_dprel(uint32_t *insn, int val, int addend)
+{
+ register unsigned int dp asm("r27");
+ hppa_patch14r(insn, val - dp, addend);
+}
+
+static inline void hppa_patch17f(uint32_t *insn, int val, int addend)
+{
+ int dot = (int)insn & ~0x3;
+ int v = ((val + addend) - dot - 8) / 4;
+ if (v >= (1 << 16) || v < -(1 << 16)) { /* 17-bit signed word offset */
+ printf("cannot fit branch to offset %d [%08x->%08x]\n", v, dot, val);
+ abort();
+ }
+ *insn = (*insn & ~0x1f1ffd) | reassemble_17(v);
+}
+
+static inline void hppa_load_imm21l(uint32_t *insn, int val, int addend)
+{
+ /* Transform addil L'sym(%dp) to ldil L'val, %r1 */
+ *insn = 0x20200000 | reassemble_21(lrsel(val, 0));
+}
+
+static inline void hppa_load_imm14r(uint32_t *insn, int val, int addend)
+{
+ /* Transform ldw R'sym(%r1), %rN to ldo R'sym(%r1), %rN */
+ hppa_patch14r(insn, val, addend);
+ /* HACK */
+ if (addend == 0)
+ *insn = (*insn & ~0xfc000000) | (0x0d << 26);
+}
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
new file mode 100644
index 0000000..08bb783
--- /dev/null
+++ b/tcg/i386/tcg-target.c
@@ -0,0 +1,1185 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%eax",
+ "%ecx",
+ "%edx",
+ "%ebx",
+ "%esp",
+ "%ebp",
+ "%esi",
+ "%edi",
+};
+
+int tcg_target_reg_alloc_order[] = {
+ TCG_REG_EAX,
+ TCG_REG_EDX,
+ TCG_REG_ECX,
+ TCG_REG_EBX,
+ TCG_REG_ESI,
+ TCG_REG_EDI,
+ TCG_REG_EBP,
+};
+
+const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
+const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
+
+static uint8_t *tb_ret_addr;
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch(type) {
+ case R_386_32:
+ *(uint32_t *)code_ptr = value;
+ break;
+ case R_386_PC32:
+ *(uint32_t *)code_ptr = value - (long)code_ptr;
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ flags &= TCG_CALL_TYPE_MASK;
+ switch(flags) {
+ case TCG_CALL_TYPE_STD:
+ return 0;
+ case TCG_CALL_TYPE_REGPARM_1:
+ case TCG_CALL_TYPE_REGPARM_2:
+ case TCG_CALL_TYPE_REGPARM:
+ return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
+ default:
+ tcg_abort();
+ }
+}
+
+/* parse target specific constraints */
+int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch(ct_str[0]) {
+ case 'a':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
+ break;
+ case 'b':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
+ break;
+ case 'c':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
+ break;
+ case 'd':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
+ break;
+ case 'S':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
+ break;
+ case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
+ break;
+ case 'q':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xf);
+ break;
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xff);
+ break;
+
+ /* qemu_ld/st address constraint */
+ case 'L':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else
+ return 0;
+}
+
+#define ARITH_ADD 0
+#define ARITH_OR 1
+#define ARITH_ADC 2
+#define ARITH_SBB 3
+#define ARITH_AND 4
+#define ARITH_SUB 5
+#define ARITH_XOR 6
+#define ARITH_CMP 7
+
+#define SHIFT_SHL 4
+#define SHIFT_SHR 5
+#define SHIFT_SAR 7
+
+#define JCC_JMP (-1)
+#define JCC_JO 0x0
+#define JCC_JNO 0x1
+#define JCC_JB 0x2
+#define JCC_JAE 0x3
+#define JCC_JE 0x4
+#define JCC_JNE 0x5
+#define JCC_JBE 0x6
+#define JCC_JA 0x7
+#define JCC_JS 0x8
+#define JCC_JNS 0x9
+#define JCC_JP 0xa
+#define JCC_JNP 0xb
+#define JCC_JL 0xc
+#define JCC_JGE 0xd
+#define JCC_JLE 0xe
+#define JCC_JG 0xf
+
+#define P_EXT 0x100 /* 0x0f opcode prefix */
+
+static const uint8_t tcg_cond_to_jcc[10] = {
+ [TCG_COND_EQ] = JCC_JE,
+ [TCG_COND_NE] = JCC_JNE,
+ [TCG_COND_LT] = JCC_JL,
+ [TCG_COND_GE] = JCC_JGE,
+ [TCG_COND_LE] = JCC_JLE,
+ [TCG_COND_GT] = JCC_JG,
+ [TCG_COND_LTU] = JCC_JB,
+ [TCG_COND_GEU] = JCC_JAE,
+ [TCG_COND_LEU] = JCC_JBE,
+ [TCG_COND_GTU] = JCC_JA,
+};
+
+static inline void tcg_out_opc(TCGContext *s, int opc)
+{
+ if (opc & P_EXT)
+ tcg_out8(s, 0x0f);
+ tcg_out8(s, opc);
+}
+
+static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
+{
+ tcg_out_opc(s, opc);
+ tcg_out8(s, 0xc0 | (r << 3) | rm);
+}
+
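+/* ModRM addressing: mod=00 with rm=101 encodes a bare disp32, so EBP
+ as a base always needs an explicit displacement, and ESP as a base
+ always needs a SIB byte (0x24 = base ESP, no index); hence the
+ special cases below. */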
+/* rm == -1 means no base register: a bare 32-bit displacement */
+static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
+ int32_t offset)
+{
+ tcg_out_opc(s, opc);
+ if (rm == -1) {
+ tcg_out8(s, 0x05 | (r << 3));
+ tcg_out32(s, offset);
+ } else if (offset == 0 && rm != TCG_REG_EBP) {
+ if (rm == TCG_REG_ESP) {
+ tcg_out8(s, 0x04 | (r << 3));
+ tcg_out8(s, 0x24);
+ } else {
+ tcg_out8(s, 0x00 | (r << 3) | rm);
+ }
+ } else if ((int8_t)offset == offset) {
+ if (rm == TCG_REG_ESP) {
+ tcg_out8(s, 0x44 | (r << 3));
+ tcg_out8(s, 0x24);
+ } else {
+ tcg_out8(s, 0x40 | (r << 3) | rm);
+ }
+ tcg_out8(s, offset);
+ } else {
+ if (rm == TCG_REG_ESP) {
+ tcg_out8(s, 0x84 | (r << 3));
+ tcg_out8(s, 0x24);
+ } else {
+ tcg_out8(s, 0x80 | (r << 3) | rm);
+ }
+ tcg_out32(s, offset);
+ }
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ if (arg != ret)
+ tcg_out_modrm(s, 0x8b, ret, arg);
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, int32_t arg)
+{
+ if (arg == 0) {
+ /* xor r0,r0 */
+ tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret);
+ } else {
+ tcg_out8(s, 0xb8 + ret);
+ tcg_out32(s, arg);
+ }
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ /* movl */
+ tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ /* movl */
+ tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2);
+}
+
+static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val)
+{
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, 0x83, c, r0);
+ tcg_out8(s, val);
+ } else {
+ tcg_out_modrm(s, 0x81, c, r0);
+ tcg_out32(s, val);
+ }
+}
+
+void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val != 0)
+ tgen_arithi(s, ARITH_ADD, reg, val);
+}
+
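+/* Emit a jump (opc == -1) or conditional jump to a label, using the
+ short rel8 forms (0xeb / 0x70+cc) when the displacement fits in 8
+ bits and the rel32 forms (0xe9 / 0x0f 0x80+cc) otherwise; forward
+ references always take the 32-bit form plus a relocation. */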
+static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
+{
+ int32_t val, val1;
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value) {
+ val = l->u.value - (tcg_target_long)s->code_ptr;
+ val1 = val - 2;
+ if ((int8_t)val1 == val1) {
+ if (opc == -1)
+ tcg_out8(s, 0xeb);
+ else
+ tcg_out8(s, 0x70 + opc);
+ tcg_out8(s, val1);
+ } else {
+ if (opc == -1) {
+ tcg_out8(s, 0xe9);
+ tcg_out32(s, val - 5);
+ } else {
+ tcg_out8(s, 0x0f);
+ tcg_out8(s, 0x80 + opc);
+ tcg_out32(s, val - 6);
+ }
+ }
+ } else {
+ if (opc == -1) {
+ tcg_out8(s, 0xe9);
+ } else {
+ tcg_out8(s, 0x0f);
+ tcg_out8(s, 0x80 + opc);
+ }
+ tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
+ s->code_ptr += 4;
+ }
+}
+
+static void tcg_out_brcond(TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ if (const_arg2) {
+ if (arg2 == 0) {
+ /* test r, r */
+ tcg_out_modrm(s, 0x85, arg1, arg1);
+ } else {
+ tgen_arithi(s, ARITH_CMP, arg1, arg2);
+ }
+ } else {
+ tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
+ }
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
+}
+
+/* XXX: we implement it at the target level to avoid having to
+ handle temporaries that live across basic blocks */
+static void tcg_out_brcond2(TCGContext *s,
+ const TCGArg *args, const int *const_args)
+{
+ int label_next;
+ label_next = gen_new_label();
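+ /* For the ordered conditions, compare the high halves first: a
+ * strict inequality there decides the branch, and equality falls
+ * through to an unsigned comparison of the low halves; EQ and NE
+ * simply chain two single-word compares. */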
+ switch(args[4]) {
+ case TCG_COND_EQ:
+ tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next);
+ tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]);
+ break;
+ case TCG_COND_NE:
+ tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]);
+ tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]);
+ break;
+ case TCG_COND_LT:
+ tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_LE:
+ tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_GT:
+ tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_GE:
+ tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_LTU:
+ tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_LEU:
+ tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_GTU:
+ tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
+ break;
+ case TCG_COND_GEU:
+ tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
+ tcg_out_jxx(s, JCC_JNE, label_next);
+ tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
+ break;
+ default:
+ tcg_abort();
+ }
+ tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
+ EAX. That will be useful once fixed-register globals are less
+ common. */
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
+#if defined(CONFIG_SOFTMMU)
+ uint8_t *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+#if defined(CONFIG_SOFTMMU)
+ uint8_t *label3_ptr;
+#endif
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0;
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ r0 = TCG_REG_EAX;
+ r1 = TCG_REG_EDX;
+
+#if defined(CONFIG_SOFTMMU)
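+ /* Software TLB lookup: r1 = TLB index derived from the address,
+ * used to address env->tlb_table via lea (with %ebp holding env);
+ * r0 = page-masked address compared against the entry's addr_read.
+ * A mismatch falls through to the helper call below. */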
+ tcg_out_mov(s, r1, addr_reg);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+ tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
+ tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+
+ tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
+ tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
+ tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
+ tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
+ tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
+ tcg_out8(s, (5 << 3) | r1);
+ tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
+
+ /* cmp 0(r1), r0 */
+ tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+#if TARGET_LONG_BITS == 32
+ /* je label1 */
+ tcg_out8(s, 0x70 + JCC_JE);
+ label1_ptr = s->code_ptr;
+ s->code_ptr++;
+#else
+ /* jne label3 */
+ tcg_out8(s, 0x70 + JCC_JNE);
+ label3_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* cmp 4(r1), addr_reg2 */
+ tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);
+
+ /* je label1 */
+ tcg_out8(s, 0x70 + JCC_JE);
+ label1_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* label3: */
+ *label3_ptr = s->code_ptr - label3_ptr - 1;
+#endif
+
+ /* XXX: move that code at the end of the TB */
+#if TARGET_LONG_BITS == 32
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
+#else
+ tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
+#endif
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+
+ switch(opc) {
+ case 0 | 4:
+ /* movsbl */
+ tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX);
+ break;
+ case 1 | 4:
+ /* movswl */
+ tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ default:
+ tcg_out_mov(s, data_reg, TCG_REG_EAX);
+ break;
+ case 3:
+ if (data_reg == TCG_REG_EDX) {
+ tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
+ tcg_out_mov(s, data_reg2, TCG_REG_EAX);
+ } else {
+ tcg_out_mov(s, data_reg, TCG_REG_EAX);
+ tcg_out_mov(s, data_reg2, TCG_REG_EDX);
+ }
+ break;
+ }
+
+ /* jmp label2 */
+ tcg_out8(s, 0xeb);
+ label2_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* label1: */
+ *label1_ptr = s->code_ptr - label1_ptr - 1;
+
+ /* add x(r1), r0 */
+ tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_read));
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 1;
+#else
+ bswap = 0;
+#endif
+ switch(opc) {
+ case 0:
+ /* movzbl */
+ tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
+ break;
+ case 0 | 4:
+ /* movsbl */
+ tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);
+ break;
+ case 1:
+ /* movzwl */
+ tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
+ if (bswap) {
+ /* rolw $8, data_reg */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm(s, 0xc1, 0, data_reg);
+ tcg_out8(s, 8);
+ }
+ break;
+ case 1 | 4:
+ /* movswl */
+ tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);
+ if (bswap) {
+ /* rolw $8, data_reg */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm(s, 0xc1, 0, data_reg);
+ tcg_out8(s, 8);
+
+ /* movswl data_reg, data_reg */
+ tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg);
+ }
+ break;
+ case 2:
+ /* movl (r0), data_reg */
+ tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
+ if (bswap) {
+ /* bswap */
+ tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
+ }
+ break;
+ case 3:
+ /* XXX: could be nicer */
+ if (r0 == data_reg) {
+ r1 = TCG_REG_EDX;
+ if (r1 == data_reg)
+ r1 = TCG_REG_EAX;
+ tcg_out_mov(s, r1, r0);
+ r0 = r1;
+ }
+ if (!bswap) {
+ tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
+ tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);
+ } else {
+ tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);
+ tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
+
+ tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);
+ /* bswap */
+ tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = s->code_ptr - label2_ptr - 1;
+#endif
+}
+
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
+#if defined(CONFIG_SOFTMMU)
+ uint8_t *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+#if defined(CONFIG_SOFTMMU)
+ uint8_t *label3_ptr;
+#endif
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0;
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+
+ s_bits = opc;
+
+ r0 = TCG_REG_EAX;
+ r1 = TCG_REG_EDX;
+
+#if defined(CONFIG_SOFTMMU)
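+ /* Same TLB lookup as in tcg_out_qemu_ld above, but against
+ * addr_write. */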
+ tcg_out_mov(s, r1, addr_reg);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+ tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
+ tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+
+ tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
+ tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
+ tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
+ tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
+ tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
+ tcg_out8(s, (5 << 3) | r1);
+ tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
+
+ /* cmp 0(r1), r0 */
+ tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+#if TARGET_LONG_BITS == 32
+ /* je label1 */
+ tcg_out8(s, 0x70 + JCC_JE);
+ label1_ptr = s->code_ptr;
+ s->code_ptr++;
+#else
+ /* jne label3 */
+ tcg_out8(s, 0x70 + JCC_JNE);
+ label3_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* cmp 4(r1), addr_reg2 */
+ tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);
+
+ /* je label1 */
+ tcg_out8(s, 0x70 + JCC_JE);
+ label1_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* label3: */
+ *label3_ptr = s->code_ptr - label3_ptr - 1;
+#endif
+
+ /* XXX: move that code at the end of the TB */
+#if TARGET_LONG_BITS == 32
+ if (opc == 3) {
+ tcg_out_mov(s, TCG_REG_EDX, data_reg);
+ tcg_out_mov(s, TCG_REG_ECX, data_reg2);
+ tcg_out8(s, 0x6a); /* push Ib */
+ tcg_out8(s, mem_index);
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+ tcg_out_addi(s, TCG_REG_ESP, 4);
+ } else {
+ switch(opc) {
+ case 0:
+ /* movzbl */
+ tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg);
+ break;
+ case 1:
+ /* movzwl */
+ tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg);
+ break;
+ case 2:
+ tcg_out_mov(s, TCG_REG_EDX, data_reg);
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+ }
+#else
+ if (opc == 3) {
+ tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
+ tcg_out8(s, 0x6a); /* push Ib */
+ tcg_out8(s, mem_index);
+ tcg_out_opc(s, 0x50 + data_reg2); /* push */
+ tcg_out_opc(s, 0x50 + data_reg); /* push */
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+ tcg_out_addi(s, TCG_REG_ESP, 12);
+ } else {
+ tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
+ switch(opc) {
+ case 0:
+ /* movzbl */
+ tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg);
+ break;
+ case 1:
+ /* movzwl */
+ tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg);
+ break;
+ case 2:
+ tcg_out_mov(s, TCG_REG_ECX, data_reg);
+ break;
+ }
+ tcg_out8(s, 0x6a); /* push Ib */
+ tcg_out8(s, mem_index);
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+ tcg_out_addi(s, TCG_REG_ESP, 4);
+ }
+#endif
+
+ /* jmp label2 */
+ tcg_out8(s, 0xeb);
+ label2_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* label1: */
+ *label1_ptr = s->code_ptr - label1_ptr - 1;
+
+ /* add x(r1), r0 */
+ tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_write));
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 1;
+#else
+ bswap = 0;
+#endif
+ switch(opc) {
+ case 0:
+ /* movb */
+ tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);
+ break;
+ case 1:
+ if (bswap) {
+ tcg_out_mov(s, r1, data_reg);
+ tcg_out8(s, 0x66); /* rolw $8, r1 */
+ tcg_out_modrm(s, 0xc1, 0, r1);
+ tcg_out8(s, 8);
+ data_reg = r1;
+ }
+ /* movw */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
+ break;
+ case 2:
+ if (bswap) {
+ tcg_out_mov(s, r1, data_reg);
+ /* bswap data_reg */
+ tcg_out_opc(s, (0xc8 + r1) | P_EXT);
+ data_reg = r1;
+ }
+ /* movl */
+ tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out_mov(s, r1, data_reg2);
+ /* bswap data_reg2 */
+ tcg_out_opc(s, (0xc8 + r1) | P_EXT);
+ tcg_out_modrm_offset(s, 0x89, r1, r0, 0);
+ tcg_out_mov(s, r1, data_reg);
+ /* bswap data_reg */
+ tcg_out_opc(s, (0xc8 + r1) | P_EXT);
+ tcg_out_modrm_offset(s, 0x89, r1, r0, 4);
+ } else {
+ tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
+ tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = s->code_ptr - label2_ptr - 1;
+#endif
+}
+
+static inline void tcg_out_op(TCGContext *s, int opc,
+ const TCGArg *args, const int *const_args)
+{
+ int c;
+
+ switch(opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
+ tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
+ tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+ tcg_out8(s, 0xe9); /* jmp im */
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ tcg_out32(s, 0);
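+ /* the 32-bit displacement is patched later, when this TB is
+ chained to its target */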
+ } else {
+ /* indirect jump method */
+ /* jmp Ev */
+ tcg_out_modrm_offset(s, 0xff, 4, -1,
+ (tcg_target_long)(s->tb_next + args[0]));
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ if (const_args[0]) {
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
+ } else {
+ tcg_out_modrm(s, 0xff, 2, args[0]);
+ }
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0]) {
+ tcg_out8(s, 0xe9);
+ tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
+ } else {
+ tcg_out_modrm(s, 0xff, 4, args[0]);
+ }
+ break;
+ case INDEX_op_br:
+ tcg_out_jxx(s, JCC_JMP, args[0]);
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
+ break;
+ case INDEX_op_ld8u_i32:
+ /* movzbl */
+ tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld8s_i32:
+ /* movsbl */
+ tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16u_i32:
+ /* movzwl */
+ tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16s_i32:
+ /* movswl */
+ tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i32:
+ /* movl */
+ tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st8_i32:
+ /* movb */
+ tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st16_i32:
+ /* movw */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i32:
+ /* movl */
+ tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_sub_i32:
+ c = ARITH_SUB;
+ goto gen_arith;
+ case INDEX_op_and_i32:
+ c = ARITH_AND;
+ goto gen_arith;
+ case INDEX_op_or_i32:
+ c = ARITH_OR;
+ goto gen_arith;
+ case INDEX_op_xor_i32:
+ c = ARITH_XOR;
+ goto gen_arith;
+ case INDEX_op_add_i32:
+ c = ARITH_ADD;
+ gen_arith:
+ if (const_args[2]) {
+ tgen_arithi(s, c, args[0], args[2]);
+ } else {
+ tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
+ }
+ break;
+ case INDEX_op_mul_i32:
+ if (const_args[2]) {
+ int32_t val;
+ val = args[2];
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, 0x6b, args[0], args[0]);
+ tcg_out8(s, val);
+ } else {
+ tcg_out_modrm(s, 0x69, args[0], args[0]);
+ tcg_out32(s, val);
+ }
+ } else {
+ tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
+ }
+ break;
+ case INDEX_op_mulu2_i32:
+ tcg_out_modrm(s, 0xf7, 4, args[3]);
+ break;
+ case INDEX_op_div2_i32:
+ tcg_out_modrm(s, 0xf7, 7, args[4]);
+ break;
+ case INDEX_op_divu2_i32:
+ tcg_out_modrm(s, 0xf7, 6, args[4]);
+ break;
+ case INDEX_op_shl_i32:
+ c = SHIFT_SHL;
+ gen_shift32:
+ if (const_args[2]) {
+ if (args[2] == 1) {
+ tcg_out_modrm(s, 0xd1, c, args[0]);
+ } else {
+ tcg_out_modrm(s, 0xc1, c, args[0]);
+ tcg_out8(s, args[2]);
+ }
+ } else {
+ tcg_out_modrm(s, 0xd3, c, args[0]);
+ }
+ break;
+ case INDEX_op_shr_i32:
+ c = SHIFT_SHR;
+ goto gen_shift32;
+ case INDEX_op_sar_i32:
+ c = SHIFT_SAR;
+ goto gen_shift32;
+
+ case INDEX_op_add2_i32:
+ if (const_args[4])
+ tgen_arithi(s, ARITH_ADD, args[0], args[4]);
+ else
+ tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
+ if (const_args[5])
+ tgen_arithi(s, ARITH_ADC, args[1], args[5]);
+ else
+ tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
+ break;
+ case INDEX_op_sub2_i32:
+ if (const_args[4])
+ tgen_arithi(s, ARITH_SUB, args[0], args[4]);
+ else
+ tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
+ if (const_args[5])
+ tgen_arithi(s, ARITH_SBB, args[1], args[5]);
+ else
+ tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
+ break;
+ case INDEX_op_brcond_i32:
+ tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
+ break;
+ case INDEX_op_brcond2_i32:
+ tcg_out_brcond2(s, args, const_args);
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, 3);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, 3);
+ break;
+
+ default:
+ tcg_abort();
+ }
+}
+
+static const TCGTargetOpDef x86_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "q", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "0", "ri" } },
+ { INDEX_op_sub_i32, { "r", "0", "ri" } },
+ { INDEX_op_mul_i32, { "r", "0", "ri" } },
+ { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
+ { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_and_i32, { "r", "0", "ri" } },
+ { INDEX_op_or_i32, { "r", "0", "ri" } },
+ { INDEX_op_xor_i32, { "r", "0", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "0", "ci" } },
+ { INDEX_op_shr_i32, { "r", "0", "ci" } },
+ { INDEX_op_sar_i32, { "r", "0", "ci" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+
+ { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
+ { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
+ { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
+
+#if TARGET_LONG_BITS == 32
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "cb", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L" } },
+#else
+ { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
+
+ { INDEX_op_qemu_st8, { "cb", "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
+#endif
+ { -1 },
+};
+
+static int tcg_target_callee_save_regs[] = {
+ /* TCG_REG_EBP, */ /* currently used for the global env, so no
+ need to save */
+ TCG_REG_EBX,
+ TCG_REG_ESI,
+ TCG_REG_EDI,
+};
+
+static inline void tcg_out_push(TCGContext *s, int reg)
+{
+ tcg_out_opc(s, 0x50 + reg);
+}
+
+static inline void tcg_out_pop(TCGContext *s, int reg)
+{
+ tcg_out_opc(s, 0x58 + reg);
+}
+
+/* Generate global QEMU prologue and epilogue code */
+void tcg_target_qemu_prologue(TCGContext *s)
+{
+ int i, frame_size, push_size, stack_addend;
+
+ /* TB prologue */
+ /* save all callee saved registers */
+ for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_push(s, tcg_target_callee_save_regs[i]);
+ }
+ /* reserve some stack space */
+ push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
+ frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
+ frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
+ ~(TCG_TARGET_STACK_ALIGN - 1);
+ stack_addend = frame_size - push_size;
+ tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
+
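+ /* the address of the TB to execute arrives in %eax (first register
+ argument) */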
+ tcg_out_modrm(s, 0xff, 4, TCG_REG_EAX); /* jmp *%eax */
+
+ /* TB epilogue */
+ tb_ret_addr = s->code_ptr;
+ tcg_out_addi(s, TCG_REG_ESP, stack_addend);
+ for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
+ tcg_out_pop(s, tcg_target_callee_save_regs[i]);
+ }
+ tcg_out8(s, 0xc3); /* ret */
+}
+
+void tcg_target_init(TCGContext *s)
+{
+ /* fail safe: the inline TLB lookup hard-codes the entry size */
+ if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
+ tcg_abort();
+
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_EAX) |
+ (1 << TCG_REG_EDX) |
+ (1 << TCG_REG_ECX));
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);
+
+ tcg_add_target_add_op_defs(x86_op_defs);
+}
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
new file mode 100644
index 0000000..37fdaa5
--- /dev/null
+++ b/tcg/i386/tcg-target.h
@@ -0,0 +1,55 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_I386 1
+
+#define TCG_TARGET_REG_BITS 32
+//#define TCG_TARGET_WORDS_BIGENDIAN
+
+#define TCG_TARGET_NB_REGS 8
+
+enum {
+ TCG_REG_EAX = 0,
+ TCG_REG_ECX,
+ TCG_REG_EDX,
+ TCG_REG_EBX,
+ TCG_REG_ESP,
+ TCG_REG_EBP,
+ TCG_REG_ESI,
+ TCG_REG_EDI,
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_ESP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* Note: must be synced with dyngen-exec.h */
+#define TCG_AREG0 TCG_REG_EBP
+#define TCG_AREG1 TCG_REG_EBX
+#define TCG_AREG2 TCG_REG_ESI
+#define TCG_AREG3 TCG_REG_EDI
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+}
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
new file mode 100644
index 0000000..ad17468
--- /dev/null
+++ b/tcg/ppc/tcg-target.c
@@ -0,0 +1,1492 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+static uint8_t *tb_ret_addr;
+
+#ifdef __APPLE__
+#define LINKAGE_AREA_SIZE 24
+#define BACK_CHAIN_OFFSET 8
+#else
+#define LINKAGE_AREA_SIZE 8
+#define BACK_CHAIN_OFFSET 4
+#endif
+
+#define FAST_PATH
+#if TARGET_PHYS_ADDR_BITS <= 32
+#define ADDEND_OFFSET 0
+#else
+#define ADDEND_OFFSET 4
+#endif
+
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "r0",
+ "r1",
+ "rp",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+ "r28",
+ "r29",
+ "r30",
+ "r31"
+};
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31,
+#ifdef __APPLE__
+ TCG_REG_R2,
+#endif
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+#ifndef __APPLE__
+ TCG_REG_R11,
+#endif
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R3,
+ TCG_REG_R4
+};
+
+static const int tcg_target_callee_save_regs[] = {
+#ifdef __APPLE__
+ TCG_REG_R11,
+ TCG_REG_R13,
+#endif
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
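+/* PPC branch (B) instructions carry a signed 26-bit, word-aligned
+ displacement; the shift pair below checks that the target is
+ actually within that range */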
+static uint32_t reloc_pc24_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if ((disp << 6) >> 6 != disp)
+ tcg_abort ();
+
+ return disp & 0x3fffffc;
+}
+
+static void reloc_pc24 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0x3fffffc)
+ | reloc_pc24_val (pc, target);
+}
+
+static uint16_t reloc_pc14_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if (disp != (int16_t) disp)
+ tcg_abort ();
+
+ return disp & 0xfffc;
+}
+
+static void reloc_pc14 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0xfffc)
+ | reloc_pc14_val (pc, target);
+}
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch (type) {
+ case R_PPC_REL14:
+ reloc_pc14 (code_ptr, value);
+ break;
+ case R_PPC_REL24:
+ reloc_pc24 (code_ptr, value);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return sizeof (tcg_target_call_iarg_regs) / sizeof (tcg_target_call_iarg_regs[0]);
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'A': case 'B': case 'C': case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
+ break;
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+#ifdef CONFIG_SOFTMMU
+ case 'L': /* qemu_ld constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ break;
+ case 'K': /* qemu_st[8..32] constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
+#if TARGET_LONG_BITS == 64
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
+#endif
+ break;
+ case 'M': /* qemu_st64 constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
+ break;
+#else
+ case 'L':
+ case 'K':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'M':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ break;
+#endif
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ return 0;
+}
+
+#define OPCD(opc) ((opc)<<26)
+#define XO31(opc) (OPCD(31)|((opc)<<1))
+#define XO19(opc) (OPCD(19)|((opc)<<1))
+
+#define B OPCD(18)
+#define BC OPCD(16)
+#define LBZ OPCD(34)
+#define LHZ OPCD(40)
+#define LHA OPCD(42)
+#define LWZ OPCD(32)
+#define STB OPCD(38)
+#define STH OPCD(44)
+#define STW OPCD(36)
+
+#define ADDI OPCD(14)
+#define ADDIS OPCD(15)
+#define ORI OPCD(24)
+#define ORIS OPCD(25)
+#define XORI OPCD(26)
+#define XORIS OPCD(27)
+#define ANDI OPCD(28)
+#define ANDIS OPCD(29)
+#define MULLI OPCD( 7)
+#define CMPLI OPCD(10)
+#define CMPI OPCD(11)
+
+#define LWZU OPCD(33)
+#define STWU OPCD(37)
+
+#define RLWINM OPCD(21)
+
+#define BCLR XO19( 16)
+#define BCCTR XO19(528)
+#define CRAND XO19(257)
+#define CRANDC XO19(129)
+#define CRNAND XO19(225)
+#define CROR XO19(449)
+
+#define EXTSB XO31(954)
+#define EXTSH XO31(922)
+#define ADD XO31(266)
+#define ADDE XO31(138)
+#define ADDC XO31( 10)
+#define AND XO31( 28)
+#define SUBF XO31( 40)
+#define SUBFC XO31( 8)
+#define SUBFE XO31(136)
+#define OR XO31(444)
+#define XOR XO31(316)
+#define MULLW XO31(235)
+#define MULHWU XO31( 11)
+#define DIVW XO31(491)
+#define DIVWU XO31(459)
+#define CMP XO31( 0)
+#define CMPL XO31( 32)
+#define LHBRX XO31(790)
+#define LWBRX XO31(534)
+#define STHBRX XO31(918)
+#define STWBRX XO31(662)
+#define MFSPR XO31(339)
+#define MTSPR XO31(467)
+#define SRAWI XO31(824)
+#define NEG XO31(104)
+
+#define LBZX XO31( 87)
+#define LHZX XO31(276)
+#define LHAX XO31(343)
+#define LWZX XO31( 23)
+#define STBX XO31(215)
+#define STHX XO31(407)
+#define STWX XO31(151)
+
+#define SPR(a,b) ((((a)<<5)|(b))<<11)
+#define LR SPR(8, 0)
+#define CTR SPR(9, 0)
+
+#define SLW XO31( 24)
+#define SRW XO31(536)
+#define SRAW XO31(792)
+
+#define LMW OPCD(46)
+#define STMW OPCD(47)
+
+#define TW XO31(4)
+#define TRAP (TW | TO (31))
+
+#define RT(r) ((r)<<21)
+#define RS(r) ((r)<<21)
+#define RA(r) ((r)<<16)
+#define RB(r) ((r)<<11)
+#define TO(t) ((t)<<21)
+#define SH(s) ((s)<<11)
+#define MB(b) ((b)<<6)
+#define ME(e) ((e)<<1)
+#define BO(o) ((o)<<21)
+
+#define LK 1
+
+#define TAB(t,a,b) (RT(t) | RA(a) | RB(b))
+#define SAB(s,a,b) (RS(s) | RA(a) | RB(b))
+
+#define BF(n) ((n)<<23)
+#define BI(n, c) (((c)+((n)*4))<<16)
+#define BT(n, c) (((c)+((n)*4))<<21)
+#define BA(n, c) (((c)+((n)*4))<<16)
+#define BB(n, c) (((c)+((n)*4))<<11)
+
+#define BO_COND_TRUE BO (12)
+#define BO_COND_FALSE BO (4)
+#define BO_ALWAYS BO (20)
+
+enum {
+ CR_LT,
+ CR_GT,
+ CR_EQ,
+ CR_SO
+};
+
+static const uint32_t tcg_to_bc[10] = {
+ [TCG_COND_EQ] = BC | BI (7, CR_EQ) | BO_COND_TRUE,
+ [TCG_COND_NE] = BC | BI (7, CR_EQ) | BO_COND_FALSE,
+ [TCG_COND_LT] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GE] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LE] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GT] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+ [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+};
+
+static void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out32 (s, OR | SAB (arg, ret, arg));
+}
+
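+/* load a 32-bit constant: a single addi when it fits in 16 signed
+ bits, otherwise addis for the high half plus ori for the low half */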
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ if (arg == (int16_t) arg)
+ tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
+ else {
+ tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
+ if (arg & 0xffff)
+ tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
+ }
+}
+
+static void tcg_out_ldst (TCGContext *s, int ret, int addr,
+ int offset, int op1, int op2)
+{
+ if (offset == (int16_t) offset)
+ tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, offset);
+ tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
+ }
+}
+
+static void tcg_out_b (TCGContext *s, int mask, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) s->code_ptr;
+ if ((disp << 6) >> 6 == disp)
+ tcg_out32 (s, B | (disp & 0x3fffffc) | mask);
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, (tcg_target_long) target);
+ tcg_out32 (s, MTSPR | RS (0) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | mask);
+ }
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, mem_index, s_bits, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r1, r2;
+ void *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0;
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+ s_bits = opc & 3;
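+ /* bits 0-1 of opc encode log2(access size), bit 2 marks
+ sign-extending loads */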
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+
+ tcg_out32 (s, (RLWINM
+ | RA (r0)
+ | RS (addr_reg)
+ | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ | MB (32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS))
+ | ME (31 - CPU_TLB_ENTRY_BITS)
+ )
+ );
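+ /* r0 = ((addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1))
+ << CPU_TLB_ENTRY_BITS: rlwinm does the shift and mask at once */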
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0));
+ tcg_out32 (s, (LWZU
+ | RT (r1)
+ | RA (r0)
+ | offsetof (CPUState, tlb_table[mem_index][0].addr_read)
+ )
+ );
+ tcg_out32 (s, (RLWINM
+ | RA (r2)
+ | RS (addr_reg)
+ | SH (0)
+ | MB ((32 - s_bits) & 31)
+ | ME (31 - TARGET_PAGE_BITS)
+ )
+ );
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1));
+#if TARGET_LONG_BITS == 64
+ tcg_out32 (s, LWZ | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, CMP | BF (6) | RA (addr_reg2) | RB (r1));
+ tcg_out32 (s, CRAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
+#endif
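+ /* cr7.EQ is now set iff the TLB entry matches the guest address */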
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+#if TARGET_LONG_BITS == 32
+ tcg_out_mov (s, 3, addr_reg);
+ tcg_out_movi (s, TCG_TYPE_I32, 4, mem_index);
+#else
+ tcg_out_mov (s, 3, addr_reg2);
+ tcg_out_mov (s, 4, addr_reg);
+ tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index);
+#endif
+
+ tcg_out_b (s, LK, (tcg_target_long) qemu_ld_helpers[s_bits]);
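+ /* the helper returns the loaded value in r3 (r3:r4 for 64 bit) */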
+ switch (opc) {
+ case 0|4:
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (3));
+ break;
+ case 1|4:
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (3));
+ break;
+ case 0:
+ case 1:
+ case 2:
+ if (data_reg != 3)
+ tcg_out_mov (s, data_reg, 3);
+ break;
+ case 3:
+ if (data_reg == 3) {
+ if (data_reg2 == 4) {
+ tcg_out_mov (s, 0, 4);
+ tcg_out_mov (s, 4, 3);
+ tcg_out_mov (s, 3, 0);
+ }
+ else {
+ tcg_out_mov (s, data_reg2, 3);
+ tcg_out_mov (s, 3, 4);
+ }
+ }
+ else {
+ if (data_reg != 4) tcg_out_mov (s, data_reg, 4);
+ if (data_reg2 != 3) tcg_out_mov (s, data_reg2, 3);
+ }
+ break;
+ }
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
+ tcg_out32 (s, (LWZ
+ | RT (r0)
+ | RA (r0)
+ | (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_read))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ default:
+ case 0:
+ tcg_out32 (s, LBZ | RT (data_reg) | RA (r0));
+ break;
+ case 0|4:
+ tcg_out32 (s, LBZ | RT (data_reg) | RA (r0));
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (data_reg));
+ break;
+ case 1:
+ if (bswap) tcg_out32 (s, LHBRX | RT (data_reg) | RB (r0));
+ else tcg_out32 (s, LHZ | RT (data_reg) | RA (r0));
+ break;
+ case 1|4:
+ if (bswap) {
+ tcg_out32 (s, LHBRX | RT (data_reg) | RB (r0));
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (data_reg));
+ }
+ else tcg_out32 (s, LHA | RT (data_reg) | RA (r0));
+ break;
+ case 2:
+ if (bswap) tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
+ else tcg_out32 (s, LWZ | RT (data_reg)| RA (r0));
+ break;
+ case 3:
+ if (bswap) {
+ if (r0 == data_reg) {
+ tcg_out32 (s, LWBRX | RT (0) | RB (r0));
+ tcg_out32 (s, ADDI | RT (r0) | RA (r0) | 4);
+ tcg_out32 (s, LWBRX | RT (data_reg2) | RB (r0));
+ tcg_out_mov (s, data_reg, 0);
+ }
+ else {
+ tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
+ tcg_out32 (s, ADDI | RT (r0) | RA (r0) | 4);
+ tcg_out32 (s, LWBRX | RT (data_reg2) | RB (r0));
+ }
+ }
+ else {
+ if (r0 == data_reg2) {
+ tcg_out32 (s, LWZ | RT (0) | RA (r0));
+ tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4);
+ tcg_out_mov (s, data_reg2, 0);
+ }
+ else {
+ tcg_out32 (s, LWZ | RT (data_reg2) | RA (r0));
+ tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4);
+ }
+ }
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, r0, r1, data_reg, data_reg2, mem_index, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r2, ir;
+ void *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0;
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+
+ tcg_out32 (s, (RLWINM
+ | RA (r0)
+ | RS (addr_reg)
+ | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ | MB (32 - (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS))
+ | ME (31 - CPU_TLB_ENTRY_BITS)
+ )
+ );
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0));
+ tcg_out32 (s, (LWZU
+ | RT (r1)
+ | RA (r0)
+ | offsetof (CPUState, tlb_table[mem_index][0].addr_write)
+ )
+ );
+ tcg_out32 (s, (RLWINM
+ | RA (r2)
+ | RS (addr_reg)
+ | SH (0)
+ | MB ((32 - opc) & 31)
+ | ME (31 - TARGET_PAGE_BITS)
+ )
+ );
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1));
+#if TARGET_LONG_BITS == 64
+ tcg_out32 (s, LWZ | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, CMP | BF (6) | RA (addr_reg2) | RB (r1));
+ tcg_out32 (s, CRAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
+#endif
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+#if TARGET_LONG_BITS == 32
+ tcg_out_mov (s, 3, addr_reg);
+ ir = 4;
+#else
+ tcg_out_mov (s, 3, addr_reg2);
+ tcg_out_mov (s, 4, addr_reg);
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ ir = 5;
+#else
+ ir = 4;
+#endif
+#endif
+
+ switch (opc) {
+ case 0:
+ tcg_out32 (s, (RLWINM
+ | RA (ir)
+ | RS (data_reg)
+ | SH (0)
+ | MB (24)
+ | ME (31)));
+ break;
+ case 1:
+ tcg_out32 (s, (RLWINM
+ | RA (ir)
+ | RS (data_reg)
+ | SH (0)
+ | MB (16)
+ | ME (31)));
+ break;
+ case 2:
+ tcg_out_mov (s, ir, data_reg);
+ break;
+ case 3:
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ ir = 5;
+#endif
+ tcg_out_mov (s, ir++, data_reg2);
+ tcg_out_mov (s, ir, data_reg);
+ break;
+ }
+ ir++;
+
+ tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index);
+ tcg_out_b (s, LK, (tcg_target_long) qemu_st_helpers[opc]);
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ tcg_out32 (s, (LWZ
+ | RT (r0)
+ | RA (r0)
+ | (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_write))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+ r1 = 3;
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out32 (s, STB | RS (data_reg) | RA (r0));
+ break;
+ case 1:
+ if (bswap) tcg_out32 (s, STHBRX | RS (data_reg) | RA (0) | RB (r0));
+ else tcg_out32 (s, STH | RS (data_reg) | RA (r0));
+ break;
+ case 2:
+ if (bswap) tcg_out32 (s, STWBRX | RS (data_reg) | RA (0) | RB (r0));
+ else tcg_out32 (s, STW | RS (data_reg) | RA (r0));
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, STWBRX | RS (data_reg) | RA (0) | RB (r0));
+ tcg_out32 (s, STWBRX | RS (data_reg2) | RA (0) | RB (r1));
+ }
+ else {
+ tcg_out32 (s, STW | RS (data_reg2) | RA (r0));
+ tcg_out32 (s, STW | RS (data_reg) | RA (r0) | 4);
+ }
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+void tcg_target_qemu_prologue (TCGContext *s)
+{
+ int i, frame_size;
+
+ frame_size = 0
+ + LINKAGE_AREA_SIZE
+ + TCG_STATIC_CALL_ARGS_SIZE
+ + ARRAY_SIZE (tcg_target_callee_save_regs) * 4
+ ;
+ frame_size = (frame_size + 15) & ~15;
+
+ tcg_out32 (s, MFSPR | RT (0) | LR);
+ tcg_out32 (s, STWU | RS (1) | RA (1) | (-frame_size & 0xffff));
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (STW
+ | RS (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 4 + LINKAGE_AREA_SIZE + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, STW | RS (0) | RA (1) | (frame_size + BACK_CHAIN_OFFSET));
+
+ tcg_out32 (s, MTSPR | RS (3) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
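+ /* jump into the TB whose entry point arrived in r3 (the first
+ argument register); INDEX_op_exit_tb branches back to tb_ret_addr */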
+ tb_ret_addr = s->code_ptr;
+
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (LWZ
+ | RT (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 4 + LINKAGE_AREA_SIZE + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, LWZ | RT (0) | RA (1) | (frame_size + BACK_CHAIN_OFFSET));
+ tcg_out32 (s, MTSPR | RS (0) | LR);
+ tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
+ tcg_out32 (s, BCLR | BO_ALWAYS);
+}
+
+static void tcg_out_ld (TCGContext *s, TCGType type, int ret, int arg1,
+ tcg_target_long arg2)
+{
+ tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
+}
+
+static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1,
+ tcg_target_long arg2)
+{
+ tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
+}
+
+static void ppc_addi (TCGContext *s, int rt, int ra, tcg_target_long si)
+{
+ if (!si && rt == ra)
+ return;
+
+ if (si == (int16_t) si)
+ tcg_out32 (s, ADDI | RT (rt) | RA (ra) | (si & 0xffff));
+ else {
+ uint16_t h = ((si >> 16) & 0xffff) + ((uint16_t) si >> 15);
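+ /* addi sign-extends its low half, so bump the high half by one
+ when bit 15 of si is set */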
+ tcg_out32 (s, ADDIS | RT (rt) | RA (ra) | h);
+ tcg_out32 (s, ADDI | RT (rt) | RA (rt) | (si & 0xffff));
+ }
+}
+
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ ppc_addi (s, reg, reg, val);
+}
+
+static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
+ int const_arg2, int cr)
+{
+ int imm;
+ uint32_t op;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ else if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMP;
+ imm = 0;
+ break;
+
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ if (const_arg2) {
+ if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ default:
+ tcg_abort ();
+ }
+ op |= BF (cr);
+
+ if (imm)
+ tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
+ else {
+ if (const_arg2) {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
+ tcg_out32 (s, op | RA (arg1) | RB (0));
+ }
+ else
+ tcg_out32 (s, op | RA (arg1) | RB (arg2));
+ }
+
+}
+static void tcg_out_bc (TCGContext *s, int bc, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value)
+ tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
+ else {
+ uint16_t val = *(uint16_t *) &s->code_ptr[2];
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, bc | (val & 0xfffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
+ }
+}
+
+static void tcg_out_brcond (TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
+ tcg_out_bc (s, tcg_to_bc[cond], label_index);
+}
+
+/* XXX: we implement it at the target level to avoid having to
+ handle cross-basic-block temporaries */
+static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
+ const int *const_args)
+{
+ int cond = args[4], label_index = args[5], op;
+ struct { int bit1; int bit2; int cond2; } bits[] = {
+ [TCG_COND_LT ] = { CR_LT, CR_LT, TCG_COND_LT },
+ [TCG_COND_LE ] = { CR_LT, CR_GT, TCG_COND_LT },
+ [TCG_COND_GT ] = { CR_GT, CR_GT, TCG_COND_GT },
+ [TCG_COND_GE ] = { CR_GT, CR_LT, TCG_COND_GT },
+ [TCG_COND_LTU] = { CR_LT, CR_LT, TCG_COND_LTU },
+ [TCG_COND_LEU] = { CR_LT, CR_GT, TCG_COND_LTU },
+ [TCG_COND_GTU] = { CR_GT, CR_GT, TCG_COND_GTU },
+ [TCG_COND_GEU] = { CR_GT, CR_LT, TCG_COND_GTU },
+ }, *b = &bits[cond];
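+ /* build the 64-bit comparison from 32-bit compares: the high words
+ decide unless they are equal, in which case the low words decide */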
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ op = (cond == TCG_COND_EQ) ? CRAND : CRNAND;
+ tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 6);
+ tcg_out_cmp (s, cond, args[1], args[3], const_args[3], 7);
+ tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ case TCG_COND_GE:
+ case TCG_COND_LTU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ case TCG_COND_GEU:
+ op = (b->bit1 != b->bit2) ? CRANDC : CRAND;
+ tcg_out_cmp (s, b->cond2, args[1], args[3], const_args[3], 5);
+ tcg_out_cmp (s, TCG_COND_EQ, args[1], args[3], const_args[3], 6);
+ tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 7);
+ tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, b->bit2));
+ tcg_out32 (s, CROR | BT (7, CR_EQ) | BA (5, b->bit1) | BB (7, CR_EQ));
+ break;
+ default:
+ tcg_abort();
+ }
+
+ tcg_out_bc (s, (BC | BI (7, CR_EQ) | BO_COND_TRUE), label_index);
+}
+
+void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr)
+{
+ uint32_t *ptr;
+ long disp = addr - jmp_addr;
+ unsigned long patch_size;
+
+ ptr = (uint32_t *)jmp_addr;
+
+ if ((disp << 6) >> 6 != disp) {
+ ptr[0] = 0x3c000000 | (addr >> 16); /* lis 0,addr@h */
+ ptr[1] = 0x60000000 | (addr & 0xffff); /* ori 0,0,addr@l */
+ ptr[2] = 0x7c0903a6; /* mtctr 0 */
+ ptr[3] = 0x4e800420; /* bctr */
+ patch_size = 16;
+ } else {
+ /* patch the branch destination */
+ if (disp != 16) {
+ *ptr = 0x48000000 | (disp & 0x03fffffc); /* b disp */
+ patch_size = 4;
+ } else {
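+ /* the destination is the instruction just past the 16-byte
+ patch area: fall through via nops */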
+ ptr[0] = 0x60000000; /* nop */
+ ptr[1] = 0x60000000;
+ ptr[2] = 0x60000000;
+ ptr[3] = 0x60000000;
+ patch_size = 16;
+ }
+ }
+ /* flush icache */
+ flush_icache_range(jmp_addr, jmp_addr + patch_size);
+}
+
+static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi (s, TCG_TYPE_I32, TCG_REG_R3, args[0]);
+ tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ s->code_ptr += 16;
+ }
+ else {
+ tcg_abort ();
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_br:
+ {
+ TCGLabel *l = &s->labels[args[0]];
+
+ if (l->has_value) {
+ tcg_out_b (s, 0, l->u.value);
+ }
+ else {
+ uint32_t val = *(uint32_t *) s->code_ptr;
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, B | (val & 0x3fffffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
+ }
+ }
+ break;
+ case INDEX_op_call:
+ if (const_args[0]) {
+ tcg_out_b (s, LK, args[0]);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (args[0]) | LR);
+ tcg_out32 (s, BCLR | BO_ALWAYS | LK);
+ }
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0]) {
+ tcg_out_b (s, 0, args[0]);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (args[0]) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+ }
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
+ break;
+ case INDEX_op_ld8u_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0]));
+ break;
+ case INDEX_op_ld16u_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX);
+ break;
+ case INDEX_op_ld_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX);
+ break;
+ case INDEX_op_st8_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX);
+ break;
+ case INDEX_op_st16_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX);
+ break;
+ case INDEX_op_st_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX);
+ break;
+
+ case INDEX_op_add_i32:
+ if (const_args[2])
+ ppc_addi (s, args[0], args[1], args[2]);
+ else
+ tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_sub_i32:
+ if (const_args[2])
+ ppc_addi (s, args[0], args[1], -args[2]);
+ else
+ tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
+ break;
+
+ case INDEX_op_and_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | args[2]);
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, AND | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, AND | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_or_i32:
+ if (const_args[2]) {
+ if (args[2] & 0xffff) {
+ tcg_out32 (s, ORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ if (args[2] >> 16)
+ tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ else {
+ tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ }
+ else
+ tcg_out32 (s, OR | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_xor_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, XORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, XOR | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, XOR | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_mul_i32:
+ if (const_args[2]) {
+ if (args[2] == (int16_t) args[2])
+ tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1])
+ | (args[2] & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], 0));
+ }
+ }
+ else
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_div_i32:
+ tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_mulu2_i32:
+ if (args[0] == args[2] || args[0] == args[3]) {
+ tcg_out32 (s, MULLW | TAB (0, args[2], args[3]));
+ tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3]));
+ tcg_out_mov (s, args[0], 0);
+ }
+ else {
+ tcg_out32 (s, MULLW | TAB (args[0], args[2], args[3]));
+ tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3]));
+ }
+ break;
+
+ case INDEX_op_shl_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (args[2])
+ | MB (0)
+ | ME (31 - args[2])
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SLW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_shr_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (32 - args[2])
+ | MB (args[2])
+ | ME (31)
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SRW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_sar_i32:
+ if (const_args[2])
+ tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2]));
+ else
+ tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_add2_i32:
+ if (args[0] == args[3] || args[0] == args[5]) {
+ tcg_out32 (s, ADDC | TAB (0, args[2], args[4]));
+ tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5]));
+ tcg_out_mov (s, args[0], 0);
+ }
+ else {
+ tcg_out32 (s, ADDC | TAB (args[0], args[2], args[4]));
+ tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5]));
+ }
+ break;
+ case INDEX_op_sub2_i32:
+ if (args[0] == args[3] || args[0] == args[5]) {
+ tcg_out32 (s, SUBFC | TAB (0, args[4], args[2]));
+ tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3]));
+ tcg_out_mov (s, args[0], 0);
+ }
+ else {
+ tcg_out32 (s, SUBFC | TAB (args[0], args[4], args[2]));
+ tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3]));
+ }
+ break;
+
+ case INDEX_op_brcond_i32:
+ /*
+ args[0] = arg1
+ args[1] = arg2 (constant if const_args[1])
+ args[2] = cond
+ args[3] = label_index
+ */
+ tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3]);
+ break;
+ case INDEX_op_brcond2_i32:
+ tcg_out_brcond2(s, args, const_args);
+ break;
+
+ case INDEX_op_neg_i32:
+ tcg_out32 (s, NEG | RT (args[0]) | RA (args[1]));
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, 3);
+ break;
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, 3);
+ break;
+
+ case INDEX_op_ext8s_i32:
+ tcg_out32 (s, EXTSB | RS (args[1]) | RA (args[0]));
+ break;
+ case INDEX_op_ext16s_i32:
+ tcg_out32 (s, EXTSH | RS (args[1]) | RA (args[0]));
+ break;
+
+ default:
+ tcg_dump_ops (s, stderr);
+ tcg_abort ();
+ }
+}
+
+static const TCGTargetOpDef ppc_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "ri" } },
+ { INDEX_op_mul_i32, { "r", "r", "ri" } },
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
+ { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "ri" } },
+ { INDEX_op_and_i32, { "r", "r", "ri" } },
+ { INDEX_op_or_i32, { "r", "r", "ri" } },
+ { INDEX_op_xor_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+
+ { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
+
+ { INDEX_op_neg_i32, { "r", "r" } },
+
+#if TARGET_LONG_BITS == 32
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "K", "K" } },
+ { INDEX_op_qemu_st16, { "K", "K" } },
+ { INDEX_op_qemu_st32, { "K", "K" } },
+ { INDEX_op_qemu_st64, { "M", "M", "M" } },
+#else
+ { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "L", "L", "L" } },
+
+ { INDEX_op_qemu_st8, { "K", "K", "K" } },
+ { INDEX_op_qemu_st16, { "K", "K", "K" } },
+ { INDEX_op_qemu_st32, { "K", "K", "K" } },
+ { INDEX_op_qemu_st64, { "M", "M", "M", "M" } },
+#endif
+
+ { INDEX_op_ext8s_i32, { "r", "r" } },
+ { INDEX_op_ext16s_i32, { "r", "r" } },
+
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R0) |
+#ifdef __APPLE__
+ (1 << TCG_REG_R2) |
+#endif
+ (1 << TCG_REG_R3) |
+ (1 << TCG_REG_R4) |
+ (1 << TCG_REG_R5) |
+ (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) |
+ (1 << TCG_REG_R8) |
+ (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) |
+ (1 << TCG_REG_R11) |
+ (1 << TCG_REG_R12)
+ );
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);
+#ifndef __APPLE__
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);
+#endif
+
+ tcg_add_target_add_op_defs(ppc_op_defs);
+}
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
new file mode 100644
index 0000000..d46c19d
--- /dev/null
+++ b/tcg/ppc/tcg-target.h
@@ -0,0 +1,105 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_PPC 1
+
+#define TCG_TARGET_REG_BITS 32
+#define TCG_TARGET_WORDS_BIGENDIAN
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R1
+#define TCG_TARGET_STACK_ALIGN 16
+#ifdef __APPLE__
+#define TCG_TARGET_CALL_STACK_OFFSET 24
+#else
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_CALL_STACK_OFFSET 8
+#endif
+
+/* optional instructions */
+#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_div_i32
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+
+#define TCG_AREG0 TCG_REG_R27
+#define TCG_AREG1 TCG_REG_R24
+#define TCG_AREG2 TCG_REG_R25
+#define TCG_AREG3 TCG_REG_R26
+
+/* taken directly from tcg-dyngen.c */
+#define MIN_CACHE_LINE_SIZE 8 /* conservative value */
+
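+/* standard PowerPC sequence for exposing freshly written code: flush
+ the data cache to memory (dcbst), wait for completion (sync),
+ invalidate the instruction cache (icbi), then resynchronize the
+ fetch pipeline (sync + isync) */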
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long p;
+
+ start &= ~(MIN_CACHE_LINE_SIZE - 1);
+ stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);
+
+ for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
+ asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
+ asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("isync" : : : "memory");
+}
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
new file mode 100644
index 0000000..6b16efa
--- /dev/null
+++ b/tcg/ppc64/tcg-target.c
@@ -0,0 +1,1491 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#define TCG_CT_CONST_U32 0x100
+
+static uint8_t *tb_ret_addr;
+
+#define FAST_PATH
+
+#if TARGET_PHYS_ADDR_BITS == 32
+#define LD_ADDEND LWZ
+#else
+#define LD_ADDEND LD
+#endif
+
+#if TARGET_LONG_BITS == 32
+#define LD_ADDR LWZU
+#define CMP_L 0
+#else
+#define LD_ADDR LDU
+#define CMP_L (1<<21)
+#endif
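+/* CMP_L is the L field of cmp/cmpl: when set, the comparison covers
+ all 64 bits of the guest address */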
+
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "r0",
+ "r1",
+ "rp",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+ "r28",
+ "r29",
+ "r30",
+ "r31"
+};
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R3
+};
+
+static const int tcg_target_callee_save_regs[] = {
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
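+/* same signed 26-bit branch displacement as on 32-bit PPC; here the
+ shift pair sign-extends within a 64-bit tcg_target_long */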
+static uint32_t reloc_pc24_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if ((disp << 38) >> 38 != disp)
+ tcg_abort ();
+
+ return disp & 0x3fffffc;
+}
+
+static void reloc_pc24 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0x3fffffc)
+ | reloc_pc24_val (pc, target);
+}
+
+static uint16_t reloc_pc14_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if (disp != (int16_t) disp)
+ tcg_abort ();
+
+ return disp & 0xfffc;
+}
+
+static void reloc_pc14 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0xfffc)
+ | reloc_pc14_val (pc, target);
+}
+
+static void patch_reloc (uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch (type) {
+ case R_PPC_REL14:
+ reloc_pc14 (code_ptr, value);
+ break;
+ case R_PPC_REL24:
+ reloc_pc24 (code_ptr, value);
+ break;
+ default:
+ tcg_abort ();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static int tcg_target_get_call_iarg_regs_count (int flags)
+{
+ return sizeof (tcg_target_call_iarg_regs) / sizeof (tcg_target_call_iarg_regs[0]);
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint (TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'A': case 'B': case 'C': case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg (ct->u.regs, 3 + ct_str[0] - 'A');
+ break;
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'L': /* qemu_ld constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
+#ifdef CONFIG_SOFTMMU
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
+#endif
+ break;
+ case 'S': /* qemu_st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
+#ifdef CONFIG_SOFTMMU
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
+#endif
+ break;
+ case 'Z':
+ ct->ct |= TCG_CT_CONST_U32;
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static int tcg_target_const_match (tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else if ((ct & TCG_CT_CONST_U32) && (val == (uint32_t) val))
+ return 1;
+ return 0;
+}
+
+#define OPCD(opc) ((opc)<<26)
+#define XO19(opc) (OPCD(19)|((opc)<<1))
+#define XO30(opc) (OPCD(30)|((opc)<<2))
+#define XO31(opc) (OPCD(31)|((opc)<<1))
+#define XO58(opc) (OPCD(58)|(opc))
+#define XO62(opc) (OPCD(62)|(opc))
+
+#define B OPCD( 18)
+#define BC OPCD( 16)
+#define LBZ OPCD( 34)
+#define LHZ OPCD( 40)
+#define LHA OPCD( 42)
+#define LWZ OPCD( 32)
+#define STB OPCD( 38)
+#define STH OPCD( 44)
+#define STW OPCD( 36)
+
+#define STD XO62( 0)
+#define STDU XO62( 1)
+#define STDX XO31(149)
+
+#define LD XO58( 0)
+#define LDX XO31( 21)
+#define LDU XO58( 1)
+#define LWA XO58( 2)
+#define LWAX XO31(341)
+
+#define ADDI OPCD( 14)
+#define ADDIS OPCD( 15)
+#define ORI OPCD( 24)
+#define ORIS OPCD( 25)
+#define XORI OPCD( 26)
+#define XORIS OPCD( 27)
+#define ANDI OPCD( 28)
+#define ANDIS OPCD( 29)
+#define MULLI OPCD( 7)
+#define CMPLI OPCD( 10)
+#define CMPI OPCD( 11)
+
+#define LWZU OPCD( 33)
+#define STWU OPCD( 37)
+
+#define RLWINM OPCD( 21)
+
+#define RLDICL XO30( 0)
+#define RLDICR XO30( 1)
+
+#define BCLR XO19( 16)
+#define BCCTR XO19(528)
+#define CRAND XO19(257)
+#define CRANDC XO19(129)
+#define CRNAND XO19(225)
+#define CROR XO19(449)
+
+#define EXTSB XO31(954)
+#define EXTSH XO31(922)
+#define EXTSW XO31(986)
+#define ADD XO31(266)
+#define ADDE XO31(138)
+#define ADDC XO31( 10)
+#define AND XO31( 28)
+#define SUBF XO31( 40)
+#define SUBFC XO31( 8)
+#define SUBFE XO31(136)
+#define OR XO31(444)
+#define XOR XO31(316)
+#define MULLW XO31(235)
+#define MULHWU XO31( 11)
+#define DIVW XO31(491)
+#define DIVWU XO31(459)
+#define CMP XO31( 0)
+#define CMPL XO31( 32)
+#define LHBRX XO31(790)
+#define LWBRX XO31(534)
+#define STHBRX XO31(918)
+#define STWBRX XO31(662)
+#define MFSPR XO31(339)
+#define MTSPR XO31(467)
+#define SRAWI XO31(824)
+#define NEG XO31(104)
+
+#define MULLD XO31(233)
+#define MULHD XO31( 73)
+#define MULHDU XO31( 9)
+#define DIVD XO31(489)
+#define DIVDU XO31(457)
+
+#define LBZX XO31( 87)
+#define LHZX XO31(276)
+#define LHAX XO31(343)
+#define LWZX XO31( 23)
+#define STBX XO31(215)
+#define STHX XO31(407)
+#define STWX XO31(151)
+
+#define SPR(a,b) ((((a)<<5)|(b))<<11)
+#define LR SPR(8, 0)
+#define CTR SPR(9, 0)
+
+#define SLW XO31( 24)
+#define SRW XO31(536)
+#define SRAW XO31(792)
+
+#define SLD XO31( 27)
+#define SRD XO31(539)
+#define SRAD XO31(794)
+#define SRADI XO31(413<<1)
+
+#define LMW OPCD( 46)
+#define STMW OPCD( 47)
+
+#define TW XO31( 4)
+#define TRAP (TW | TO (31))
+
+#define RT(r) ((r)<<21)
+#define RS(r) ((r)<<21)
+#define RA(r) ((r)<<16)
+#define RB(r) ((r)<<11)
+#define TO(t) ((t)<<21)
+#define SH(s) ((s)<<11)
+#define MB(b) ((b)<<6)
+#define ME(e) ((e)<<1)
+#define BO(o) ((o)<<21)
+#define MB64(b) ((b)<<5)
+
+#define LK 1
+
+#define TAB(t,a,b) (RT(t) | RA(a) | RB(b))
+#define SAB(s,a,b) (RS(s) | RA(a) | RB(b))
+
+#define BF(n) ((n)<<23)
+#define BI(n, c) (((c)+((n)*4))<<16)
+#define BT(n, c) (((c)+((n)*4))<<21)
+#define BA(n, c) (((c)+((n)*4))<<16)
+#define BB(n, c) (((c)+((n)*4))<<11)
+
+#define BO_COND_TRUE BO (12)
+#define BO_COND_FALSE BO ( 4)
+#define BO_ALWAYS BO (20)
+
+enum {
+ CR_LT,
+ CR_GT,
+ CR_EQ,
+ CR_SO
+};
+
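+/* branch-on-condition templates indexed by TCG condition; all test CR
+ field 7, which tcg_out_cmp fills with a signed or unsigned compare */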
+static const uint32_t tcg_to_bc[10] = {
+ [TCG_COND_EQ] = BC | BI (7, CR_EQ) | BO_COND_TRUE,
+ [TCG_COND_NE] = BC | BI (7, CR_EQ) | BO_COND_FALSE,
+ [TCG_COND_LT] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GE] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LE] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GT] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+ [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+};
+
+static void tcg_out_mov (TCGContext *s, int ret, int arg)
+{
+ tcg_out32 (s, OR | SAB (arg, ret, arg));
+}
+
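+/* MD-form rotates (rldicl/rldicr) store their 6-bit SH and MB fields split
+ across the instruction; assemble the split encoding here */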
+static void tcg_out_rld (TCGContext *s, int op, int ra, int rs, int sh, int mb)
+{
+ sh = SH (sh & 0x1f) | (((sh >> 5) & 1) << 1);
+ mb = MB64 ((mb >> 5) | ((mb << 1) & 0x3f));
+ tcg_out32 (s, op | RA (ra) | RS (rs) | sh | mb);
+}
+
+static void tcg_out_movi32 (TCGContext *s, int ret, int32_t arg)
+{
+ if (arg == (int16_t) arg)
+ tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
+ else {
+ tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
+ if (arg & 0xffff)
+ tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
+ }
+}
+
+static void tcg_out_movi (TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ int32_t arg32 = arg;
+
+ if (type == TCG_TYPE_I32 || arg == arg32) {
+ tcg_out_movi32 (s, ret, arg32);
+ }
+ else {
+ if ((uint64_t) arg >> 32) {
+ uint16_t h16 = arg >> 16;
+ uint16_t l16 = arg;
+
+ tcg_out_movi32 (s, ret, arg >> 32);
+ tcg_out_rld (s, RLDICR, ret, ret, 32, 31);
+ if (h16) tcg_out32 (s, ORIS | RS (ret) | RA (ret) | h16);
+ if (l16) tcg_out32 (s, ORI | RS (ret) | RA (ret) | l16);
+ }
+ else {
+ tcg_out_movi32 (s, ret, arg32);
+ if (arg32 < 0)
+ tcg_out_rld (s, RLDICL, ret, ret, 0, 32);
+ }
+ }
+}
+
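+/* indirect call through a ppc64 ELF ABI function descriptor: word 0 is the
+ entry point (moved to CTR), word 1 the callee's TOC (r2), word 2 the
+ environment pointer (r11) */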
+static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
+{
+ int reg;
+
+ if (const_arg) {
+ reg = 2;
+ tcg_out_movi (s, TCG_TYPE_I64, reg, arg);
+ }
+ else reg = arg;
+
+ tcg_out32 (s, LD | RT (0) | RA (reg));
+ tcg_out32 (s, MTSPR | RS (0) | CTR);
+ tcg_out32 (s, LD | RT (11) | RA (reg) | 16);
+ tcg_out32 (s, LD | RT (2) | RA (reg) | 8);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | LK);
+}
+
+static void tcg_out_ldst (TCGContext *s, int ret, int addr,
+ int offset, int op1, int op2)
+{
+ if (offset == (int16_t) offset)
+ tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, offset);
+ tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
+ }
+}
+
+static void tcg_out_b (TCGContext *s, int mask, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) s->code_ptr;
+ if ((disp << 38) >> 38 == disp)
+ tcg_out32 (s, B | (disp & 0x3fffffc) | mask);
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, (tcg_target_long) target);
+ tcg_out32 (s, MTSPR | RS (0) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | mask);
+ }
+}
+
+#if defined (CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+
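+/* compute the TLB entry address for the guest address into r0, load the
+ comparator into r1 and put the masked page address in r2; LD_ADDR is
+ defined earlier in this file according to TARGET_LONG_BITS */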
+static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
+ int addr_reg, int s_bits, int offset)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
+
+ tcg_out32 (s, (RLWINM
+ | RA (r0)
+ | RS (addr_reg)
+ | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ | MB (32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS))
+ | ME (31 - CPU_TLB_ENTRY_BITS)
+ )
+ );
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0));
+ tcg_out32 (s, (LWZU | RT (r1) | RA (r0) | offset));
+ tcg_out32 (s, (RLWINM
+ | RA (r2)
+ | RS (addr_reg)
+ | SH (0)
+ | MB ((32 - s_bits) & 31)
+ | ME (31 - TARGET_PAGE_BITS)
+ )
+ );
+#else
+ tcg_out_rld (s, RLDICL, r0, addr_reg,
+ 64 - TARGET_PAGE_BITS,
+ 64 - CPU_TLB_BITS);
+ tcg_out_rld (s, RLDICR, r0, r0,
+ CPU_TLB_ENTRY_BITS,
+ 63 - CPU_TLB_ENTRY_BITS);
+
+ tcg_out32 (s, ADD | TAB (r0, r0, TCG_AREG0));
+ tcg_out32 (s, LD_ADDR | RT (r1) | RA (r0) | offset);
+
+ if (!s_bits) {
+ tcg_out_rld (s, RLDICR, r2, addr_reg, 0, 63 - TARGET_PAGE_BITS);
+ }
+ else {
+ tcg_out_rld (s, RLDICL, r2, addr_reg,
+ 64 - TARGET_PAGE_BITS,
+ TARGET_PAGE_BITS - s_bits);
+ tcg_out_rld (s, RLDICL, r2, r2, TARGET_PAGE_BITS, 0);
+ }
+#endif
+}
+#endif
+
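+/* opc encoding: bits 0-1 give log2 of the access size in bytes, bit 2
+ requests sign extension of the loaded value */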
+static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r2;
+ void *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+ s_bits = opc & 3;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+
+ tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
+ offsetof (CPUState, tlb_table[mem_index][0].addr_read));
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+ tcg_out_mov (s, 3, addr_reg);
+ tcg_out_movi (s, TCG_TYPE_I64, 4, mem_index);
+
+ tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
+
+ switch (opc) {
+ case 0|4:
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (3));
+ break;
+ case 1|4:
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (3));
+ break;
+ case 2|4:
+ tcg_out32 (s, EXTSW | RA (data_reg) | RS (3));
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ if (data_reg != 3)
+ tcg_out_mov (s, data_reg, 3);
+ break;
+ }
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
+ tcg_out32 (s, (LD_ADDEND
+ | RT (r0)
+ | RA (r0)
+ | (offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_read))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+#if TARGET_LONG_BITS == 32
+ tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
+#endif
+ r0 = addr_reg;
+ r1 = 3;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ default:
+ case 0:
+ tcg_out32 (s, LBZ | RT (data_reg) | RA (r0));
+ break;
+ case 0|4:
+ tcg_out32 (s, LBZ | RT (data_reg) | RA (r0));
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (data_reg));
+ break;
+ case 1:
+ if (bswap) tcg_out32 (s, LHBRX | RT (data_reg) | RB (r0));
+ else tcg_out32 (s, LHZ | RT (data_reg) | RA (r0));
+ break;
+ case 1|4:
+ if (bswap) {
+ tcg_out32 (s, LHBRX | RT (data_reg) | RB (r0));
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (data_reg));
+ }
+ else tcg_out32 (s, LHA | RT (data_reg) | RA (r0));
+ break;
+ case 2:
+ if (bswap) tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
+ else tcg_out32 (s, LWZ | RT (data_reg) | RA (r0));
+ break;
+ case 2|4:
+ if (bswap) {
+ tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
+ tcg_out32 (s, EXTSW | RA (data_reg) | RS (data_reg));
+ }
+ else tcg_out32 (s, LWA | RT (data_reg) | RA (r0));
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out32 (s, LWBRX | RT (0) | RB (r0));
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, LWBRX | RT (data_reg) | RB (r1));
+ tcg_out_rld (s, RLDICR, data_reg, data_reg, 32, 31);
+ tcg_out32 (s, OR | SAB (0, data_reg, data_reg));
+ }
+ else tcg_out32 (s, LD | RT (data_reg) | RA (r0));
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, r0, r1, data_reg, mem_index, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r2;
+ void *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+
+ tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
+ offsetof (CPUState, tlb_table[mem_index][0].addr_write));
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+ tcg_out_mov (s, 3, addr_reg);
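+ /* zero-extend the store value to the access size (8 << opc bits) for the helper */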
+ tcg_out_rld (s, RLDICL, 4, data_reg, 0, 64 - (1 << (3 + opc)));
+ tcg_out_movi (s, TCG_TYPE_I64, 5, mem_index);
+
+ tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
+
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ tcg_out32 (s, (LD_ADDEND
+ | RT (r0)
+ | RA (r0)
+ | (offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_write))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+#if TARGET_LONG_BITS == 32
+ tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
+#endif
+ r1 = 3;
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out32 (s, STB | RS (data_reg) | RA (r0));
+ break;
+ case 1:
+ if (bswap) tcg_out32 (s, STHBRX | RS (data_reg) | RA (0) | RB (r0));
+ else tcg_out32 (s, STH | RS (data_reg) | RA (r0));
+ break;
+ case 2:
+ if (bswap) tcg_out32 (s, STWBRX | RS (data_reg) | RA (0) | RB (r0));
+ else tcg_out32 (s, STW | RS (data_reg) | RA (r0));
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out32 (s, STWBRX | RS (data_reg) | RA (0) | RB (r0));
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out_rld (s, RLDICL, 0, data_reg, 32, 0);
+ tcg_out32 (s, STWBRX | RS (0) | RA (0) | RB (r1));
+ }
+ else tcg_out32 (s, STD | RS (data_reg) | RA (r0));
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+void tcg_target_qemu_prologue (TCGContext *s)
+{
+ int i, frame_size;
+ uint64_t addr;
+
+ frame_size = 0
+ + 8 /* back chain */
+ + 8 /* CR */
+ + 8 /* LR */
+ + 8 /* compiler doubleword */
+ + 8 /* link editor doubleword */
+ + 8 /* TOC save area */
+ + TCG_STATIC_CALL_ARGS_SIZE
+ + ARRAY_SIZE (tcg_target_callee_save_regs) * 8
+ ;
+ frame_size = (frame_size + 15) & ~15;
+
+ /* First emit adhoc function descriptor */
+ addr = (uint64_t) s->code_ptr + 24;
+ tcg_out32 (s, addr >> 32); tcg_out32 (s, addr); /* entry point */
+ s->code_ptr += 16; /* skip TOC and environment pointer */
+
+ /* Prologue */
+ tcg_out32 (s, MFSPR | RT (0) | LR);
+ tcg_out32 (s, STDU | RS (1) | RA (1) | (-frame_size & 0xffff));
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (STD
+ | RS (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, STD | RS (0) | RA (1) | (frame_size + 16));
+
+ tcg_out32 (s, MTSPR | RS (3) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+
+ /* Epilogue */
+ tb_ret_addr = s->code_ptr;
+
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (LD
+ | RT (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, LD | RT (0) | RA (1) | (frame_size + 16));
+ tcg_out32 (s, MTSPR | RS (0) | LR);
+ tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
+ tcg_out32 (s, BCLR | BO_ALWAYS);
+}
+
+static void tcg_out_ld (TCGContext *s, TCGType type, int ret, int arg1,
+ tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
+ else
+ tcg_out_ldst (s, ret, arg1, arg2, LD, LDX);
+}
+
+static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1,
+ tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
+ else
+ tcg_out_ldst (s, arg, arg1, arg2, STD, STDX);
+}
+
+static void ppc_addi32 (TCGContext *s, int rt, int ra, tcg_target_long si)
+{
+ if (!si && rt == ra)
+ return;
+
+ if (si == (int16_t) si)
+ tcg_out32 (s, ADDI | RT (rt) | RA (ra) | (si & 0xffff));
+ else {
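+ /* ADDI sign-extends its 16-bit immediate, so propagate the carry out of bit 15 into the high half */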
+ uint16_t h = ((si >> 16) & 0xffff) + ((uint16_t) si >> 15);
+ tcg_out32 (s, ADDIS | RT (rt) | RA (ra) | h);
+ tcg_out32 (s, ADDI | RT (rt) | RA (rt) | (si & 0xffff));
+ }
+}
+
+static void ppc_addi64 (TCGContext *s, int rt, int ra, tcg_target_long si)
+{
+ /* XXX: suboptimal */
+ if (si == (int16_t) si
+ || ((((uint64_t) si >> 31) == 0) && ((si & 0x8000) == 0)))
+ ppc_addi32 (s, rt, ra, si);
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, si);
+ tcg_out32 (s, ADD | TAB (rt, ra, 0)); /* rt = ra + r0, r0 holding si */
+ }
+}
+
+static void tcg_out_addi (TCGContext *s, int reg, tcg_target_long val)
+{
+ ppc_addi64 (s, reg, reg, val);
+}
+
+static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
+ int const_arg2, int cr, int arch64)
+{
+ int imm;
+ uint32_t op;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ else if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMP;
+ imm = 0;
+ break;
+
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ if (const_arg2) {
+ if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ default:
+ tcg_abort ();
+ }
+ op |= BF (cr) | (arch64 << 21);
+
+ if (imm)
+ tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
+ else {
+ if (const_arg2) {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, arg2);
+ tcg_out32 (s, op | RA (arg1) | RB (0));
+ }
+ else
+ tcg_out32 (s, op | RA (arg1) | RB (arg2));
+ }
+
+}
+
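+/* emit a conditional branch: if the label is already resolved the
+ displacement is known, otherwise keep the instruction's current
+ displacement bits and record an R_PPC_REL14 relocation */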
+static void tcg_out_bc (TCGContext *s, int bc, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value)
+ tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
+ else {
+ uint16_t val = *(uint16_t *) &s->code_ptr[2];
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, bc | (val & 0xfffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
+ }
+}
+
+static void tcg_out_brcond (TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index, int arch64)
+{
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7, arch64);
+ tcg_out_bc (s, tcg_to_bc[cond], label_index);
+}
+
+void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr)
+{
+ TCGContext s;
+ unsigned long patch_size;
+
+ s.code_ptr = (uint8_t *) jmp_addr;
+ tcg_out_b (&s, 0, addr);
+ patch_size = s.code_ptr - (uint8_t *) jmp_addr;
+ flush_icache_range (jmp_addr, jmp_addr + patch_size);
+}
+
+static void tcg_out_op (TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi (s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
+ tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
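+ /* reserve room for the patch emitted by ppc_tb_set_jmp_target:
+ worst case is a 5-insn movi plus mtctr and bctr, i.e. 7 insns (28 bytes) */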
+ s->code_ptr += 28;
+ }
+ else {
+ tcg_abort ();
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_br:
+ {
+ TCGLabel *l = &s->labels[args[0]];
+
+ if (l->has_value) {
+ tcg_out_b (s, 0, l->u.value);
+ }
+ else {
+ uint32_t val = *(uint32_t *) s->code_ptr;
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, B | (val & 0x3fffffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
+ }
+ }
+ break;
+ case INDEX_op_call:
+ tcg_out_call (s, args[0], const_args[0]);
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0]) {
+ tcg_out_b (s, 0, args[0]);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (args[0]) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+ }
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi (s, TCG_TYPE_I32, args[0], args[1]);
+ break;
+ case INDEX_op_movi_i64:
+ tcg_out_movi (s, TCG_TYPE_I64, args[0], args[1]);
+ break;
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ break;
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0]));
+ break;
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX);
+ break;
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX);
+ break;
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32u_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX);
+ break;
+ case INDEX_op_ld32s_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LWA, LWAX);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LD, LDX);
+ break;
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX);
+ break;
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX);
+ break;
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STD, STDX);
+ break;
+
+ case INDEX_op_add_i32:
+ if (const_args[2])
+ ppc_addi32 (s, args[0], args[1], args[2]);
+ else
+ tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_sub_i32:
+ if (const_args[2])
+ ppc_addi32 (s, args[0], args[1], -args[2]);
+ else
+ tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
+ break;
+
+ case INDEX_op_and_i64:
+ case INDEX_op_and_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | args[2]);
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, (opc == INDEX_op_and_i32
+ ? TCG_TYPE_I32
+ : TCG_TYPE_I64),
+ 0, args[2]);
+ tcg_out32 (s, AND | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, AND | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_or_i64:
+ case INDEX_op_or_i32:
+ if (const_args[2]) {
+ if (args[2] & 0xffff) {
+ tcg_out32 (s, ORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ if (args[2] >> 16)
+ tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ else {
+ tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ }
+ else
+ tcg_out32 (s, OR | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_xor_i64:
+ case INDEX_op_xor_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, XORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, (opc == INDEX_op_xor_i32
+ ? TCG_TYPE_I32
+ : TCG_TYPE_I64),
+ 0, args[2]);
+ tcg_out32 (s, XOR | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, XOR | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_mul_i32:
+ if (const_args[2]) {
+ if (args[2] == (int16_t) args[2])
+ tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1])
+ | (args[2] & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], 0));
+ }
+ }
+ else
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_div_i32:
+ tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_shl_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (args[2])
+ | MB (0)
+ | ME (31 - args[2])
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SLW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_shr_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (32 - args[2])
+ | MB (args[2])
+ | ME (31)
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SRW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_sar_i32:
+ if (const_args[2])
+ tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2]));
+ else
+ tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_brcond_i32:
+ tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 0);
+ break;
+
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 1);
+ break;
+
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ tcg_out32 (s, NEG | RT (args[0]) | RA (args[1]));
+ break;
+
+ case INDEX_op_add_i64:
+ if (const_args[2])
+ ppc_addi64 (s, args[0], args[1], args[2]);
+ else
+ tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_sub_i64:
+ if (const_args[2])
+ ppc_addi64 (s, args[0], args[1], -args[2]);
+ else
+ tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
+ break;
+
+ case INDEX_op_shl_i64:
+ if (const_args[2])
+ tcg_out_rld (s, RLDICR, args[0], args[1], args[2], 63 - args[2]);
+ else
+ tcg_out32 (s, SLD | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_shr_i64:
+ if (const_args[2])
+ tcg_out_rld (s, RLDICL, args[0], args[1], 64 - args[2], args[2]);
+ else
+ tcg_out32 (s, SRD | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_sar_i64:
+ if (const_args[2]) {
+ int sh = SH (args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
+ tcg_out32 (s, SRADI | RA (args[0]) | RS (args[1]) | sh);
+ }
+ else
+ tcg_out32 (s, SRAD | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_mul_i64:
+ tcg_out32 (s, MULLD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_div_i64:
+ tcg_out32 (s, DIVD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_divu_i64:
+ tcg_out32 (s, DIVDU | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_rem_i64:
+ tcg_out32 (s, DIVD | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+ case INDEX_op_remu_i64:
+ tcg_out32 (s, DIVDU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld (s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld (s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld (s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld (s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld (s, args, 2);
+ break;
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld (s, args, 2 | 4);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld (s, args, 3);
+ break;
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st (s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st (s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st (s, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st (s, args, 3);
+ break;
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ c = EXTSB;
+ goto gen_ext;
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ c = EXTSH;
+ goto gen_ext;
+ case INDEX_op_ext32s_i64:
+ c = EXTSW;
+ goto gen_ext;
+ gen_ext:
+ tcg_out32 (s, c | RS (args[1]) | RA (args[0]));
+ break;
+
+ default:
+ tcg_dump_ops (s, stderr);
+ tcg_abort ();
+ }
+}
+
+static const TCGTargetOpDef ppc_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "ri" } },
+ { INDEX_op_mul_i32, { "r", "r", "ri" } },
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "ri" } },
+ { INDEX_op_and_i32, { "r", "r", "ri" } },
+ { INDEX_op_or_i32, { "r", "r", "ri" } },
+ { INDEX_op_xor_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+ { INDEX_op_brcond_i64, { "r", "ri" } },
+
+ { INDEX_op_neg_i32, { "r", "r" } },
+
+ { INDEX_op_add_i64, { "r", "r", "ri" } },
+ { INDEX_op_sub_i64, { "r", "r", "ri" } },
+ { INDEX_op_and_i64, { "r", "r", "rZ" } },
+ { INDEX_op_or_i64, { "r", "r", "rZ" } },
+ { INDEX_op_xor_i64, { "r", "r", "rZ" } },
+
+ { INDEX_op_shl_i64, { "r", "r", "ri" } },
+ { INDEX_op_shr_i64, { "r", "r", "ri" } },
+ { INDEX_op_sar_i64, { "r", "r", "ri" } },
+
+ { INDEX_op_mul_i64, { "r", "r", "r" } },
+ { INDEX_op_div_i64, { "r", "r", "r" } },
+ { INDEX_op_divu_i64, { "r", "r", "r" } },
+ { INDEX_op_rem_i64, { "r", "r", "r" } },
+ { INDEX_op_remu_i64, { "r", "r", "r" } },
+
+ { INDEX_op_neg_i64, { "r", "r" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "S", "S" } },
+ { INDEX_op_qemu_st16, { "S", "S" } },
+ { INDEX_op_qemu_st32, { "S", "S" } },
+ { INDEX_op_qemu_st64, { "S", "S", "S" } },
+
+ { INDEX_op_ext8s_i32, { "r", "r" } },
+ { INDEX_op_ext16s_i32, { "r", "r" } },
+ { INDEX_op_ext8s_i64, { "r", "r" } },
+ { INDEX_op_ext16s_i64, { "r", "r" } },
+ { INDEX_op_ext32s_i64, { "r", "r" } },
+
+ { -1 },
+};
+
+void tcg_target_init (TCGContext *s)
+{
+ tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+ tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
+ tcg_regset_set32 (tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R0) |
+ (1 << TCG_REG_R3) |
+ (1 << TCG_REG_R4) |
+ (1 << TCG_REG_R5) |
+ (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) |
+ (1 << TCG_REG_R8) |
+ (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) |
+ (1 << TCG_REG_R11) |
+ (1 << TCG_REG_R12)
+ );
+
+ tcg_regset_clear (s->reserved_regs);
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R0);
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R1);
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R2);
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R13);
+
+ tcg_add_target_add_op_defs (ppc_op_defs);
+}
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
new file mode 100644
index 0000000..2174db2
--- /dev/null
+++ b/tcg/ppc64/tcg-target.h
@@ -0,0 +1,105 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_PPC64 1
+
+#define TCG_TARGET_REG_BITS 64
+#define TCG_TARGET_WORDS_BIGENDIAN
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R1
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 48
+
+/* optional instructions */
+#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_div_i32
+#define TCG_TARGET_HAS_neg_i64
+#define TCG_TARGET_HAS_div_i64
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_ext8s_i64
+#define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+
+#define TCG_AREG0 TCG_REG_R27
+#define TCG_AREG1 TCG_REG_R24
+#define TCG_AREG2 TCG_REG_R25
+#define TCG_AREG3 TCG_REG_R26
+
+/* taken directly from tcg-dyngen.c */
+#define MIN_CACHE_LINE_SIZE 8 /* conservative value */
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long p;
+
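+ /* the usual PowerPC self-modifying-code sequence: flush the data cache
+ to memory (dcbst/sync), then invalidate the stale instruction cache
+ lines (icbi/sync/isync) */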
+ start &= ~(MIN_CACHE_LINE_SIZE - 1);
+ stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);
+
+ for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
+ asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
+ asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("isync" : : : "memory");
+}
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
new file mode 100644
index 0000000..f36796d
--- /dev/null
+++ b/tcg/sparc/tcg-target.c
@@ -0,0 +1,1206 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%g0",
+ "%g1",
+ "%g2",
+ "%g3",
+ "%g4",
+ "%g5",
+ "%g6",
+ "%g7",
+ "%o0",
+ "%o1",
+ "%o2",
+ "%o3",
+ "%o4",
+ "%o5",
+ "%o6",
+ "%o7",
+ "%l0",
+ "%l1",
+ "%l2",
+ "%l3",
+ "%l4",
+ "%l5",
+ "%l6",
+ "%l7",
+ "%i0",
+ "%i1",
+ "%i2",
+ "%i3",
+ "%i4",
+ "%i5",
+ "%i6",
+ "%i7",
+};
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_L0,
+ TCG_REG_L1,
+ TCG_REG_L2,
+ TCG_REG_L3,
+ TCG_REG_L4,
+ TCG_REG_L5,
+ TCG_REG_L6,
+ TCG_REG_L7,
+ TCG_REG_I0,
+ TCG_REG_I1,
+ TCG_REG_I2,
+ TCG_REG_I3,
+ TCG_REG_I4,
+};
+
+static const int tcg_target_call_iarg_regs[6] = {
+ TCG_REG_O0,
+ TCG_REG_O1,
+ TCG_REG_O2,
+ TCG_REG_O3,
+ TCG_REG_O4,
+ TCG_REG_O5,
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_O0,
+ TCG_REG_O1,
+};
+
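+/* test whether val fits in a signed immediate of the given width: shifting
+ up and arithmetic-shifting back must reproduce the value */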
+static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
+{
+ return (val << ((sizeof(tcg_target_long) * 8 - bits))
+ >> (sizeof(tcg_target_long) * 8 - bits)) == val;
+}
+
+static inline int check_fit_i32(uint32_t val, unsigned int bits)
+{
+ return ((val << (32 - bits)) >> (32 - bits)) == val;
+}
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch (type) {
+ case R_SPARC_32:
+ if (value != (uint32_t)value)
+ tcg_abort();
+ *(uint32_t *)code_ptr = value;
+ break;
+ case R_SPARC_WDISP22:
+ value -= (long)code_ptr;
+ value >>= 2;
+ if (!check_fit_tl(value, 22))
+ tcg_abort();
+ *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 6;
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'r':
+ case 'L': /* qemu_ld/st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ // Helper args
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
+ break;
+ case 'I':
+ ct->ct |= TCG_CT_CONST_S11;
+ break;
+ case 'J':
+ ct->ct |= TCG_CT_CONST_S13;
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
+ return 1;
+ else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
+ return 1;
+ else
+ return 0;
+}
+
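+/* SPARC instruction fields: op in bits 30-31, rd in 25-29, op3 in 19-24,
+ rs1 in 14-18, rs2 in 0-4; bit 13 set selects the 13-bit signed immediate form */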
+#define INSN_OP(x) ((x) << 30)
+#define INSN_OP2(x) ((x) << 22)
+#define INSN_OP3(x) ((x) << 19)
+#define INSN_OPF(x) ((x) << 5)
+#define INSN_RD(x) ((x) << 25)
+#define INSN_RS1(x) ((x) << 14)
+#define INSN_RS2(x) (x)
+#define INSN_ASI(x) ((x) << 5)
+
+#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
+#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)
+
+#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
+#define COND_N 0x0
+#define COND_E 0x1
+#define COND_LE 0x2
+#define COND_L 0x3
+#define COND_LEU 0x4
+#define COND_CS 0x5
+#define COND_NEG 0x6
+#define COND_VS 0x7
+#define COND_A 0x8
+#define COND_NE 0x9
+#define COND_G 0xa
+#define COND_GE 0xb
+#define COND_GU 0xc
+#define COND_CC 0xd
+#define COND_POS 0xe
+#define COND_VC 0xf
+#define BA (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))
+
+#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
+#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
+#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
+#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
+#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
+#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
+#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
+#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x10))
+#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
+#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
+#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
+#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
+#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
+#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
+#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
+
+#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
+#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
+#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
+
+#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
+#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
+#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
+
+#define WRY (INSN_OP(2) | INSN_OP3(0x30))
+#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
+#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
+#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
+#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
+#define CALL INSN_OP(1)
+#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
+#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
+#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
+#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
+#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
+#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
+#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
+#define STB (INSN_OP(3) | INSN_OP3(0x05))
+#define STH (INSN_OP(3) | INSN_OP3(0x06))
+#define STW (INSN_OP(3) | INSN_OP3(0x04))
+#define STX (INSN_OP(3) | INSN_OP3(0x0e))
+#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
+#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
+#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
+#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
+#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
+#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
+#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
+#define STBA (INSN_OP(3) | INSN_OP3(0x15))
+#define STHA (INSN_OP(3) | INSN_OP3(0x16))
+#define STWA (INSN_OP(3) | INSN_OP3(0x14))
+#define STXA (INSN_OP(3) | INSN_OP3(0x1e))
+
+#ifndef ASI_PRIMARY_LITTLE
+#define ASI_PRIMARY_LITTLE 0x88
+#endif
+
+static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
+ int op)
+{
+ tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
+ INSN_RS2(rs2));
+}
+
+static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
+ uint32_t offset, int op)
+{
+ tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
+ INSN_IMM13(offset));
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
+}
+
+static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
+{
+ tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
+}
+
+static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
+{
+ tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
+}
+
+static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
+{
+ if (check_fit_tl(arg, 12))
+ tcg_out_movi_imm13(s, ret, arg);
+ else {
+ tcg_out_sethi(s, ret, arg);
+ if (arg & 0x3ff)
+ tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
+ }
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ if (!check_fit_tl(arg, 32) && (arg & ~0xffffffffULL) != 0) {
+ tcg_out_movi_imm32(s, TCG_REG_I4, arg >> 32);
+ tcg_out_arithi(s, TCG_REG_I4, TCG_REG_I4, 32, SHIFT_SLLX);
+ tcg_out_movi_imm32(s, ret, arg);
+ tcg_out_arith(s, ret, ret, TCG_REG_I4, ARITH_OR);
+ } else if (check_fit_tl(arg, 12))
+ tcg_out_movi_imm13(s, ret, arg);
+ else {
+ tcg_out_sethi(s, ret, arg);
+ if (arg & 0x3ff)
+ tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
+ }
+#else
+ tcg_out_movi_imm32(s, ret, arg);
+#endif
+}
+
+static inline void tcg_out_ld_raw(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ tcg_out_sethi(s, ret, arg);
+ tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
+ INSN_IMM13(arg & 0x3ff));
+}
+
+static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
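+ /* the base is only materialized when arg does not fit in 10 bits;
+ presumably real pointer arguments never do, so ret is always
+ initialized before the load below */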
+ if (!check_fit_tl(arg, 10))
+ tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ffULL);
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) |
+ INSN_IMM13(arg & 0x3ff));
+#else
+ tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
+ INSN_IMM13(arg & 0x3ff));
+#endif
+}
+
+static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, int op)
+{
+ if (check_fit_tl(offset, 13))
+ tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
+ INSN_IMM13(offset));
+ else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
+ tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
+ INSN_RS2(addr));
+ }
+}
+
+static inline void tcg_out_ldst_asi(TCGContext *s, int ret, int addr,
+ int offset, int op, int asi)
+{
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
+ tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
+ INSN_ASI(asi) | INSN_RS2(addr));
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst(s, ret, arg1, arg2, LDUW);
+ else
+ tcg_out_ldst(s, ret, arg1, arg2, LDX);
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst(s, arg, arg1, arg2, STW);
+ else
+ tcg_out_ldst(s, arg, arg1, arg2, STX);
+}
+
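+/* the Y register supplies the upper 32 bits of the dividend for udiv/sdiv
+ (and receives the high product bits from umul) */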
+static inline void tcg_out_sety(TCGContext *s, tcg_target_long val)
+{
+ if (val == 0 || val == -1)
+ tcg_out32(s, WRY | INSN_IMM13(val));
+ else
+ fprintf(stderr, "unimplemented sety %ld\n", (long)val);
+}
+
+static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val != 0) {
+ if (check_fit_tl(val, 13))
+ tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
+ else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, val);
+ tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_ADD);
+ }
+ }
+}
+
+static inline void tcg_out_andi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val != 0) {
+ if (check_fit_tl(val, 13))
+ tcg_out_arithi(s, reg, reg, val, ARITH_AND);
+ else {
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, val);
+ tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_AND);
+ }
+ }
+}
+
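+/* the canonical SPARC nop: sethi 0, %g0 */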
+static inline void tcg_out_nop(TCGContext *s)
+{
+ tcg_out_sethi(s, TCG_REG_G0, 0);
+}
+
+static void tcg_out_branch(TCGContext *s, int opc, int label_index)
+{
+ int32_t val;
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value) {
+ val = l->u.value - (tcg_target_long)s->code_ptr;
+ tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2)
+ | INSN_OFF22(val)));
+ } else {
+ tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
+ tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | 0));
+ }
+}
+
+static const uint8_t tcg_cond_to_bcond[10] = {
+ [TCG_COND_EQ] = COND_E,
+ [TCG_COND_NE] = COND_NE,
+ [TCG_COND_LT] = COND_L,
+ [TCG_COND_GE] = COND_GE,
+ [TCG_COND_LE] = COND_LE,
+ [TCG_COND_GT] = COND_G,
+ [TCG_COND_LTU] = COND_CS,
+ [TCG_COND_GEU] = COND_CC,
+ [TCG_COND_LEU] = COND_LEU,
+ [TCG_COND_GTU] = COND_GU,
+};
+
+static void tcg_out_brcond(TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ if (const_arg2 && arg2 == 0)
+ /* orcc %g0, r, %g0 */
+ tcg_out_arith(s, TCG_REG_G0, TCG_REG_G0, arg1, ARITH_ORCC);
+ else
+ /* subcc r1, r2, %g0 */
+ tcg_out_arith(s, TCG_REG_G0, arg1, arg2, ARITH_SUBCC);
+ tcg_out_branch(s, tcg_cond_to_bcond[cond], label_index);
+ tcg_out_nop(s);
+}
+
+/* Generate global QEMU prologue and epilogue code */
+void tcg_target_qemu_prologue(TCGContext *s)
+{
+ tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
+ INSN_IMM13(-TCG_TARGET_STACK_MINFRAME));
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I0) |
+ INSN_RS2(TCG_REG_G0));
+ tcg_out_nop(s);
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static const void * const qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static const void * const qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+#if TARGET_LONG_BITS == 32
+#define TARGET_LD_OP LDUW
+#else
+#define TARGET_LD_OP LDX
+#endif
+
+#if TARGET_PHYS_ADDR_BITS == 32
+#define TARGET_ADDEND_LD_OP LDUW
+#else
+#define TARGET_ADDEND_LD_OP LDX
+#endif
+
+#ifdef __arch64__
+#define HOST_LD_OP LDX
+#define HOST_ST_OP STX
+#define HOST_SLL_OP SHIFT_SLLX
+#define HOST_SRA_OP SHIFT_SRAX
+#else
+#define HOST_LD_OP LDUW
+#define HOST_ST_OP STW
+#define HOST_SLL_OP SHIFT_SLL
+#define HOST_SRA_OP SHIFT_SRA
+#endif
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ arg0 = TCG_REG_O0;
+ arg1 = TCG_REG_O1;
+ arg2 = TCG_REG_O2;
+
+#if defined(CONFIG_SOFTMMU)
+ /* srl addr_reg, x, arg1 */
+ tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
+ SHIFT_SRL);
+ /* and addr_reg, x, arg0 */
+ tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ /* and arg1, x, arg1 */
+ tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ /* add arg1, x, arg1 */
+ tcg_out_addi(s, arg1, offsetof(CPUState,
+ tlb_table[mem_index][0].addr_read));
+
+ /* add env, arg1, arg1 */
+ tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
+
+ /* ld [arg1], arg2 */
+ tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
+ INSN_RS2(TCG_REG_G0));
+
+ /* subcc arg0, arg2, %g0 */
+ tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
+
+ /* will become:
+ be label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* mov (delay slot) */
+ tcg_out_mov(s, arg0, addr_reg);
+
+ /* mov */
+ tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+
+ /* XXX: move that code at the end of the TB */
+ /* qemu_ld_helper[s_bits](arg0, arg1) */
+ tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
+ - (tcg_target_ulong)s->code_ptr) >> 2)
+ & 0x3fffffff));
+ /* Store AREG0 in stack to avoid ugly glibc bugs that mangle
+ global registers */
+ // delay slot
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
+
+ /* data_reg = sign_extend(arg0) */
+ switch(opc) {
+ case 0 | 4:
+ /* sll arg0, 24/56, data_reg */
+ tcg_out_arithi(s, data_reg, arg0, (int)sizeof(tcg_target_long) * 8 - 8,
+ HOST_SLL_OP);
+ /* sra data_reg, 24/56, data_reg */
+ tcg_out_arithi(s, data_reg, data_reg,
+ (int)sizeof(tcg_target_long) * 8 - 8, HOST_SRA_OP);
+ break;
+ case 1 | 4:
+ /* sll arg0, 16/48, data_reg */
+ tcg_out_arithi(s, data_reg, arg0,
+ (int)sizeof(tcg_target_long) * 8 - 16, HOST_SLL_OP);
+ /* sra data_reg, 16/48, data_reg */
+ tcg_out_arithi(s, data_reg, data_reg,
+ (int)sizeof(tcg_target_long) * 8 - 16, HOST_SRA_OP);
+ break;
+ case 2 | 4:
+ /* sll arg0, 32, data_reg */
+ tcg_out_arithi(s, data_reg, arg0, 32, HOST_SLL_OP);
+ /* sra data_reg, 32, data_reg */
+ tcg_out_arithi(s, data_reg, data_reg, 32, HOST_SRA_OP);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* mov */
+ tcg_out_mov(s, data_reg, arg0);
+ break;
+ }
+
+ /* will become:
+ ba label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* nop (delay slot) */
+ tcg_out_nop(s);
+
+ /* label1: */
+ *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr));
+
+ /* ld [arg1 + x], arg1 */
+ tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_read), TARGET_ADDEND_LD_OP);
+
+#if TARGET_LONG_BITS == 32
+ /* and addr_reg, x, arg0 */
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, 0xffffffff);
+ tcg_out_arith(s, arg0, addr_reg, TCG_REG_I5, ARITH_AND);
+ /* add arg0, arg1, arg0 */
+ tcg_out_arith(s, arg0, arg0, arg1, ARITH_ADD);
+#else
+ /* add addr_reg, arg1, arg0 */
+ tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
+#endif
+
+#else
+ arg0 = addr_reg;
+#endif
+
+ switch(opc) {
+ case 0:
+ /* ldub [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDUB);
+ break;
+ case 0 | 4:
+ /* ldsb [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDSB);
+ break;
+ case 1:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* lduh [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDUH);
+#else
+ /* lduha [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUHA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 1 | 4:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* ldsh [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDSH);
+#else
+ /* ldsha [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSHA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 2:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* lduw [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDUW);
+#else
+ /* lduwa [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUWA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 2 | 4:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* ldsw [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDSW);
+#else
+ /* ldswa [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSWA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 3:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* ldx [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDX);
+#else
+ /* ldxa [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDXA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label2_ptr));
+#endif
+}
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+ s_bits = opc;
+
+ arg0 = TCG_REG_O0;
+ arg1 = TCG_REG_O1;
+ arg2 = TCG_REG_O2;
+
+#if defined(CONFIG_SOFTMMU)
+ /* srl addr_reg, x, arg1 */
+ tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
+ SHIFT_SRL);
+
+ /* and addr_reg, x, arg0 */
+ tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ /* and arg1, x, arg1 */
+ tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ /* add arg1, x, arg1 */
+ tcg_out_addi(s, arg1, offsetof(CPUState,
+ tlb_table[mem_index][0].addr_write));
+
+ /* add env, arg1, arg1 */
+ tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
+
+ /* ld [arg1], arg2 */
+ tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
+ INSN_RS2(TCG_REG_G0));
+
+ /* subcc arg0, arg2, %g0 */
+ tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
+
+ /* will become:
+ be label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* mov (delay slot) */
+ tcg_out_mov(s, arg0, addr_reg);
+
+ /* mov */
+ tcg_out_mov(s, arg1, data_reg);
+
+ /* mov */
+ tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
+
+ /* XXX: move that code at the end of the TB */
+ /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
+ tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[s_bits]
+ - (tcg_target_ulong)s->code_ptr) >> 2)
+ & 0x3fffffff));
+ /* Store AREG0 in stack to avoid ugly glibc bugs that mangle
+ global registers */
+ // delay slot
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
+
+ /* will become:
+ ba label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* nop (delay slot) */
+ tcg_out_nop(s);
+
+ /* label1: */
+ *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr));
+
+ /* ld [arg1 + x], arg1 */
+ tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_write), TARGET_ADDEND_LD_OP);
+
+#if TARGET_LONG_BITS == 32
+ /* and addr_reg, x, arg0 */
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, 0xffffffff);
+ tcg_out_arith(s, arg0, addr_reg, TCG_REG_I5, ARITH_AND);
+ /* add arg0, arg1, arg0 */
+ tcg_out_arith(s, arg0, arg0, arg1, ARITH_ADD);
+#else
+ /* add addr_reg, arg1, arg0 */
+ tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
+#endif
+
+#else
+ arg0 = addr_reg;
+#endif
+
+ switch(opc) {
+ case 0:
+ /* stb data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STB);
+ break;
+ case 1:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* sth data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STH);
+#else
+ /* stha data_reg, [arg0] ASI_PRIMARY_LITTLE */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, STHA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 2:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* stw data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STW);
+#else
+ /* stwa data_reg, [arg0] ASI_PRIMARY_LITTLE */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, STWA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 3:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* stx data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STX);
+#else
+ /* stxa data_reg, [arg0] ASI_PRIMARY_LITTLE */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, STXA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label2_ptr));
+#endif
+}
+
+static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
+ INSN_IMM13(8));
+ tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
+ INSN_RS2(TCG_REG_G0));
+ break;
+ case INDEX_op_goto_tb:
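+ /* A non-NULL tb_jmp_offset selects the direct-jump scheme: the
+ sethi/jmpl pair emitted here is patched in place when TBs are
+ chained. Otherwise the next-TB pointer is loaded indirectly
+ from tb_next[]. */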
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+ tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000);
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
+ INSN_IMM13((args[0] & 0x1fff)));
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ } else {
+ /* indirect jump method */
+ tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
+ INSN_RS2(TCG_REG_G0));
+ }
+ tcg_out_nop(s);
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ if (const_args[0])
+ tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
+ - (tcg_target_ulong)s->code_ptr) >> 2)
+ & 0x3fffffff));
+ else {
+ tcg_out_ld_ptr(s, TCG_REG_I5,
+ (tcg_target_long)(s->tb_next + args[0]));
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
+ INSN_RS2(TCG_REG_G0));
+ }
+ /* Store AREG0 on the stack to work around glibc bugs that mangle
+ global registers */
+ // delay slot
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_ST_OP);
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - sizeof(long), HOST_LD_OP);
+ break;
+ case INDEX_op_jmp:
+ case INDEX_op_br:
+ tcg_out_branch(s, COND_A, args[0]);
+ tcg_out_nop(s);
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
+ break;
+
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+#define OP_32_64(x) \
+ glue(glue(case INDEX_op_, x), _i32:) \
+ glue(glue(case INDEX_op_, x), _i64:)
+#else
+#define OP_32_64(x) \
+ glue(glue(case INDEX_op_, x), _i32:)
+#endif
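+ /* On a 64-bit host, OP_32_64(x) lets one case body serve both the
+ _i32 and _i64 variants of opcode x; on 32-bit hosts it expands
+ to the _i32 case only. */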
+ OP_32_64(ld8u);
+ tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
+ break;
+ OP_32_64(ld8s);
+ tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
+ break;
+ OP_32_64(ld16u);
+ tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
+ break;
+ OP_32_64(ld16s);
+ tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
+ break;
+ case INDEX_op_ld_i32:
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ case INDEX_op_ld32u_i64:
+#endif
+ tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
+ break;
+ OP_32_64(st8);
+ tcg_out_ldst(s, args[0], args[1], args[2], STB);
+ break;
+ OP_32_64(st16);
+ tcg_out_ldst(s, args[0], args[1], args[2], STH);
+ break;
+ case INDEX_op_st_i32:
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ case INDEX_op_st32_i64:
+#endif
+ tcg_out_ldst(s, args[0], args[1], args[2], STW);
+ break;
+ OP_32_64(add);
+ c = ARITH_ADD;
+ goto gen_arith32;
+ OP_32_64(sub);
+ c = ARITH_SUB;
+ goto gen_arith32;
+ OP_32_64(and);
+ c = ARITH_AND;
+ goto gen_arith32;
+ OP_32_64(or);
+ c = ARITH_OR;
+ goto gen_arith32;
+ OP_32_64(xor);
+ c = ARITH_XOR;
+ goto gen_arith32;
+ case INDEX_op_shl_i32:
+ c = SHIFT_SLL;
+ goto gen_arith32;
+ case INDEX_op_shr_i32:
+ c = SHIFT_SRL;
+ goto gen_arith32;
+ case INDEX_op_sar_i32:
+ c = SHIFT_SRA;
+ goto gen_arith32;
+ case INDEX_op_mul_i32:
+ c = ARITH_UMUL;
+ goto gen_arith32;
+ case INDEX_op_div2_i32:
+#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
+ c = ARITH_SDIVX;
+ goto gen_arith32;
+#else
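+ /* v8 SDIV/UDIV take the high 32 bits of the 64-bit dividend from
+ the %y register, which must be written before the divide. */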
+ tcg_out_sety(s, 0);
+ c = ARITH_SDIV;
+ goto gen_arith32;
+#endif
+ case INDEX_op_divu2_i32:
+#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
+ c = ARITH_UDIVX;
+ goto gen_arith32;
+#else
+ tcg_out_sety(s, 0);
+ c = ARITH_UDIV;
+ goto gen_arith32;
+#endif
+
+ case INDEX_op_brcond_i32:
+ tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
+ args[3]);
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld(s, args, 2 | 4);
+ break;
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ case INDEX_op_movi_i64:
+ tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
+ break;
+ case INDEX_op_ld32s_i64:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDX);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldst(s, args[0], args[1], args[2], STX);
+ break;
+ case INDEX_op_shl_i64:
+ c = SHIFT_SLLX;
+ goto gen_arith32;
+ case INDEX_op_shr_i64:
+ c = SHIFT_SRLX;
+ goto gen_arith32;
+ case INDEX_op_sar_i64:
+ c = SHIFT_SRAX;
+ goto gen_arith32;
+ case INDEX_op_mul_i64:
+ c = ARITH_MULX;
+ goto gen_arith32;
+ case INDEX_op_div2_i64:
+ c = ARITH_SDIVX;
+ goto gen_arith32;
+ case INDEX_op_divu2_i64:
+ c = ARITH_UDIVX;
+ goto gen_arith32;
+
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
+ args[3]);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, 3);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, 3);
+ break;
+
+#endif
+ gen_arith32:
+ if (const_args[2]) {
+ tcg_out_arithi(s, args[0], args[1], args[2], c);
+ } else {
+ tcg_out_arith(s, args[0], args[1], args[2], c);
+ }
+ break;
+
+ default:
+ fprintf(stderr, "unknown opcode 0x%x\n", opc);
+ tcg_abort();
+ }
+}
+
+static const TCGTargetOpDef sparc_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "rJ" } },
+ { INDEX_op_mul_i32, { "r", "r", "rJ" } },
+ { INDEX_op_div2_i32, { "r", "r", "0", "1", "r" } },
+ { INDEX_op_divu2_i32, { "r", "r", "0", "1", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "rJ" } },
+ { INDEX_op_and_i32, { "r", "r", "rJ" } },
+ { INDEX_op_or_i32, { "r", "r", "rJ" } },
+ { INDEX_op_xor_i32, { "r", "r", "rJ" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "rJ" } },
+ { INDEX_op_shr_i32, { "r", "r", "rJ" } },
+ { INDEX_op_sar_i32, { "r", "r", "rJ" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+ { INDEX_op_qemu_ld64, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L" } },
+
+ { INDEX_op_add_i64, { "r", "r", "rJ" } },
+ { INDEX_op_mul_i64, { "r", "r", "rJ" } },
+ { INDEX_op_div2_i64, { "r", "r", "0", "1", "r" } },
+ { INDEX_op_divu2_i64, { "r", "r", "0", "1", "r" } },
+ { INDEX_op_sub_i64, { "r", "r", "rJ" } },
+ { INDEX_op_and_i64, { "r", "r", "rJ" } },
+ { INDEX_op_or_i64, { "r", "r", "rJ" } },
+ { INDEX_op_xor_i64, { "r", "r", "rJ" } },
+
+ { INDEX_op_shl_i64, { "r", "r", "rJ" } },
+ { INDEX_op_shr_i64, { "r", "r", "rJ" } },
+ { INDEX_op_sar_i64, { "r", "r", "rJ" } },
+
+ { INDEX_op_brcond_i64, { "r", "ri" } },
+#endif
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
+#endif
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_G1) |
+ (1 << TCG_REG_G2) |
+ (1 << TCG_REG_G3) |
+ (1 << TCG_REG_G4) |
+ (1 << TCG_REG_G5) |
+ (1 << TCG_REG_G6) |
+ (1 << TCG_REG_G7) |
+ (1 << TCG_REG_O0) |
+ (1 << TCG_REG_O1) |
+ (1 << TCG_REG_O2) |
+ (1 << TCG_REG_O3) |
+ (1 << TCG_REG_O4) |
+ (1 << TCG_REG_O5) |
+ (1 << TCG_REG_O7));
+
+ tcg_regset_clear(s->reserved_regs);
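+ /* %g0 is hard-wired to zero, %o6/%i6 are the stack and frame
+ pointers, and %o7/%i7 hold call return addresses, so none of
+ them may be handed to the register allocator. */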
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use
+#endif
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
+ tcg_add_target_add_op_defs(sparc_op_defs);
+}
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
new file mode 100644
index 0000000..8dc07d3
--- /dev/null
+++ b/tcg/sparc/tcg-target.h
@@ -0,0 +1,122 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_SPARC 1
+
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+#define TCG_TARGET_REG_BITS 64
+#else
+#define TCG_TARGET_REG_BITS 32
+#endif
+
+#define TCG_TARGET_WORDS_BIGENDIAN
+
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_G0 = 0,
+ TCG_REG_G1,
+ TCG_REG_G2,
+ TCG_REG_G3,
+ TCG_REG_G4,
+ TCG_REG_G5,
+ TCG_REG_G6,
+ TCG_REG_G7,
+ TCG_REG_O0,
+ TCG_REG_O1,
+ TCG_REG_O2,
+ TCG_REG_O3,
+ TCG_REG_O4,
+ TCG_REG_O5,
+ TCG_REG_O6,
+ TCG_REG_O7,
+ TCG_REG_L0,
+ TCG_REG_L1,
+ TCG_REG_L2,
+ TCG_REG_L3,
+ TCG_REG_L4,
+ TCG_REG_L5,
+ TCG_REG_L6,
+ TCG_REG_L7,
+ TCG_REG_I0,
+ TCG_REG_I1,
+ TCG_REG_I2,
+ TCG_REG_I3,
+ TCG_REG_I4,
+ TCG_REG_I5,
+ TCG_REG_I6,
+ TCG_REG_I7,
+};
+
+#define TCG_CT_CONST_S11 0x100
+#define TCG_CT_CONST_S13 0x200
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_I6
+#ifdef __arch64__
+// Reserve space for AREG0
+#define TCG_TARGET_STACK_MINFRAME (176 + 2 * (int)sizeof(long))
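+// The 64-bit SPARC ABI biases %sp by 2047 (the "stack bias"),
+// hence the 2047 added to the call stack offset below.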
+#define TCG_TARGET_CALL_STACK_OFFSET (2047 + TCG_TARGET_STACK_MINFRAME)
+#define TCG_TARGET_STACK_ALIGN 16
+#else
+// AREG0 + one word for alignment
+#define TCG_TARGET_STACK_MINFRAME (92 + (2 + 1) * (int)sizeof(long))
+#define TCG_TARGET_CALL_STACK_OFFSET TCG_TARGET_STACK_MINFRAME
+#define TCG_TARGET_STACK_ALIGN 8
+#endif
+
+/* optional instructions */
+//#define TCG_TARGET_HAS_bswap_i32
+//#define TCG_TARGET_HAS_bswap_i64
+//#define TCG_TARGET_HAS_neg_i32
+//#define TCG_TARGET_HAS_neg_i64
+
+
+/* Note: must be synced with dyngen-exec.h and Makefile.target */
+#ifdef HOST_SOLARIS
+#define TCG_AREG0 TCG_REG_G2
+#define TCG_AREG1 TCG_REG_G3
+#define TCG_AREG2 TCG_REG_G4
+#define TCG_AREG3 TCG_REG_G5
+#define TCG_AREG4 TCG_REG_G6
+#elif defined(__sparc_v9__)
+#define TCG_AREG0 TCG_REG_G5
+#define TCG_AREG1 TCG_REG_G6
+#define TCG_AREG2 TCG_REG_G7
+#else
+#define TCG_AREG0 TCG_REG_G6
+#define TCG_AREG1 TCG_REG_G1
+#define TCG_AREG2 TCG_REG_G2
+#define TCG_AREG3 TCG_REG_G3
+#endif
+
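+/* The SPARC flush instruction invalidates one 8-byte doubleword of
+ the instruction cache, so the range is walked at that granularity. */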
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long p;
+
+ p = start & ~(8UL - 1UL);
+ stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);
+
+ for (; p < stop; p += 8)
+ __asm__ __volatile__("flush\t%0" : : "r" (p));
+}
diff --git a/tcg/tcg-dyngen.c b/tcg/tcg-dyngen.c
new file mode 100644
index 0000000..b4ceb5e
--- /dev/null
+++ b/tcg/tcg-dyngen.c
@@ -0,0 +1,431 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <assert.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "config.h"
+#include "osdep.h"
+
+#include "tcg.h"
+
+int __op_param1, __op_param2, __op_param3;
+#if defined(__sparc__) || defined(__arm__)
+ void __op_gen_label1(){}
+ void __op_gen_label2(){}
+ void __op_gen_label3(){}
+#else
+ int __op_gen_label1, __op_gen_label2, __op_gen_label3;
+#endif
+int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
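+/* These dummy symbols are never used at run time: dyngen locates
+ relocations against them in the compiled op.c object to find where
+ parameters, labels and jump targets must be patched into the
+ copied code. */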
+
+#if 0
+#if defined(__s390__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+}
+#elif defined(__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ while (start < stop) {
+ asm volatile ("fc %0" :: "r"(start));
+ start += 32;
+ }
+ asm volatile (";;sync.i;;srlz.i;;");
+}
+#elif defined(__powerpc__)
+
+#define MIN_CACHE_LINE_SIZE 8 /* conservative value */
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long p;
+
+ start &= ~(MIN_CACHE_LINE_SIZE - 1);
+ stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);
+
+ for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
+ asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
+ asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
+ }
+ asm volatile ("sync" : : : "memory");
+ asm volatile ("isync" : : : "memory");
+}
+#elif defined(__alpha__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ asm ("imb");
+}
+#elif defined(__sparc__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long p;
+
+ p = start & ~(8UL - 1UL);
+ stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);
+
+ for (; p < stop; p += 8)
+ __asm__ __volatile__("flush\t%0" : : "r" (p));
+}
+#elif defined(__arm__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ register unsigned long _beg __asm ("a1") = start;
+ register unsigned long _end __asm ("a2") = stop;
+ register unsigned long _flg __asm ("a3") = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+}
+#elif defined(__mc68000)
+
+# include <asm/cachectl.h>
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ cacheflush(start,FLUSH_SCOPE_LINE,FLUSH_CACHE_BOTH,stop-start+16);
+}
+#elif defined(__mips__)
+
+#include <sys/cachectl.h>
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ _flush_cache ((void *)start, stop - start, BCACHE);
+}
+#else
+#error unsupported CPU
+#endif
+
+#ifdef __alpha__
+
+register int gp asm("$29");
+
+static inline void immediate_ldah(void *p, int val) {
+ uint32_t *dest = p;
+ long high = ((val >> 16) + ((val >> 15) & 1)) & 0xffff;
+
+ *dest &= ~0xffff;
+ *dest |= high;
+ *dest |= 31 << 16;
+}
+static inline void immediate_lda(void *dest, int val) {
+ *(uint16_t *) dest = val;
+}
+void fix_bsr(void *p, int offset) {
+ uint32_t *dest = p;
+ *dest &= ~((1 << 21) - 1);
+ *dest |= (offset >> 2) & ((1 << 21) - 1);
+}
+
+#endif /* __alpha__ */
+
+#ifdef __ia64
+
+/* Patch instruction with "val" where "mask" has 1 bits. */
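+/* IA-64 bundles pack a 5-bit template and three 41-bit slots into
+ 16 bytes, so the field being patched may straddle the two 64-bit
+ words of the bundle. */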
+static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
+{
+ uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
+# define insn_mask ((1UL << 41) - 1)
+ unsigned long shift;
+
+ b0 = b[0]; b1 = b[1];
+ shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */
+ if (shift >= 64) {
+ m1 = mask << (shift - 64);
+ v1 = val << (shift - 64);
+ } else {
+ m0 = mask << shift; m1 = mask >> (64 - shift);
+ v0 = val << shift; v1 = val >> (64 - shift);
+ b[0] = (b0 & ~m0) | (v0 & m0);
+ }
+ b[1] = (b1 & ~m1) | (v1 & m1);
+}
+
+static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
+{
+ ia64_patch(insn_addr,
+ 0x011ffffe000UL,
+ ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
+ | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
+ ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
+}
+
+static inline void ia64_imm64 (void *insn, uint64_t val)
+{
+ /* Ignore the slot number of the relocation; GCC and Intel
+ toolchains differed for some time on whether IMM64 relocs are
+ against slot 1 (Intel) or slot 2 (GCC). */
+ uint64_t insn_addr = (uint64_t) insn & ~3UL;
+
+ ia64_patch(insn_addr + 2,
+ 0x01fffefe000UL,
+ ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
+ | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */
+ | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */
+ | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */
+ | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)
+ );
+ ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
+}
+
+static inline void ia64_imm60b (void *insn, uint64_t val)
+{
+ /* Ignore the slot number of the relocation; GCC and Intel
+ toolchains differed for some time on whether IMM64 relocs are
+ against slot 1 (Intel) or slot 2 (GCC). */
+ uint64_t insn_addr = (uint64_t) insn & ~3UL;
+
+ if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
+ fprintf(stderr, "%s: value %ld out of IMM60 range\n",
+ __FUNCTION__, (int64_t) val);
+ ia64_patch_imm60(insn_addr + 2, val);
+}
+
+static inline void ia64_imm22 (void *insn, uint64_t val)
+{
+ if (val + (1 << 21) >= (1 << 22))
+ fprintf(stderr, "%s: value %li out of IMM22 range\n",
+ __FUNCTION__, (int64_t)val);
+ ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
+ ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
+ | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */
+ | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
+ | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
+}
+
+/* Like ia64_imm22(), but also clear bits 20-21. For addl, this has
+ the effect of turning "addl rX=imm22,rY" into "addl
+ rX=imm22,r0". */
+static inline void ia64_imm22_r0 (void *insn, uint64_t val)
+{
+ if (val + (1 << 21) >= (1 << 22))
+ fprintf(stderr, "%s: value %li out of IMM22 range\n",
+ __FUNCTION__, (int64_t)val);
+ ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
+ ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
+ | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */
+ | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
+ | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
+}
+
+static inline void ia64_imm21b (void *insn, uint64_t val)
+{
+ if (val + (1 << 20) >= (1 << 21))
+ fprintf(stderr, "%s: value %li out of IMM21b range\n",
+ __FUNCTION__, (int64_t)val);
+ ia64_patch((uint64_t) insn, 0x11ffffe000UL,
+ ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
+ | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */));
+}
+
+static inline void ia64_nop_b (void *insn)
+{
+ ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
+}
+
+static inline void ia64_ldxmov(void *insn, uint64_t val)
+{
+ if (val + (1 << 21) < (1 << 22))
+ ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
+}
+
+static inline int ia64_patch_ltoff(void *insn, uint64_t val,
+ int relaxable)
+{
+ if (relaxable && (val + (1 << 21) < (1 << 22))) {
+ ia64_imm22_r0(insn, val);
+ return 0;
+ }
+ return 1;
+}
+
+struct ia64_fixup {
+ struct ia64_fixup *next;
+ void *addr; /* address that needs to be patched */
+ long value;
+};
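+/* Fixups are chained through stack-allocated (alloca) nodes and
+ resolved in one pass by ia64_apply_fixes(). */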
+
+#define IA64_PLT(insn, plt_index) \
+do { \
+ struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \
+ fixup->next = plt_fixes; \
+ plt_fixes = fixup; \
+ fixup->addr = (insn); \
+ fixup->value = (plt_index); \
+ plt_offset[(plt_index)] = 1; \
+} while (0)
+
+#define IA64_LTOFF(insn, val, relaxable) \
+do { \
+ if (ia64_patch_ltoff(insn, val, relaxable)) { \
+ struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \
+ fixup->next = ltoff_fixes; \
+ ltoff_fixes = fixup; \
+ fixup->addr = (insn); \
+ fixup->value = (val); \
+ } \
+} while (0)
+
+static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
+ struct ia64_fixup *ltoff_fixes,
+ uint64_t gp,
+ struct ia64_fixup *plt_fixes,
+ int num_plts,
+ unsigned long *plt_target,
+ unsigned int *plt_offset)
+{
+ static const uint8_t plt_bundle[] = {
+ 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
+ 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,
+
+ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
+ };
+ uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start;
+ uint64_t *vp;
+ struct ia64_fixup *fixup;
+ unsigned int offset = 0;
+ struct fdesc {
+ long ip;
+ long gp;
+ } *fdesc;
+ int i;
+
+ if (plt_fixes) {
+ plt_start = gen_code_ptr;
+
+ for (i = 0; i < num_plts; ++i) {
+ if (plt_offset[i]) {
+ plt_offset[i] = offset;
+ offset += sizeof(plt_bundle);
+
+ fdesc = (struct fdesc *) plt_target[i];
+ memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
+ ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
+ ia64_imm60b(gen_code_ptr + 0x12,
+ (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
+ gen_code_ptr += sizeof(plt_bundle);
+ }
+ }
+
+ for (fixup = plt_fixes; fixup; fixup = fixup->next)
+ ia64_imm21b(fixup->addr,
+ ((long) plt_start + plt_offset[fixup->value]
+ - ((long) fixup->addr & ~0xf)) >> 4);
+ }
+
+ got_start = gen_code_ptr;
+
+ /* First, create the GOT: */
+ for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
+ /* first check if we already have this value in the GOT: */
+ for (vp = (uint64_t *) got_start; vp < (uint64_t *) gen_code_ptr; ++vp)
+ if (*vp == fixup->value)
+ break;
+ if (vp == (uint64_t *) gen_code_ptr) {
+ /* Nope, we need to put the value in the GOT: */
+ *vp = fixup->value;
+ gen_code_ptr += 8;
+ }
+ ia64_imm22(fixup->addr, (long) vp - gp);
+ }
+ /* Keep code ptr aligned. */
+ if ((long) gen_code_ptr & 15)
+ gen_code_ptr += 8;
+ *gen_code_pp = gen_code_ptr;
+}
+#endif
+#endif
+
+#ifdef CONFIG_DYNGEN_OP
+
+#if defined __hppa__
+struct hppa_branch_stub {
+ uint32_t *location;
+ long target;
+ struct hppa_branch_stub *next;
+};
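+/* PA-RISC branches have a limited (17-bit) displacement; targets out
+ of range are reached through ldil/be,n stubs emitted after the
+ generated code and recorded in this list. */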
+
+#define HPPA_RECORD_BRANCH(LIST, LOC, TARGET) \
+do { \
+ struct hppa_branch_stub *stub = alloca(sizeof(struct hppa_branch_stub)); \
+ stub->location = LOC; \
+ stub->target = TARGET; \
+ stub->next = LIST; \
+ LIST = stub; \
+} while (0)
+
+static inline void hppa_process_stubs(struct hppa_branch_stub *stub,
+ uint8_t **gen_code_pp)
+{
+ uint32_t *s = (uint32_t *)*gen_code_pp;
+ uint32_t *p = s + 1;
+
+ if (!stub) return;
+
+ for (; stub != NULL; stub = stub->next) {
+ unsigned long l = (unsigned long)p;
+ /* stub:
+ * ldil L'target, %r1
+ * be,n R'target(%sr4,%r1)
+ */
+ *p++ = 0x20200000 | reassemble_21(lrsel(stub->target, 0));
+ *p++ = 0xe0202002 | (reassemble_17(rrsel(stub->target, 0) >> 2));
+ hppa_patch17f(stub->location, l, 0);
+ }
+ /* b,l,n stub,%r0 */
+ *s = 0xe8000002 | reassemble_17((p - s) - 2);
+ *gen_code_pp = (uint8_t *)p;
+}
+#endif /* __hppa__ */
+
+const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr)
+{
+ uint8_t *gen_code_ptr;
+
+#ifdef __hppa__
+ struct hppa_branch_stub *hppa_stubs = NULL;
+#endif
+
+ gen_code_ptr = s->code_ptr;
+ switch(opc) {
+
+/* op.h is dynamically generated by dyngen.c from op.c */
+#include "op.h"
+
+ default:
+ tcg_abort();
+ }
+
+#ifdef __hppa__
+ hppa_process_stubs(hppa_stubs, &gen_code_ptr);
+#endif
+
+ s->code_ptr = gen_code_ptr;
+ return opparam_ptr;
+}
+#endif
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
new file mode 100644
index 0000000..bc6be85
--- /dev/null
+++ b/tcg/tcg-op.h
@@ -0,0 +1,1713 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "tcg.h"
+
+#ifdef CONFIG_DYNGEN_OP
+/* legacy dyngen operations */
+#include "gen-op.h"
+#endif
+
+int gen_new_label(void);
+
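+/* Opcodes and operands are written to two parallel streams:
+ gen_opc_ptr receives the opcodes and gen_opparam_ptr the
+ arguments; the code generator later walks both in lockstep. */
+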
+static inline void tcg_gen_op1(int opc, TCGv arg1)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+}
+
+static inline void tcg_gen_op1i(int opc, TCGArg arg1)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = arg1;
+}
+
+static inline void tcg_gen_op2(int opc, TCGv arg1, TCGv arg2)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+}
+
+static inline void tcg_gen_op2i(int opc, TCGv arg1, TCGArg arg2)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = arg2;
+}
+
+static inline void tcg_gen_op2ii(int opc, TCGArg arg1, TCGArg arg2)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = arg1;
+ *gen_opparam_ptr++ = arg2;
+}
+
+static inline void tcg_gen_op3(int opc, TCGv arg1, TCGv arg2, TCGv arg3)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+}
+
+static inline void tcg_gen_op3i(int opc, TCGv arg1, TCGv arg2, TCGArg arg3)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = arg3;
+}
+
+static inline void tcg_gen_op4(int opc, TCGv arg1, TCGv arg2, TCGv arg3,
+ TCGv arg4)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+ *gen_opparam_ptr++ = GET_TCGV(arg4);
+}
+
+static inline void tcg_gen_op4i(int opc, TCGv arg1, TCGv arg2, TCGv arg3,
+ TCGArg arg4)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+ *gen_opparam_ptr++ = arg4;
+}
+
+static inline void tcg_gen_op4ii(int opc, TCGv arg1, TCGv arg2, TCGArg arg3,
+ TCGArg arg4)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = arg3;
+ *gen_opparam_ptr++ = arg4;
+}
+
+static inline void tcg_gen_op5(int opc, TCGv arg1, TCGv arg2,
+ TCGv arg3, TCGv arg4,
+ TCGv arg5)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+ *gen_opparam_ptr++ = GET_TCGV(arg4);
+ *gen_opparam_ptr++ = GET_TCGV(arg5);
+}
+
+static inline void tcg_gen_op5i(int opc, TCGv arg1, TCGv arg2,
+ TCGv arg3, TCGv arg4,
+ TCGArg arg5)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+ *gen_opparam_ptr++ = GET_TCGV(arg4);
+ *gen_opparam_ptr++ = arg5;
+}
+
+static inline void tcg_gen_op6(int opc, TCGv arg1, TCGv arg2,
+ TCGv arg3, TCGv arg4,
+ TCGv arg5, TCGv arg6)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+ *gen_opparam_ptr++ = GET_TCGV(arg4);
+ *gen_opparam_ptr++ = GET_TCGV(arg5);
+ *gen_opparam_ptr++ = GET_TCGV(arg6);
+}
+
+static inline void tcg_gen_op6ii(int opc, TCGv arg1, TCGv arg2,
+ TCGv arg3, TCGv arg4,
+ TCGArg arg5, TCGArg arg6)
+{
+ *gen_opc_ptr++ = opc;
+ *gen_opparam_ptr++ = GET_TCGV(arg1);
+ *gen_opparam_ptr++ = GET_TCGV(arg2);
+ *gen_opparam_ptr++ = GET_TCGV(arg3);
+ *gen_opparam_ptr++ = GET_TCGV(arg4);
+ *gen_opparam_ptr++ = arg5;
+ *gen_opparam_ptr++ = arg6;
+}
+
+static inline void gen_set_label(int n)
+{
+ tcg_gen_op1i(INDEX_op_set_label, n);
+}
+
+static inline void tcg_gen_br(int label)
+{
+ tcg_gen_op1i(INDEX_op_br, label);
+}
+
+static inline void tcg_gen_mov_i32(TCGv ret, TCGv arg)
+{
+ if (GET_TCGV(ret) != GET_TCGV(arg))
+ tcg_gen_op2(INDEX_op_mov_i32, ret, arg);
+}
+
+static inline void tcg_gen_movi_i32(TCGv ret, int32_t arg)
+{
+ tcg_gen_op2i(INDEX_op_movi_i32, ret, arg);
+}
+
+/* helper calls */
+#define TCG_HELPER_CALL_FLAGS 0
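+/* Each tcg_gen_helper_M_N wrapper materializes the helper address as
+ a constant temporary, emits a call with M results and N arguments,
+ and frees the temporary again. */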
+
+static inline void tcg_gen_helper_0_0(void *func)
+{
+ TCGv t0;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 0, NULL, 0, NULL);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_0_1(void *func, TCGv arg)
+{
+ TCGv t0;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 0, NULL, 1, &arg);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_0_2(void *func, TCGv arg1, TCGv arg2)
+{
+ TCGv args[2];
+ TCGv t0;
+ args[0] = arg1;
+ args[1] = arg2;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 0, NULL, 2, args);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_0_3(void *func,
+ TCGv arg1, TCGv arg2, TCGv arg3)
+{
+ TCGv args[3];
+ TCGv t0;
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 0, NULL, 3, args);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_0_4(void *func, TCGv arg1, TCGv arg2,
+ TCGv arg3, TCGv arg4)
+{
+ TCGv args[4];
+ TCGv t0;
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 0, NULL, 4, args);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_1_0(void *func, TCGv ret)
+{
+ TCGv t0;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 1, &ret, 0, NULL);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_1_1(void *func, TCGv ret, TCGv arg1)
+{
+ TCGv t0;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 1, &ret, 1, &arg1);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_1_2(void *func, TCGv ret,
+ TCGv arg1, TCGv arg2)
+{
+ TCGv args[2];
+ TCGv t0;
+ args[0] = arg1;
+ args[1] = arg2;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 1, &ret, 2, args);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_1_3(void *func, TCGv ret,
+ TCGv arg1, TCGv arg2, TCGv arg3)
+{
+ TCGv args[3];
+ TCGv t0;
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 1, &ret, 3, args);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_helper_1_4(void *func, TCGv ret,
+ TCGv arg1, TCGv arg2, TCGv arg3,
+ TCGv arg4)
+{
+ TCGv args[4];
+ TCGv t0;
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ t0 = tcg_const_ptr((tcg_target_long)func);
+ tcg_gen_call(&tcg_ctx,
+ t0, TCG_HELPER_CALL_FLAGS,
+ 1, &ret, 4, args);
+ tcg_temp_free(t0);
+}
+
+/* 32 bit ops */
+
+static inline void tcg_gen_ld8u_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld8u_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld8s_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld8s_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld16u_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld16u_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld16s_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld16s_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld_i32(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld_i32, ret, arg2, offset);
+}
+
+static inline void tcg_gen_st8_i32(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st8_i32, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st16_i32(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st16_i32, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st_i32(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st_i32, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_add_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_add_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_addi_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_add_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_sub_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_sub_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_subi_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_sub_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_and_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_and_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_andi_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_movi_i32(ret, 0);
+ } else if (arg2 == 0xffffffff) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_and_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_or_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_or_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_ori_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0xffffffff) {
+ tcg_gen_movi_i32(ret, 0xffffffff);
+ } else if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_or_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_xor_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_xor_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_xori_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_xor_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_shl_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_shl_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shli_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_shl_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_shr_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_shr_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shri_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_shr_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_sar_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_sar_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_sari_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_sar_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_brcond_i32(int cond, TCGv arg1, TCGv arg2,
+ int label_index)
+{
+ tcg_gen_op4ii(INDEX_op_brcond_i32, arg1, arg2, cond, label_index);
+}
+
+static inline void tcg_gen_brcondi_i32(int cond, TCGv arg1, int32_t arg2,
+ int label_index)
+{
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_brcond_i32(cond, arg1, t0, label_index);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_mul_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_mul_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_muli_i32(TCGv ret, TCGv arg1, int32_t arg2)
+{
+ TCGv t0 = tcg_const_i32(arg2);
+ tcg_gen_mul_i32(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+#ifdef TCG_TARGET_HAS_div_i32
+static inline void tcg_gen_div_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_div_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_rem_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_rem_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_divu_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_divu_i32, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_remu_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_remu_i32, ret, arg1, arg2);
+}
+#else
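+/* Fallback when the target lacks single-output division: the div2
+ ops take the dividend as a (low, high) pair, so the high word is
+ first filled with the sign (signed) or with zero (unsigned). */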
+static inline void tcg_gen_div_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ tcg_gen_sari_i32(t0, arg1, 31);
+ tcg_gen_op5(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_rem_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ tcg_gen_sari_i32(t0, arg1, 31);
+ tcg_gen_op5(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_divu_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_op5(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_remu_i32(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_op5(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+#endif
+
+#if TCG_TARGET_REG_BITS == 32
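+/* On 32-bit hosts a 64-bit value occupies a pair of 32-bit
+ temporaries; TCGV_HIGH(x) names the upper half, x itself the
+ lower. */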
+
+static inline void tcg_gen_mov_i64(TCGv ret, TCGv arg)
+{
+ if (GET_TCGV(ret) != GET_TCGV(arg)) {
+ tcg_gen_mov_i32(ret, arg);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
+ }
+}
+
+static inline void tcg_gen_movi_i64(TCGv ret, int64_t arg)
+{
+ tcg_gen_movi_i32(ret, arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32);
+}
+
+static inline void tcg_gen_ld8u_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_ld8u_i32(ret, arg2, offset);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_ld8s_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_ld8s_i32(ret, arg2, offset);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_ld16u_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_ld16u_i32(ret, arg2, offset);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_ld16s_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_ld16s_i32(ret, arg2, offset);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_ld32u_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_ld_i32(ret, arg2, offset);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_ld32s_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_ld_i32(ret, arg2, offset);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_ld_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ /* since arg2 and ret have different types, they cannot be the
+ same temporary */
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
+ tcg_gen_ld_i32(ret, arg2, offset + 4);
+#else
+ tcg_gen_ld_i32(ret, arg2, offset);
+ tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4);
+#endif
+}
+
+static inline void tcg_gen_st8_i64(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_st8_i32(arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st16_i64(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_st16_i32(arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st32_i64(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_st_i32(arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st_i64(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
+ tcg_gen_st_i32(arg1, arg2, offset + 4);
+#else
+ tcg_gen_st_i32(arg1, arg2, offset);
+ tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4);
+#endif
+}
+
+static inline void tcg_gen_add_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op6(INDEX_op_add2_i32, ret, TCGV_HIGH(ret),
+ arg1, TCGV_HIGH(arg1), arg2, TCGV_HIGH(arg2));
+}
+
+static inline void tcg_gen_addi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_add_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_sub_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op6(INDEX_op_sub2_i32, ret, TCGV_HIGH(ret),
+ arg1, TCGV_HIGH(arg1), arg2, TCGV_HIGH(arg2));
+}
+
+static inline void tcg_gen_subi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_sub_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_and_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_and_i32(ret, arg1, arg2);
+ tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+}
+
+static inline void tcg_gen_andi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ tcg_gen_andi_i32(ret, arg1, arg2);
+ tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
+}
+
+static inline void tcg_gen_or_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_or_i32(ret, arg1, arg2);
+ tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+}
+
+static inline void tcg_gen_ori_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ tcg_gen_ori_i32(ret, arg1, arg2);
+ tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
+}
+
+static inline void tcg_gen_xor_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_xor_i32(ret, arg1, arg2);
+ tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
+}
+
+static inline void tcg_gen_xori_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ tcg_gen_xori_i32(ret, arg1, arg2);
+ tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
+}
+
+/* XXX: use generic code once basic block handling is OK, or
+ CPU-specific code (x86) */
+static inline void tcg_gen_shl_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_shl_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shli_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0);
+}
+
+static inline void tcg_gen_shr_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_shr_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shri_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0);
+}
+
+static inline void tcg_gen_sar_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_sar_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_sari_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1);
+}
+
+static inline void tcg_gen_brcond_i64(int cond, TCGv arg1, TCGv arg2,
+ int label_index)
+{
+ tcg_gen_op6ii(INDEX_op_brcond2_i32,
+ arg1, TCGV_HIGH(arg1), arg2, TCGV_HIGH(arg2),
+ cond, label_index);
+}
+
+static inline void tcg_gen_mul_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I32);
+
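+ /* 64x64->64 multiply from 32-bit pieces: mulu2 yields the full
+ 64-bit product of the low halves, then the cross products
+ arg1.lo*arg2.hi and arg1.hi*arg2.lo are added into the high
+ word. */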
+ tcg_gen_op4(INDEX_op_mulu2_i32, t0, TCGV_HIGH(t0), arg1, arg2);
+
+ tcg_gen_mul_i32(t1, arg1, TCGV_HIGH(arg2));
+ tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
+ tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), arg2);
+ tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
+
+ tcg_gen_mov_i64(ret, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static inline void tcg_gen_muli_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_mul_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_div_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_div_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_rem_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_rem_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_divu_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_divu_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_remu_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_helper_1_2(tcg_helper_remu_i64, ret, arg1, arg2);
+}
+
+#else
+
+static inline void tcg_gen_mov_i64(TCGv ret, TCGv arg)
+{
+ if (GET_TCGV(ret) != GET_TCGV(arg))
+ tcg_gen_op2(INDEX_op_mov_i64, ret, arg);
+}
+
+static inline void tcg_gen_movi_i64(TCGv ret, int64_t arg)
+{
+ tcg_gen_op2i(INDEX_op_movi_i64, ret, arg);
+}
+
+static inline void tcg_gen_ld8u_i64(TCGv ret, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld8u_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld8s_i64(TCGv ret, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld8s_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld16u_i64(TCGv ret, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld16u_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld16s_i64(TCGv ret, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld16s_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld32u_i64(TCGv ret, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld32u_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld32s_i64(TCGv ret, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld32s_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_ld_i64(TCGv ret, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_ld_i64, ret, arg2, offset);
+}
+
+static inline void tcg_gen_st8_i64(TCGv arg1, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st8_i64, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st16_i64(TCGv arg1, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st16_i64, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st32_i64(TCGv arg1, TCGv arg2,
+ tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st32_i64, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_st_i64(TCGv arg1, TCGv arg2, tcg_target_long offset)
+{
+ tcg_gen_op3i(INDEX_op_st_i64, arg1, arg2, offset);
+}
+
+static inline void tcg_gen_add_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_add_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_addi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_add_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_sub_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_sub_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_subi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_sub_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_and_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_and_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_andi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_and_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_or_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_or_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_ori_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_or_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_xor_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_xor_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_xori_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_xor_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_shl_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_shl_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shli_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_shl_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_shr_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_shr_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_shri_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_shr_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_sar_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_sar_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_sari_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_sar_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static inline void tcg_gen_brcond_i64(int cond, TCGv arg1, TCGv arg2,
+ int label_index)
+{
+ tcg_gen_op4ii(INDEX_op_brcond_i64, arg1, arg2, cond, label_index);
+}
+
+static inline void tcg_gen_mul_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_mul_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_muli_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_mul_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+#ifdef TCG_TARGET_HAS_div_i64
+static inline void tcg_gen_div_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_div_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_rem_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_rem_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_divu_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_divu_i64, ret, arg1, arg2);
+}
+
+static inline void tcg_gen_remu_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_op3(INDEX_op_remu_i64, ret, arg1, arg2);
+}
+#else
+static inline void tcg_gen_div_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_sari_i64(t0, arg1, 63);
+ tcg_gen_op5(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_rem_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_sari_i64(t0, arg1, 63);
+ tcg_gen_op5(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_divu_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_movi_i64(t0, 0);
+ tcg_gen_op5(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_remu_i64(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_movi_i64(t0, 0);
+ tcg_gen_op5(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
+ tcg_temp_free(t0);
+}
+#endif
+
+#endif
+
+static inline void tcg_gen_brcondi_i64(int cond, TCGv arg1, int64_t arg2,
+ int label_index)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_brcond_i64(cond, arg1, t0, label_index);
+ tcg_temp_free(t0);
+}
+
+/***************************************/
+/* optional operations */
+
+static inline void tcg_gen_ext8s_i32(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_ext8s_i32
+ tcg_gen_op2(INDEX_op_ext8s_i32, ret, arg);
+#else
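+ /* Sign extension without a dedicated op: shift the byte into the
+ top of the word, then shift it back arithmetically. */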
+ tcg_gen_shli_i32(ret, arg, 24);
+ tcg_gen_sari_i32(ret, ret, 24);
+#endif
+}
+
+static inline void tcg_gen_ext16s_i32(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_ext16s_i32
+ tcg_gen_op2(INDEX_op_ext16s_i32, ret, arg);
+#else
+ tcg_gen_shli_i32(ret, arg, 16);
+ tcg_gen_sari_i32(ret, ret, 16);
+#endif
+}
+
+/* These are currently just for convenience.
+ We assume a target will recognise these automatically. */
+static inline void tcg_gen_ext8u_i32(TCGv ret, TCGv arg)
+{
+ tcg_gen_andi_i32(ret, arg, 0xffu);
+}
+
+static inline void tcg_gen_ext16u_i32(TCGv ret, TCGv arg)
+{
+ tcg_gen_andi_i32(ret, arg, 0xffffu);
+}
+
+/* Note: we assume the two high bytes are set to zero */
+static inline void tcg_gen_bswap16_i32(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_bswap16_i32
+ tcg_gen_op2(INDEX_op_bswap16_i32, ret, arg);
+#else
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ t1 = tcg_temp_new(TCG_TYPE_I32);
+
+ tcg_gen_shri_i32(t0, arg, 8);
+ tcg_gen_andi_i32(t1, arg, 0x000000ff);
+ tcg_gen_shli_i32(t1, t1, 8);
+ tcg_gen_or_i32(ret, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#endif
+}
+
+static inline void tcg_gen_bswap_i32(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_bswap_i32
+ tcg_gen_op2(INDEX_op_bswap_i32, ret, arg);
+#else
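+ /* Generic byte reversal: isolate each byte with and/shift and OR
+ the four bytes back together in the opposite order. */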
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ t1 = tcg_temp_new(TCG_TYPE_I32);
+
+ tcg_gen_shli_i32(t0, arg, 24);
+
+ tcg_gen_andi_i32(t1, arg, 0x0000ff00);
+ tcg_gen_shli_i32(t1, t1, 8);
+ tcg_gen_or_i32(t0, t0, t1);
+
+ tcg_gen_shri_i32(t1, arg, 8);
+ tcg_gen_andi_i32(t1, t1, 0x0000ff00);
+ tcg_gen_or_i32(t0, t0, t1);
+
+ tcg_gen_shri_i32(t1, arg, 24);
+ tcg_gen_or_i32(ret, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#endif
+}
+
+#if TCG_TARGET_REG_BITS == 32
+static inline void tcg_gen_ext8s_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_ext8s_i32(ret, arg);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_ext16s_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_ext16s_i32(ret, arg);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_ext32s_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_mov_i32(ret, arg);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_ext8u_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_ext8u_i32(ret, arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_ext16u_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_ext16u_i32(ret, arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_ext32u_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_mov_i32(ret, arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_trunc_i64_i32(TCGv ret, TCGv arg)
+{
+ tcg_gen_mov_i32(ret, arg);
+}
+
+static inline void tcg_gen_extu_i32_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_mov_i32(ret, arg);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+}
+
+static inline void tcg_gen_ext_i32_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_mov_i32(ret, arg);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+}
+
+static inline void tcg_gen_bswap_i64(TCGv ret, TCGv arg)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ t1 = tcg_temp_new(TCG_TYPE_I32);
+
+ tcg_gen_bswap_i32(t0, arg);
+ tcg_gen_bswap_i32(t1, TCGV_HIGH(arg));
+ tcg_gen_mov_i32(ret, t1);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
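+
+/* i.e. byte swap each 32 bit half, then exchange the halves */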
+#else
+
+static inline void tcg_gen_ext8s_i64(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_ext8s_i64
+ tcg_gen_op2(INDEX_op_ext8s_i64, ret, arg);
+#else
+ tcg_gen_shli_i64(ret, arg, 56);
+ tcg_gen_sari_i64(ret, ret, 56);
+#endif
+}
+
+static inline void tcg_gen_ext16s_i64(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_ext16s_i64
+ tcg_gen_op2(INDEX_op_ext16s_i64, ret, arg);
+#else
+ tcg_gen_shli_i64(ret, arg, 48);
+ tcg_gen_sari_i64(ret, ret, 48);
+#endif
+}
+
+static inline void tcg_gen_ext32s_i64(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_ext32s_i64
+ tcg_gen_op2(INDEX_op_ext32s_i64, ret, arg);
+#else
+ tcg_gen_shli_i64(ret, arg, 32);
+ tcg_gen_sari_i64(ret, ret, 32);
+#endif
+}
+
+static inline void tcg_gen_ext8u_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_andi_i64(ret, arg, 0xffu);
+}
+
+static inline void tcg_gen_ext16u_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_andi_i64(ret, arg, 0xffffu);
+}
+
+static inline void tcg_gen_ext32u_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_andi_i64(ret, arg, 0xffffffffu);
+}
+
+/* Note: we assume the target supports move between 32 and 64 bit
+ registers. This will probably break MIPS64 targets. */
+static inline void tcg_gen_trunc_i64_i32(TCGv ret, TCGv arg)
+{
+ tcg_gen_mov_i32(ret, arg);
+}
+
+/* Note: we assume the target supports move between 32 and 64 bit
+ registers */
+static inline void tcg_gen_extu_i32_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_andi_i64(ret, arg, 0xffffffffu);
+}
+
+/* Note: we assume the target supports move between 32 and 64 bit
+ registers */
+static inline void tcg_gen_ext_i32_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_ext32s_i64(ret, arg);
+}
+
+static inline void tcg_gen_bswap_i64(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_bswap_i64
+ tcg_gen_op2(INDEX_op_bswap_i64, ret, arg);
+#else
+ TCGv t0, t1;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ t1 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_shli_i64(t0, arg, 56);
+
+ tcg_gen_andi_i64(t1, arg, 0x0000ff00);
+ tcg_gen_shli_i64(t1, t1, 40);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_andi_i64(t1, arg, 0x00ff0000);
+ tcg_gen_shli_i64(t1, t1, 24);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_andi_i64(t1, arg, 0xff000000);
+ tcg_gen_shli_i64(t1, t1, 8);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 8);
+ tcg_gen_andi_i64(t1, t1, 0xff000000);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 24);
+ tcg_gen_andi_i64(t1, t1, 0x00ff0000);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 40);
+ tcg_gen_andi_i64(t1, t1, 0x0000ff00);
+ tcg_gen_or_i64(t0, t0, t1);
+
+ tcg_gen_shri_i64(t1, arg, 56);
+ tcg_gen_or_i64(ret, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#endif
+}
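+
+/* In the fallback, byte i of arg moves to byte 7-i: the two unmasked
+ shifts handle bytes 0 and 7, and each mask/shift pair places one of
+ the six middle bytes. */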
+
+#endif
+
+static inline void tcg_gen_neg_i32(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_neg_i32
+ tcg_gen_op2(INDEX_op_neg_i32, ret, arg);
+#else
+ TCGv t0 = tcg_const_i32(0);
+ tcg_gen_sub_i32(ret, t0, arg);
+ tcg_temp_free(t0);
+#endif
+}
+
+static inline void tcg_gen_neg_i64(TCGv ret, TCGv arg)
+{
+#ifdef TCG_TARGET_HAS_neg_i64
+ tcg_gen_op2(INDEX_op_neg_i64, ret, arg);
+#else
+ TCGv t0 = tcg_const_i64(0);
+ tcg_gen_sub_i64(ret, t0, arg);
+ tcg_temp_free(t0);
+#endif
+}
+
+static inline void tcg_gen_not_i32(TCGv ret, TCGv arg)
+{
+ tcg_gen_xori_i32(ret, arg, -1);
+}
+
+static inline void tcg_gen_not_i64(TCGv ret, TCGv arg)
+{
+ tcg_gen_xori_i64(ret, arg, -1);
+}
+
+static inline void tcg_gen_discard_i32(TCGv arg)
+{
+ tcg_gen_op1(INDEX_op_discard, arg);
+}
+
+#if TCG_TARGET_REG_BITS == 32
+static inline void tcg_gen_discard_i64(TCGv arg)
+{
+ tcg_gen_discard_i32(arg);
+ tcg_gen_discard_i32(TCGV_HIGH(arg));
+}
+#else
+static inline void tcg_gen_discard_i64(TCGv arg)
+{
+ tcg_gen_op1(INDEX_op_discard, arg);
+}
+#endif
+
+/***************************************/
+/* QEMU specific operations. Their types depend on the QEMU CPU
+ type. */
+#ifndef TARGET_LONG_BITS
+#error must include QEMU headers
+#endif
+
+/* debug info: write the PC of the corresponding QEMU CPU instruction */
+static inline void tcg_gen_debug_insn_start(uint64_t pc)
+{
+ /* XXX: must really use a 32 bit size for TCGArg in all cases */
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+ tcg_gen_op2ii(INDEX_op_debug_insn_start,
+ (uint32_t)(pc), (uint32_t)(pc >> 32));
+#else
+ tcg_gen_op1i(INDEX_op_debug_insn_start, pc);
+#endif
+}
+
+static inline void tcg_gen_exit_tb(tcg_target_long val)
+{
+ tcg_gen_op1i(INDEX_op_exit_tb, val);
+}
+
+static inline void tcg_gen_goto_tb(int idx)
+{
+ tcg_gen_op1i(INDEX_op_goto_tb, idx);
+}
+
+#if TCG_TARGET_REG_BITS == 32
+static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_ld8u, ret, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_ld8u, ret, addr, TCGV_HIGH(addr), mem_index);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+#endif
+}
+
+static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_ld8s, ret, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_ld8s, ret, addr, TCGV_HIGH(addr), mem_index);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+#endif
+}
+
+static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_ld16u, ret, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_ld16u, ret, addr, TCGV_HIGH(addr), mem_index);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+#endif
+}
+
+static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_ld16s, ret, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_ld16s, ret, addr, TCGV_HIGH(addr), mem_index);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+#endif
+}
+
+static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_ld32u, ret, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_ld32u, ret, addr, TCGV_HIGH(addr), mem_index);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+#endif
+}
+
+static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_ld32u, ret, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_ld32u, ret, addr, TCGV_HIGH(addr), mem_index);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), ret, 31);
+#endif
+}
+
+static inline void tcg_gen_qemu_ld64(TCGv ret, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op4i(INDEX_op_qemu_ld64, ret, TCGV_HIGH(ret), addr, mem_index);
+#else
+ tcg_gen_op5i(INDEX_op_qemu_ld64, ret, TCGV_HIGH(ret),
+ addr, TCGV_HIGH(addr), mem_index);
+#endif
+}
+
+static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_st8, arg, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_st8, arg, addr, TCGV_HIGH(addr), mem_index);
+#endif
+}
+
+static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_st16, arg, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_st16, arg, addr, TCGV_HIGH(addr), mem_index);
+#endif
+}
+
+static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op3i(INDEX_op_qemu_st32, arg, addr, mem_index);
+#else
+ tcg_gen_op4i(INDEX_op_qemu_st32, arg, addr, TCGV_HIGH(addr), mem_index);
+#endif
+}
+
+static inline void tcg_gen_qemu_st64(TCGv arg, TCGv addr, int mem_index)
+{
+#if TARGET_LONG_BITS == 32
+ tcg_gen_op4i(INDEX_op_qemu_st64, arg, TCGV_HIGH(arg), addr, mem_index);
+#else
+ tcg_gen_op5i(INDEX_op_qemu_st64, arg, TCGV_HIGH(arg),
+ addr, TCGV_HIGH(addr), mem_index);
+#endif
+}
+
+#define tcg_gen_ld_ptr tcg_gen_ld_i32
+#define tcg_gen_discard_ptr tcg_gen_discard_i32
+
+#else /* TCG_TARGET_REG_BITS == 32 */
+
+static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld8u, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld8s, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld16u, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld16s, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld32u, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld32s, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_ld64(TCGv ret, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_ld64, ret, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_st8, arg, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_st16, arg, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_st32, arg, addr, mem_index);
+}
+
+static inline void tcg_gen_qemu_st64(TCGv arg, TCGv addr, int mem_index)
+{
+ tcg_gen_op3i(INDEX_op_qemu_st64, arg, addr, mem_index);
+}
+
+#define tcg_gen_ld_ptr tcg_gen_ld_i64
+#define tcg_gen_discard_ptr tcg_gen_discard_i64
+
+#endif /* TCG_TARGET_REG_BITS != 32 */
+
+#if TARGET_LONG_BITS == 64
+#define TCG_TYPE_TL TCG_TYPE_I64
+#define tcg_gen_movi_tl tcg_gen_movi_i64
+#define tcg_gen_mov_tl tcg_gen_mov_i64
+#define tcg_gen_ld8u_tl tcg_gen_ld8u_i64
+#define tcg_gen_ld8s_tl tcg_gen_ld8s_i64
+#define tcg_gen_ld16u_tl tcg_gen_ld16u_i64
+#define tcg_gen_ld16s_tl tcg_gen_ld16s_i64
+#define tcg_gen_ld32u_tl tcg_gen_ld32u_i64
+#define tcg_gen_ld32s_tl tcg_gen_ld32s_i64
+#define tcg_gen_ld_tl tcg_gen_ld_i64
+#define tcg_gen_st8_tl tcg_gen_st8_i64
+#define tcg_gen_st16_tl tcg_gen_st16_i64
+#define tcg_gen_st32_tl tcg_gen_st32_i64
+#define tcg_gen_st_tl tcg_gen_st_i64
+#define tcg_gen_add_tl tcg_gen_add_i64
+#define tcg_gen_addi_tl tcg_gen_addi_i64
+#define tcg_gen_sub_tl tcg_gen_sub_i64
+#define tcg_gen_neg_tl tcg_gen_neg_i64
+#define tcg_gen_subi_tl tcg_gen_subi_i64
+#define tcg_gen_and_tl tcg_gen_and_i64
+#define tcg_gen_andi_tl tcg_gen_andi_i64
+#define tcg_gen_or_tl tcg_gen_or_i64
+#define tcg_gen_ori_tl tcg_gen_ori_i64
+#define tcg_gen_xor_tl tcg_gen_xor_i64
+#define tcg_gen_xori_tl tcg_gen_xori_i64
+#define tcg_gen_not_tl tcg_gen_not_i64
+#define tcg_gen_shl_tl tcg_gen_shl_i64
+#define tcg_gen_shli_tl tcg_gen_shli_i64
+#define tcg_gen_shr_tl tcg_gen_shr_i64
+#define tcg_gen_shri_tl tcg_gen_shri_i64
+#define tcg_gen_sar_tl tcg_gen_sar_i64
+#define tcg_gen_sari_tl tcg_gen_sari_i64
+#define tcg_gen_brcond_tl tcg_gen_brcond_i64
+#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64
+#define tcg_gen_mul_tl tcg_gen_mul_i64
+#define tcg_gen_muli_tl tcg_gen_muli_i64
+#define tcg_gen_discard_tl tcg_gen_discard_i64
+#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32
+#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64
+#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64
+#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64
+#define tcg_gen_extu_tl_i64 tcg_gen_mov_i64
+#define tcg_gen_ext_tl_i64 tcg_gen_mov_i64
+#define tcg_gen_ext8u_tl tcg_gen_ext8u_i64
+#define tcg_gen_ext8s_tl tcg_gen_ext8s_i64
+#define tcg_gen_ext16u_tl tcg_gen_ext16u_i64
+#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64
+#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64
+#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64
+#define tcg_const_tl tcg_const_i64
+#else
+#define TCG_TYPE_TL TCG_TYPE_I32
+#define tcg_gen_movi_tl tcg_gen_movi_i32
+#define tcg_gen_mov_tl tcg_gen_mov_i32
+#define tcg_gen_ld8u_tl tcg_gen_ld8u_i32
+#define tcg_gen_ld8s_tl tcg_gen_ld8s_i32
+#define tcg_gen_ld16u_tl tcg_gen_ld16u_i32
+#define tcg_gen_ld16s_tl tcg_gen_ld16s_i32
+#define tcg_gen_ld32u_tl tcg_gen_ld_i32
+#define tcg_gen_ld32s_tl tcg_gen_ld_i32
+#define tcg_gen_ld_tl tcg_gen_ld_i32
+#define tcg_gen_st8_tl tcg_gen_st8_i32
+#define tcg_gen_st16_tl tcg_gen_st16_i32
+#define tcg_gen_st32_tl tcg_gen_st_i32
+#define tcg_gen_st_tl tcg_gen_st_i32
+#define tcg_gen_add_tl tcg_gen_add_i32
+#define tcg_gen_addi_tl tcg_gen_addi_i32
+#define tcg_gen_sub_tl tcg_gen_sub_i32
+#define tcg_gen_neg_tl tcg_gen_neg_i32
+#define tcg_gen_subi_tl tcg_gen_subi_i32
+#define tcg_gen_and_tl tcg_gen_and_i32
+#define tcg_gen_andi_tl tcg_gen_andi_i32
+#define tcg_gen_or_tl tcg_gen_or_i32
+#define tcg_gen_ori_tl tcg_gen_ori_i32
+#define tcg_gen_xor_tl tcg_gen_xor_i32
+#define tcg_gen_xori_tl tcg_gen_xori_i32
+#define tcg_gen_not_tl tcg_gen_not_i32
+#define tcg_gen_shl_tl tcg_gen_shl_i32
+#define tcg_gen_shli_tl tcg_gen_shli_i32
+#define tcg_gen_shr_tl tcg_gen_shr_i32
+#define tcg_gen_shri_tl tcg_gen_shri_i32
+#define tcg_gen_sar_tl tcg_gen_sar_i32
+#define tcg_gen_sari_tl tcg_gen_sari_i32
+#define tcg_gen_brcond_tl tcg_gen_brcond_i32
+#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32
+#define tcg_gen_mul_tl tcg_gen_mul_i32
+#define tcg_gen_muli_tl tcg_gen_muli_i32
+#define tcg_gen_discard_tl tcg_gen_discard_i32
+#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32
+#define tcg_gen_trunc_i64_tl tcg_gen_trunc_i64_i32
+#define tcg_gen_extu_i32_tl tcg_gen_mov_i32
+#define tcg_gen_ext_i32_tl tcg_gen_mov_i32
+#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64
+#define tcg_gen_ext_tl_i64 tcg_gen_ext_i32_i64
+#define tcg_gen_ext8u_tl tcg_gen_ext8u_i32
+#define tcg_gen_ext8s_tl tcg_gen_ext8s_i32
+#define tcg_gen_ext16u_tl tcg_gen_ext16u_i32
+#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32
+#define tcg_gen_ext32u_tl tcg_gen_mov_i32
+#define tcg_gen_ext32s_tl tcg_gen_mov_i32
+#define tcg_const_tl tcg_const_i32
+#endif
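+
+/* With these aliases a front end emits width independent code; e.g.
+ (cpu_pc and cpu_tmp are hypothetical globals, label comes from
+ gen_new_label()):
+
+   tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
+   tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp, 0, label);
+
+ expand to the _i32 or _i64 forms according to TARGET_LONG_BITS. */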
+
+#if TCG_TARGET_REG_BITS == 32
+#define tcg_gen_add_ptr tcg_gen_add_i32
+#define tcg_gen_addi_ptr tcg_gen_addi_i32
+#define tcg_gen_ext_i32_ptr tcg_gen_mov_i32
+#else /* TCG_TARGET_REG_BITS == 32 */
+#define tcg_gen_add_ptr tcg_gen_add_i64
+#define tcg_gen_addi_ptr tcg_gen_addi_i64
+#define tcg_gen_ext_i32_ptr tcg_gen_ext_i32_i64
+#endif /* TCG_TARGET_REG_BITS != 32 */
+
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
new file mode 100644
index 0000000..31ae550
--- /dev/null
+++ b/tcg/tcg-opc.h
@@ -0,0 +1,238 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifdef CONFIG_DYNGEN_OP
+#include "dyngen-opc.h"
+#endif
+
+#ifndef DEF2
+#define DEF2(name, oargs, iargs, cargs, flags) DEF(name, oargs + iargs + cargs, 0)
+#endif
+
+/* predefined ops */
+DEF2(end, 0, 0, 0, 0) /* must be kept first */
+DEF2(nop, 0, 0, 0, 0)
+DEF2(nop1, 0, 0, 1, 0)
+DEF2(nop2, 0, 0, 2, 0)
+DEF2(nop3, 0, 0, 3, 0)
+DEF2(nopn, 0, 0, 1, 0) /* variable number of parameters */
+
+DEF2(discard, 1, 0, 0, 0)
+
+DEF2(set_label, 0, 0, 1, 0)
+DEF2(call, 0, 1, 2, TCG_OPF_SIDE_EFFECTS) /* variable number of parameters */
+DEF2(jmp, 0, 1, 0, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+DEF2(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+
+DEF2(mov_i32, 1, 1, 0, 0)
+DEF2(movi_i32, 1, 0, 1, 0)
+/* load/store */
+DEF2(ld8u_i32, 1, 1, 1, 0)
+DEF2(ld8s_i32, 1, 1, 1, 0)
+DEF2(ld16u_i32, 1, 1, 1, 0)
+DEF2(ld16s_i32, 1, 1, 1, 0)
+DEF2(ld_i32, 1, 1, 1, 0)
+DEF2(st8_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+DEF2(st16_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+DEF2(st_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+/* arith */
+DEF2(add_i32, 1, 2, 0, 0)
+DEF2(sub_i32, 1, 2, 0, 0)
+DEF2(mul_i32, 1, 2, 0, 0)
+#ifdef TCG_TARGET_HAS_div_i32
+DEF2(div_i32, 1, 2, 0, 0)
+DEF2(divu_i32, 1, 2, 0, 0)
+DEF2(rem_i32, 1, 2, 0, 0)
+DEF2(remu_i32, 1, 2, 0, 0)
+#else
+DEF2(div2_i32, 2, 3, 0, 0)
+DEF2(divu2_i32, 2, 3, 0, 0)
+#endif
+DEF2(and_i32, 1, 2, 0, 0)
+DEF2(or_i32, 1, 2, 0, 0)
+DEF2(xor_i32, 1, 2, 0, 0)
+/* shifts */
+DEF2(shl_i32, 1, 2, 0, 0)
+DEF2(shr_i32, 1, 2, 0, 0)
+DEF2(sar_i32, 1, 2, 0, 0)
+
+DEF2(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+#if TCG_TARGET_REG_BITS == 32
+DEF2(add2_i32, 2, 4, 0, 0)
+DEF2(sub2_i32, 2, 4, 0, 0)
+DEF2(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+DEF2(mulu2_i32, 2, 2, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext8s_i32
+DEF2(ext8s_i32, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext16s_i32
+DEF2(ext16s_i32, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_bswap_i32
+DEF2(bswap_i32, 1, 1, 0, 0)
+#endif
+
+#if TCG_TARGET_REG_BITS == 64
+DEF2(mov_i64, 1, 1, 0, 0)
+DEF2(movi_i64, 1, 0, 1, 0)
+/* load/store */
+DEF2(ld8u_i64, 1, 1, 1, 0)
+DEF2(ld8s_i64, 1, 1, 1, 0)
+DEF2(ld16u_i64, 1, 1, 1, 0)
+DEF2(ld16s_i64, 1, 1, 1, 0)
+DEF2(ld32u_i64, 1, 1, 1, 0)
+DEF2(ld32s_i64, 1, 1, 1, 0)
+DEF2(ld_i64, 1, 1, 1, 0)
+DEF2(st8_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+DEF2(st16_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+DEF2(st32_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+DEF2(st_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)
+/* arith */
+DEF2(add_i64, 1, 2, 0, 0)
+DEF2(sub_i64, 1, 2, 0, 0)
+DEF2(mul_i64, 1, 2, 0, 0)
+#ifdef TCG_TARGET_HAS_div_i64
+DEF2(div_i64, 1, 2, 0, 0)
+DEF2(divu_i64, 1, 2, 0, 0)
+DEF2(rem_i64, 1, 2, 0, 0)
+DEF2(remu_i64, 1, 2, 0, 0)
+#else
+DEF2(div2_i64, 2, 3, 0, 0)
+DEF2(divu2_i64, 2, 3, 0, 0)
+#endif
+DEF2(and_i64, 1, 2, 0, 0)
+DEF2(or_i64, 1, 2, 0, 0)
+DEF2(xor_i64, 1, 2, 0, 0)
+/* shifts */
+DEF2(shl_i64, 1, 2, 0, 0)
+DEF2(shr_i64, 1, 2, 0, 0)
+DEF2(sar_i64, 1, 2, 0, 0)
+
+DEF2(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+#ifdef TCG_TARGET_HAS_ext8s_i64
+DEF2(ext8s_i64, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext16s_i64
+DEF2(ext16s_i64, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext32s_i64
+DEF2(ext32s_i64, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_bswap_i64
+DEF2(bswap_i64, 1, 1, 0, 0)
+#endif
+#endif
+#ifdef TCG_TARGET_HAS_neg_i32
+DEF2(neg_i32, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_neg_i64
+DEF2(neg_i64, 1, 1, 0, 0)
+#endif
+
+/* QEMU specific */
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+DEF2(debug_insn_start, 0, 0, 2, 0)
+#else
+DEF2(debug_insn_start, 0, 0, 1, 0)
+#endif
+DEF2(exit_tb, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+DEF2(goto_tb, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)
+/* Note: even if TARGET_LONG_BITS is not defined, the INDEX_op
+ constants must be defined */
+#if TCG_TARGET_REG_BITS == 32
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld8u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld8s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld16u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld16s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld32u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld32s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_ld64, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_ld64, 2, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_st8, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_st16, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_st32, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+#if TARGET_LONG_BITS == 32
+DEF2(qemu_st64, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#else
+DEF2(qemu_st64, 0, 4, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+#endif
+
+#else /* TCG_TARGET_REG_BITS == 32 */
+
+DEF2(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_ld64, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+
+DEF2(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF2(qemu_st64, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+
+#endif /* TCG_TARGET_REG_BITS != 32 */
+
+#undef DEF2
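+
+/* This file is an X-macro list: each includer defines DEF2 (or relies
+ on the DEF fallback above) before including it. A sketch of how an
+ opcode enum can be derived this way (assumed usage, mirroring how
+ tcg.h consumes the list):
+
+   enum {
+   #define DEF2(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
+   #include "tcg-opc.h"
+       NB_OPS,
+   };
+*/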
diff --git a/tcg/tcg-runtime.c b/tcg/tcg-runtime.c
new file mode 100644
index 0000000..575da43
--- /dev/null
+++ b/tcg/tcg-runtime.c
@@ -0,0 +1,68 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "config.h"
+#include "osdep.h"
+#include "tcg.h"
+
+int64_t tcg_helper_shl_i64(int64_t arg1, int64_t arg2)
+{
+ return arg1 << arg2;
+}
+
+int64_t tcg_helper_shr_i64(int64_t arg1, int64_t arg2)
+{
+ return (uint64_t)arg1 >> arg2;
+}
+
+int64_t tcg_helper_sar_i64(int64_t arg1, int64_t arg2)
+{
+ return arg1 >> arg2;
+}
+
+int64_t tcg_helper_div_i64(int64_t arg1, int64_t arg2)
+{
+ return arg1 / arg2;
+}
+
+int64_t tcg_helper_rem_i64(int64_t arg1, int64_t arg2)
+{
+ return arg1 % arg2;
+}
+
+uint64_t tcg_helper_divu_i64(uint64_t arg1, uint64_t arg2)
+{
+ return arg1 / arg2;
+}
+
+uint64_t tcg_helper_remu_i64(uint64_t arg1, uint64_t arg2)
+{
+ return arg1 % arg2;
+}
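+
+/* These helpers back 64 bit operations that some hosts cannot emit
+ inline; e.g. on 32 bit hosts tcg-op.h is expected to lower
+ tcg_gen_div_i64() into a call to tcg_helper_div_i64 (sketch of the
+ intent; the exact call emission lives in tcg-op.h). */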
+
diff --git a/tcg/tcg.c b/tcg/tcg.c
new file mode 100644
index 0000000..1b7bf5c
--- /dev/null
+++ b/tcg/tcg.c
@@ -0,0 +1,2081 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* define it to suppress various consistency checks (faster) */
+#define NDEBUG
+
+/* define it to use liveness analysis (better code) */
+#define USE_LIVENESS_ANALYSIS
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#ifdef _WIN32
+#include <malloc.h>
+#endif
+
+#include "config.h"
+#include "qemu-common.h"
+
+/* Note: the long term plan is to reduce the dependencies on the QEMU
+ CPU definitions. Currently they are used for qemu_ld/st
+ instructions */
+#define NO_CPU_IO_DEFS
+#include "cpu.h"
+#include "exec-all.h"
+
+#include "tcg-op.h"
+#include "elf.h"
+
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend);
+
+TCGOpDef tcg_op_defs[] = {
+#define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size },
+#define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, oargs + iargs + cargs, flags, 0 },
+#include "tcg-opc.h"
+#undef DEF
+#undef DEF2
+};
+
+TCGRegSet tcg_target_available_regs[2];
+TCGRegSet tcg_target_call_clobber_regs;
+
+/* XXX: move that inside the context */
+uint16_t *gen_opc_ptr;
+TCGArg *gen_opparam_ptr;
+
+static inline void tcg_out8(TCGContext *s, uint8_t v)
+{
+ *s->code_ptr++ = v;
+}
+
+static inline void tcg_out16(TCGContext *s, uint16_t v)
+{
+ *(uint16_t *)s->code_ptr = v;
+ s->code_ptr += 2;
+}
+
+static inline void tcg_out32(TCGContext *s, uint32_t v)
+{
+ *(uint32_t *)s->code_ptr = v;
+ s->code_ptr += 4;
+}
+
+/* label relocation processing */
+
+void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
+ int label_index, long addend)
+{
+ TCGLabel *l;
+ TCGRelocation *r;
+
+ l = &s->labels[label_index];
+ if (l->has_value) {
+ /* FIXME: This may break relocations on RISC targets that
+ modify instruction fields in place. The caller may not have
+ written the initial value. */
+ patch_reloc(code_ptr, type, l->u.value, addend);
+ } else {
+ /* add a new relocation entry */
+ r = tcg_malloc(sizeof(TCGRelocation));
+ r->type = type;
+ r->ptr = code_ptr;
+ r->addend = addend;
+ r->next = l->u.first_reloc;
+ l->u.first_reloc = r;
+ }
+}
+
+static void tcg_out_label(TCGContext *s, int label_index,
+ tcg_target_long value)
+{
+ TCGLabel *l;
+ TCGRelocation *r;
+
+ l = &s->labels[label_index];
+ if (l->has_value)
+ tcg_abort();
+ r = l->u.first_reloc;
+ while (r != NULL) {
+ patch_reloc(r->ptr, r->type, value, r->addend);
+ r = r->next;
+ }
+ l->has_value = 1;
+ l->u.value = value;
+}
+
+int gen_new_label(void)
+{
+ TCGContext *s = &tcg_ctx;
+ int idx;
+ TCGLabel *l;
+
+ if (s->nb_labels >= TCG_MAX_LABELS)
+ tcg_abort();
+ idx = s->nb_labels++;
+ l = &s->labels[idx];
+ l->has_value = 0;
+ l->u.first_reloc = NULL;
+ return idx;
+}
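+
+/* Typical forward branch flow: gen_new_label() allocates the label;
+ the backend emits the branch and calls tcg_out_reloc(), which queues
+ a TCGRelocation because the label has no value yet; when code
+ generation reaches the label, tcg_out_label() patches every queued
+ site via patch_reloc(). For backward branches the label already has
+ a value and tcg_out_reloc() patches immediately. */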
+
+#include "tcg-target.c"
+
+/* pool based memory allocation */
+void *tcg_malloc_internal(TCGContext *s, int size)
+{
+ TCGPool *p;
+ int pool_size;
+
+ if (size > TCG_POOL_CHUNK_SIZE) {
+ /* big malloc: insert a new pool (XXX: could optimize) */
+ p = qemu_malloc(sizeof(TCGPool) + size);
+ p->size = size;
+ if (s->pool_current)
+ s->pool_current->next = p;
+ else
+ s->pool_first = p;
+ p->next = s->pool_current;
+ } else {
+ p = s->pool_current;
+ if (!p) {
+ p = s->pool_first;
+ if (!p)
+ goto new_pool;
+ } else {
+ if (!p->next) {
+ new_pool:
+ pool_size = TCG_POOL_CHUNK_SIZE;
+ p = qemu_malloc(sizeof(TCGPool) + pool_size);
+ p->size = pool_size;
+ p->next = NULL;
+ if (s->pool_current)
+ s->pool_current->next = p;
+ else
+ s->pool_first = p;
+ } else {
+ p = p->next;
+ }
+ }
+ }
+ s->pool_current = p;
+ s->pool_cur = p->data + size;
+ s->pool_end = p->data + p->size;
+ return p->data;
+}
+
+void tcg_pool_reset(TCGContext *s)
+{
+ s->pool_cur = s->pool_end = NULL;
+ s->pool_current = NULL;
+}
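+
+/* tcg_malloc_internal() is only the slow path; the common case is a
+ simple bump allocation from the current chunk. A sketch of the fast
+ path wrapper (assumed to live in tcg.h):
+
+   static inline void *tcg_malloc(int size)
+   {
+       TCGContext *s = &tcg_ctx;
+       uint8_t *ptr = s->pool_cur;
+       size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
+       if (ptr + size > s->pool_end)
+           return tcg_malloc_internal(s, size);
+       s->pool_cur = ptr + size;
+       return ptr;
+   }
+*/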
+
+void tcg_context_init(TCGContext *s)
+{
+ int op, total_args, n;
+ TCGOpDef *def;
+ TCGArgConstraint *args_ct;
+ int *sorted_args;
+
+ memset(s, 0, sizeof(*s));
+ s->temps = s->static_temps;
+ s->nb_globals = 0;
+
+ /* Count total number of arguments and allocate the corresponding
+ space */
+ total_args = 0;
+ for(op = 0; op < NB_OPS; op++) {
+ def = &tcg_op_defs[op];
+ n = def->nb_iargs + def->nb_oargs;
+ total_args += n;
+ }
+
+ args_ct = qemu_malloc(sizeof(TCGArgConstraint) * total_args);
+ sorted_args = qemu_malloc(sizeof(int) * total_args);
+
+ for(op = 0; op < NB_OPS; op++) {
+ def = &tcg_op_defs[op];
+ def->args_ct = args_ct;
+ def->sorted_args = sorted_args;
+ n = def->nb_iargs + def->nb_oargs;
+ sorted_args += n;
+ args_ct += n;
+ }
+
+ tcg_target_init(s);
+
+ /* init global prologue and epilogue */
+ s->code_buf = code_gen_prologue;
+ s->code_ptr = s->code_buf;
+ tcg_target_qemu_prologue(s);
+ flush_icache_range((unsigned long)s->code_buf,
+ (unsigned long)s->code_ptr);
+}
+
+void tcg_set_frame(TCGContext *s, int reg,
+ tcg_target_long start, tcg_target_long size)
+{
+ s->frame_start = start;
+ s->frame_end = start + size;
+ s->frame_reg = reg;
+}
+
+void tcg_func_start(TCGContext *s)
+{
+ int i;
+ tcg_pool_reset(s);
+ s->nb_temps = s->nb_globals;
+ for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
+ s->first_free_temp[i] = -1;
+ s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
+ s->nb_labels = 0;
+ s->current_frame_offset = s->frame_start;
+
+ gen_opc_ptr = gen_opc_buf;
+ gen_opparam_ptr = gen_opparam_buf;
+}
+
+static inline void tcg_temp_alloc(TCGContext *s, int n)
+{
+ if (n > TCG_MAX_TEMPS)
+ tcg_abort();
+}
+
+TCGv tcg_global_reg_new(TCGType type, int reg, const char *name)
+{
+ TCGContext *s = &tcg_ctx;
+ TCGTemp *ts;
+ int idx;
+
+#if TCG_TARGET_REG_BITS == 32
+ if (type != TCG_TYPE_I32)
+ tcg_abort();
+#endif
+ if (tcg_regset_test_reg(s->reserved_regs, reg))
+ tcg_abort();
+ idx = s->nb_globals;
+ tcg_temp_alloc(s, s->nb_globals + 1);
+ ts = &s->temps[s->nb_globals];
+ ts->base_type = type;
+ ts->type = type;
+ ts->fixed_reg = 1;
+ ts->reg = reg;
+ ts->name = name;
+ s->nb_globals++;
+ tcg_regset_set_reg(s->reserved_regs, reg);
+ return MAKE_TCGV(idx);
+}
+
+#if TCG_TARGET_REG_BITS == 32
+/* temporary hack to avoid register shortage for tcg_qemu_st64() */
+TCGv tcg_global_reg2_new_hack(TCGType type, int reg1, int reg2,
+ const char *name)
+{
+ TCGContext *s = &tcg_ctx;
+ TCGTemp *ts;
+ int idx;
+ char buf[64];
+
+ if (type != TCG_TYPE_I64)
+ tcg_abort();
+ idx = s->nb_globals;
+ tcg_temp_alloc(s, s->nb_globals + 2);
+ ts = &s->temps[s->nb_globals];
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->fixed_reg = 1;
+ ts->reg = reg1;
+ pstrcpy(buf, sizeof(buf), name);
+ pstrcat(buf, sizeof(buf), "_0");
+ ts->name = strdup(buf);
+
+ ts++;
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->fixed_reg = 1;
+ ts->reg = reg2;
+ pstrcpy(buf, sizeof(buf), name);
+ pstrcat(buf, sizeof(buf), "_1");
+ ts->name = strdup(buf);
+
+ s->nb_globals += 2;
+ return MAKE_TCGV(idx);
+}
+#endif
+
+TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset,
+ const char *name)
+{
+ TCGContext *s = &tcg_ctx;
+ TCGTemp *ts;
+ int idx;
+
+ idx = s->nb_globals;
+#if TCG_TARGET_REG_BITS == 32
+ if (type == TCG_TYPE_I64) {
+ char buf[64];
+ tcg_temp_alloc(s, s->nb_globals + 2);
+ ts = &s->temps[s->nb_globals];
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->fixed_reg = 0;
+ ts->mem_allocated = 1;
+ ts->mem_reg = reg;
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ ts->mem_offset = offset + 4;
+#else
+ ts->mem_offset = offset;
+#endif
+ pstrcpy(buf, sizeof(buf), name);
+ pstrcat(buf, sizeof(buf), "_0");
+ ts->name = strdup(buf);
+ ts++;
+
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->fixed_reg = 0;
+ ts->mem_allocated = 1;
+ ts->mem_reg = reg;
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ ts->mem_offset = offset;
+#else
+ ts->mem_offset = offset + 4;
+#endif
+ pstrcpy(buf, sizeof(buf), name);
+ pstrcat(buf, sizeof(buf), "_1");
+ ts->name = strdup(buf);
+
+ s->nb_globals += 2;
+ } else
+#endif
+ {
+ tcg_temp_alloc(s, s->nb_globals + 1);
+ ts = &s->temps[s->nb_globals];
+ ts->base_type = type;
+ ts->type = type;
+ ts->fixed_reg = 0;
+ ts->mem_allocated = 1;
+ ts->mem_reg = reg;
+ ts->mem_offset = offset;
+ ts->name = name;
+ s->nb_globals++;
+ }
+ return MAKE_TCGV(idx);
+}
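+
+/* Illustrative front end use (the CPUState field name is
+ hypothetical):
+
+   TCGv cpu_T0 = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
+                                    offsetof(CPUState, t0), "T0");
+
+ On 32 bit hosts a TCG_TYPE_I64 global is split into two I32 halves
+ named "name_0"/"name_1", with mem_offset adjusted for host
+ endianness, as done above. */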
+
+TCGv tcg_temp_new_internal(TCGType type, int temp_local)
+{
+ TCGContext *s = &tcg_ctx;
+ TCGTemp *ts;
+ int idx, k;
+
+ k = type;
+ if (temp_local)
+ k += TCG_TYPE_COUNT;
+ idx = s->first_free_temp[k];
+ if (idx != -1) {
+ /* There is already an available temp with the
+ right type */
+ ts = &s->temps[idx];
+ s->first_free_temp[k] = ts->next_free_temp;
+ ts->temp_allocated = 1;
+ assert(ts->temp_local == temp_local);
+ } else {
+ idx = s->nb_temps;
+#if TCG_TARGET_REG_BITS == 32
+ if (type == TCG_TYPE_I64) {
+ tcg_temp_alloc(s, s->nb_temps + 2);
+ ts = &s->temps[s->nb_temps];
+ ts->base_type = type;
+ ts->type = TCG_TYPE_I32;
+ ts->temp_allocated = 1;
+ ts->temp_local = temp_local;
+ ts->name = NULL;
+ ts++;
+ ts->base_type = TCG_TYPE_I32;
+ ts->type = TCG_TYPE_I32;
+ ts->temp_allocated = 1;
+ ts->temp_local = temp_local;
+ ts->name = NULL;
+ s->nb_temps += 2;
+ } else
+#endif
+ {
+ tcg_temp_alloc(s, s->nb_temps + 1);
+ ts = &s->temps[s->nb_temps];
+ ts->base_type = type;
+ ts->type = type;
+ ts->temp_allocated = 1;
+ ts->temp_local = temp_local;
+ ts->name = NULL;
+ s->nb_temps++;
+ }
+ }
+ return MAKE_TCGV(idx);
+}
+
+void tcg_temp_free(TCGv arg)
+{
+ TCGContext *s = &tcg_ctx;
+ TCGTemp *ts;
+ int idx = GET_TCGV(arg);
+ int k;
+
+ assert(idx >= s->nb_globals && idx < s->nb_temps);
+ ts = &s->temps[idx];
+ assert(ts->temp_allocated != 0);
+ ts->temp_allocated = 0;
+ k = ts->base_type;
+ if (ts->temp_local)
+ k += TCG_TYPE_COUNT;
+ ts->next_free_temp = s->first_free_temp[k];
+ s->first_free_temp[k] = idx;
+}
+
+
+TCGv tcg_const_i32(int32_t val)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ tcg_gen_movi_i32(t0, val);
+ return t0;
+}
+
+TCGv tcg_const_i64(int64_t val)
+{
+ TCGv t0;
+ t0 = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_movi_i64(t0, val);
+ return t0;
+}
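+
+/* Temporaries are managed explicitly; a typical pattern (a and b are
+ assumed pre-existing TCGv values):
+
+   TCGv t0 = tcg_temp_new(TCG_TYPE_I32);
+   tcg_gen_add_i32(t0, a, b);
+   ...
+   tcg_temp_free(t0);
+
+ tcg_temp_free() pushes the temp on a per-type free list so the next
+ tcg_temp_new() of the same type can reuse it. */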
+
+void tcg_register_helper(void *func, const char *name)
+{
+ TCGContext *s = &tcg_ctx;
+ int n;
+ if ((s->nb_helpers + 1) > s->allocated_helpers) {
+ n = s->allocated_helpers;
+ if (n == 0) {
+ n = 4;
+ } else {
+ n *= 2;
+ }
+ s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
+ s->allocated_helpers = n;
+ }
+ s->helpers[s->nb_helpers].func = (tcg_target_ulong)func;
+ s->helpers[s->nb_helpers].name = name;
+ s->nb_helpers++;
+}
+
+static inline TCGType tcg_get_base_type(TCGContext *s, TCGv arg)
+{
+ return s->temps[GET_TCGV(arg)].base_type;
+}
+
+static void tcg_gen_call_internal(TCGContext *s, TCGv func,
+ unsigned int flags,
+ unsigned int nb_rets, const TCGv *rets,
+ unsigned int nb_params, const TCGv *params)
+{
+ int i;
+ *gen_opc_ptr++ = INDEX_op_call;
+ *gen_opparam_ptr++ = (nb_rets << 16) | (nb_params + 1);
+ for(i = 0; i < nb_rets; i++) {
+ *gen_opparam_ptr++ = GET_TCGV(rets[i]);
+ }
+ for(i = 0; i < nb_params; i++) {
+ *gen_opparam_ptr++ = GET_TCGV(params[i]);
+ }
+ *gen_opparam_ptr++ = GET_TCGV(func);
+
+ *gen_opparam_ptr++ = flags;
+ /* total parameters, needed to go backward in the instruction stream */
+ *gen_opparam_ptr++ = 1 + nb_rets + nb_params + 3;
+}
+
+
+#if TCG_TARGET_REG_BITS < 64
+/* Note: we convert the 64 bit args to 32 bit and do some alignment
+ and endian swap. Maybe it would be better to do the alignment
+ and endian swap in tcg_reg_alloc_call(). */
+void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags,
+ unsigned int nb_rets, const TCGv *rets,
+ unsigned int nb_params, const TCGv *args1)
+{
+ TCGv ret, *args2, rets_2[2], arg;
+ int j, i, call_type;
+
+ if (nb_rets == 1) {
+ ret = rets[0];
+ if (tcg_get_base_type(s, ret) == TCG_TYPE_I64) {
+ nb_rets = 2;
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ rets_2[0] = TCGV_HIGH(ret);
+ rets_2[1] = ret;
+#else
+ rets_2[0] = ret;
+ rets_2[1] = TCGV_HIGH(ret);
+#endif
+ rets = rets_2;
+ }
+ }
+ args2 = alloca((nb_params * 3) * sizeof(TCGv));
+ j = 0;
+ call_type = (flags & TCG_CALL_TYPE_MASK);
+ for(i = 0; i < nb_params; i++) {
+ arg = args1[i];
+ if (tcg_get_base_type(s, arg) == TCG_TYPE_I64) {
+#ifdef TCG_TARGET_I386
+ /* REGPARM case: if the third parameter is 64 bit, it is
+ allocated on the stack */
+ if (j == 2 && call_type == TCG_CALL_TYPE_REGPARM) {
+ call_type = TCG_CALL_TYPE_REGPARM_2;
+ flags = (flags & ~TCG_CALL_TYPE_MASK) | call_type;
+ }
+ args2[j++] = arg;
+ args2[j++] = TCGV_HIGH(arg);
+#else
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ /* some targets want aligned 64 bit args */
+ if (j & 1) {
+ args2[j++] = TCG_CALL_DUMMY_ARG;
+ }
+#endif
+#ifdef TCG_TARGET_WORDS_BIGENDIAN
+ args2[j++] = TCGV_HIGH(arg);
+ args2[j++] = arg;
+#else
+ args2[j++] = arg;
+ args2[j++] = TCGV_HIGH(arg);
+#endif
+#endif
+ } else {
+ args2[j++] = arg;
+ }
+ }
+ tcg_gen_call_internal(s, func, flags,
+ nb_rets, rets, j, args2);
+}
+#else
+void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags,
+ unsigned int nb_rets, const TCGv *rets,
+ unsigned int nb_params, const TCGv *args1)
+{
+ tcg_gen_call_internal(s, func, flags,
+ nb_rets, rets, nb_params, args1);
+}
+#endif
+
+#if TCG_TARGET_REG_BITS == 32
+void tcg_gen_shifti_i64(TCGv ret, TCGv arg1,
+ int c, int right, int arith)
+{
+ if (c == 0) {
+ tcg_gen_mov_i32(ret, arg1);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ } else if (c >= 32) {
+ c -= 32;
+ if (right) {
+ if (arith) {
+ tcg_gen_sari_i32(ret, TCGV_HIGH(arg1), c);
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
+ } else {
+ tcg_gen_shri_i32(ret, TCGV_HIGH(arg1), c);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ }
+ } else {
+ tcg_gen_shli_i32(TCGV_HIGH(ret), arg1, c);
+ tcg_gen_movi_i32(ret, 0);
+ }
+ } else {
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new(TCG_TYPE_I32);
+ t1 = tcg_temp_new(TCG_TYPE_I32);
+ if (right) {
+ tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
+ if (arith)
+ tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
+ else
+ tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
+ tcg_gen_shri_i32(ret, arg1, c);
+ tcg_gen_or_i32(ret, ret, t0);
+ tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
+ } else {
+ tcg_gen_shri_i32(t0, arg1, 32 - c);
+ /* Note: ret can be the same as arg1, so we use t1 */
+ tcg_gen_shli_i32(t1, arg1, c);
+ tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
+ tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
+ tcg_gen_mov_i32(ret, t1);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+}
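+
+/* e.g. a logical right shift by 12 becomes
+       low  = (low >> 12) | (high << 20);
+       high =  high >> 12;                                   */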
+#endif
+
+static void tcg_reg_alloc_start(TCGContext *s)
+{
+ int i;
+ TCGTemp *ts;
+ for(i = 0; i < s->nb_globals; i++) {
+ ts = &s->temps[i];
+ if (ts->fixed_reg) {
+ ts->val_type = TEMP_VAL_REG;
+ } else {
+ ts->val_type = TEMP_VAL_MEM;
+ }
+ }
+ for(i = s->nb_globals; i < s->nb_temps; i++) {
+ ts = &s->temps[i];
+ ts->val_type = TEMP_VAL_DEAD;
+ ts->mem_allocated = 0;
+ ts->fixed_reg = 0;
+ }
+ for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
+ s->reg_to_temp[i] = -1;
+ }
+}
+
+static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
+ int idx)
+{
+ TCGTemp *ts;
+
+ ts = &s->temps[idx];
+ if (idx < s->nb_globals) {
+ pstrcpy(buf, buf_size, ts->name);
+ } else {
+ if (ts->temp_local)
+ snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
+ else
+ snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
+ }
+ return buf;
+}
+
+char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGv arg)
+{
+ return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV(arg));
+}
+
+static int helper_cmp(const void *p1, const void *p2)
+{
+ const TCGHelperInfo *th1 = p1;
+ const TCGHelperInfo *th2 = p2;
+ if (th1->func < th2->func)
+ return -1;
+ else if (th1->func == th2->func)
+ return 0;
+ else
+ return 1;
+}
+
+/* find helper definition (Note: A hash table would be better) */
+static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val)
+{
+ int m, m_min, m_max;
+ TCGHelperInfo *th;
+ tcg_target_ulong v;
+
+ if (unlikely(!s->helpers_sorted)) {
+ qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
+ helper_cmp);
+ s->helpers_sorted = 1;
+ }
+
+ /* binary search */
+ m_min = 0;
+ m_max = s->nb_helpers - 1;
+ while (m_min <= m_max) {
+ m = (m_min + m_max) >> 1;
+ th = &s->helpers[m];
+ v = th->func;
+ if (v == val)
+ return th;
+ else if (val < v) {
+ m_max = m - 1;
+ } else {
+ m_min = m + 1;
+ }
+ }
+ return NULL;
+}
+
+static const char * const cond_name[] =
+{
+ [TCG_COND_EQ] = "eq",
+ [TCG_COND_NE] = "ne",
+ [TCG_COND_LT] = "lt",
+ [TCG_COND_GE] = "ge",
+ [TCG_COND_LE] = "le",
+ [TCG_COND_GT] = "gt",
+ [TCG_COND_LTU] = "ltu",
+ [TCG_COND_GEU] = "geu",
+ [TCG_COND_LEU] = "leu",
+ [TCG_COND_GTU] = "gtu"
+};
+
+void tcg_dump_ops(TCGContext *s, FILE *outfile)
+{
+ const uint16_t *opc_ptr;
+ const TCGArg *args;
+ TCGArg arg;
+ int c, i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
+ const TCGOpDef *def;
+ char buf[128];
+
+ first_insn = 1;
+ opc_ptr = gen_opc_buf;
+ args = gen_opparam_buf;
+ while (opc_ptr < gen_opc_ptr) {
+ c = *opc_ptr++;
+ def = &tcg_op_defs[c];
+ if (c == INDEX_op_debug_insn_start) {
+ uint64_t pc;
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
+ pc = ((uint64_t)args[1] << 32) | args[0];
+#else
+ pc = args[0];
+#endif
+ if (!first_insn)
+ fprintf(outfile, "\n");
+ fprintf(outfile, " ---- 0x%" PRIx64, pc);
+ first_insn = 0;
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+ nb_cargs = def->nb_cargs;
+ } else if (c == INDEX_op_call) {
+ TCGArg arg;
+
+ /* variable number of arguments */
+ arg = *args++;
+ nb_oargs = arg >> 16;
+ nb_iargs = arg & 0xffff;
+ nb_cargs = def->nb_cargs;
+
+ fprintf(outfile, " %s ", def->name);
+
+ /* function name */
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + nb_iargs - 1]));
+ /* flags */
+ fprintf(outfile, ",$0x%" TCG_PRIlx,
+ args[nb_oargs + nb_iargs]);
+ /* nb out args */
+ fprintf(outfile, ",$%d", nb_oargs);
+ for(i = 0; i < nb_oargs; i++) {
+ fprintf(outfile, ",");
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[i]));
+ }
+ for(i = 0; i < (nb_iargs - 1); i++) {
+ fprintf(outfile, ",");
+ if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
+ fprintf(outfile, "<dummy>");
+ } else {
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[nb_oargs + i]));
+ }
+ }
+ } else if (c == INDEX_op_movi_i32
+#if TCG_TARGET_REG_BITS == 64
+ || c == INDEX_op_movi_i64
+#endif
+ ) {
+ tcg_target_ulong val;
+ TCGHelperInfo *th;
+
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+ nb_cargs = def->nb_cargs;
+ fprintf(outfile, " %s %s,$", def->name,
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
+ val = args[1];
+ th = tcg_find_helper(s, val);
+ if (th) {
+ fprintf(outfile, "%s", th->name);
+ } else {
+ if (c == INDEX_op_movi_i32)
+ fprintf(outfile, "0x%x", (uint32_t)val);
+ else
+ fprintf(outfile, "0x%" PRIx64 , (uint64_t)val);
+ }
+ } else {
+ fprintf(outfile, " %s ", def->name);
+ if (c == INDEX_op_nopn) {
+ /* variable number of arguments */
+ nb_cargs = *args;
+ nb_oargs = 0;
+ nb_iargs = 0;
+ } else {
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+ nb_cargs = def->nb_cargs;
+ }
+
+ k = 0;
+ for(i = 0; i < nb_oargs; i++) {
+ if (k != 0)
+ fprintf(outfile, ",");
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
+ }
+ for(i = 0; i < nb_iargs; i++) {
+ if (k != 0)
+ fprintf(outfile, ",");
+ fprintf(outfile, "%s",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
+ }
+ if (c == INDEX_op_brcond_i32
+#if TCG_TARGET_REG_BITS == 32
+ || c == INDEX_op_brcond2_i32
+#elif TCG_TARGET_REG_BITS == 64
+ || c == INDEX_op_brcond_i64
+#endif
+ ) {
+ if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]])
+ fprintf(outfile, ",%s", cond_name[args[k++]]);
+ else
+ fprintf(outfile, ",$0x%" TCG_PRIlx, args[k++]);
+ i = 1;
+ }
+ else
+ i = 0;
+ for(; i < nb_cargs; i++) {
+ if (k != 0)
+ fprintf(outfile, ",");
+ arg = args[k++];
+ fprintf(outfile, "$0x%" TCG_PRIlx, arg);
+ }
+ }
+ fprintf(outfile, "\n");
+ args += nb_iargs + nb_oargs + nb_cargs;
+ }
+}
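+
+/* The dump produced looks roughly like this (illustrative):
+
+    ---- 0x8000100
+    movi_i32 tmp0,$0x4
+    add_i32 r0,r0,tmp0
+    brcond_i32 r0,r1,eq,$0x3
+*/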
+
+/* we give more priority to constraints with fewer registers */
+static int get_constraint_priority(const TCGOpDef *def, int k)
+{
+ const TCGArgConstraint *arg_ct;
+
+ int i, n;
+ arg_ct = &def->args_ct[k];
+ if (arg_ct->ct & TCG_CT_ALIAS) {
+ /* an alias is equivalent to a single register */
+ n = 1;
+ } else {
+ if (!(arg_ct->ct & TCG_CT_REG))
+ return 0;
+ n = 0;
+ for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
+ if (tcg_regset_test_reg(arg_ct->u.regs, i))
+ n++;
+ }
+ }
+ return TCG_TARGET_NB_REGS - n + 1;
+}
+
+/* sort from highest priority to lowest */
+static void sort_constraints(TCGOpDef *def, int start, int n)
+{
+ int i, j, p1, p2, tmp;
+
+ for(i = 0; i < n; i++)
+ def->sorted_args[start + i] = start + i;
+ if (n <= 1)
+ return;
+ for(i = 0; i < n - 1; i++) {
+ for(j = i + 1; j < n; j++) {
+ p1 = get_constraint_priority(def, def->sorted_args[start + i]);
+ p2 = get_constraint_priority(def, def->sorted_args[start + j]);
+ if (p1 < p2) {
+ tmp = def->sorted_args[start + i];
+ def->sorted_args[start + i] = def->sorted_args[start + j];
+ def->sorted_args[start + j] = tmp;
+ }
+ }
+ }
+}
+
+void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
+{
+ int op;
+ TCGOpDef *def;
+ const char *ct_str;
+ int i, nb_args;
+
+ for(;;) {
+ if (tdefs->op < 0)
+ break;
+ op = tdefs->op;
+ assert(op >= 0 && op < NB_OPS);
+ def = &tcg_op_defs[op];
+ nb_args = def->nb_iargs + def->nb_oargs;
+ for(i = 0; i < nb_args; i++) {
+ ct_str = tdefs->args_ct_str[i];
+ tcg_regset_clear(def->args_ct[i].u.regs);
+ def->args_ct[i].ct = 0;
+ if (ct_str[0] >= '0' && ct_str[0] <= '9') {
+ int oarg;
+ oarg = ct_str[0] - '0';
+ assert(oarg < def->nb_oargs);
+ assert(def->args_ct[oarg].ct & TCG_CT_REG);
+ /* TCG_CT_ALIAS is for the output arguments. The input
+ argument is tagged with TCG_CT_IALIAS. */
+ def->args_ct[i] = def->args_ct[oarg];
+ def->args_ct[oarg].ct = TCG_CT_ALIAS;
+ def->args_ct[oarg].alias_index = i;
+ def->args_ct[i].ct |= TCG_CT_IALIAS;
+ def->args_ct[i].alias_index = oarg;
+ } else {
+ for(;;) {
+ if (*ct_str == '\0')
+ break;
+ switch(*ct_str) {
+ case 'i':
+ def->args_ct[i].ct |= TCG_CT_CONST;
+ ct_str++;
+ break;
+ default:
+ if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
+ fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
+ ct_str, i, def->name);
+ exit(1);
+ }
+ }
+ }
+ }
+ }
+
+ /* sort the constraints (XXX: this is just a heuristic) */
+ sort_constraints(def, 0, def->nb_oargs);
+ sort_constraints(def, def->nb_oargs, def->nb_iargs);
+
+#if 0
+ {
+ int i;
+
+ printf("%s: sorted=", def->name);
+ for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
+ printf(" %d", def->sorted_args[i]);
+ printf("\n");
+ }
+#endif
+ tdefs++;
+ }
+
+}
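+
+/* Backends pass a table terminated by an entry with op < 0; a sketch
+ in the style of the i386 backend:
+
+   static const TCGTargetOpDef op_defs[] = {
+       { INDEX_op_add_i32, { "r", "0", "ri" } },
+       { INDEX_op_brcond_i32, { "r", "ri" } },
+       { -1 },
+   };
+   tcg_add_target_add_op_defs(op_defs);
+
+ A digit constraint aliases an input to that output register, 'i'
+ admits a constant, and anything else is handed to
+ target_parse_constraint(). */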
+
+#ifdef USE_LIVENESS_ANALYSIS
+
+/* set a nop for an operation using 'nb_args' */
+static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
+ TCGArg *args, int nb_args)
+{
+ if (nb_args == 0) {
+ *opc_ptr = INDEX_op_nop;
+ } else {
+ *opc_ptr = INDEX_op_nopn;
+ args[0] = nb_args;
+ args[nb_args - 1] = nb_args;
+ }
+}
+
+/* liveness analysis: end of function: globals are live, temps are
+ dead. */
+/* XXX: at this stage, not used as there would be little gain because
+ most TBs end with a conditional jump. */
+static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps)
+{
+ memset(dead_temps, 0, s->nb_globals);
+ memset(dead_temps + s->nb_globals, 1, s->nb_temps - s->nb_globals);
+}
+
+/* liveness analysis: end of basic block: globals are live, temps are
+ dead, local temps are live. */
+static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps)
+{
+ int i;
+ TCGTemp *ts;
+
+ memset(dead_temps, 0, s->nb_globals);
+ ts = &s->temps[s->nb_globals];
+ for(i = s->nb_globals; i < s->nb_temps; i++) {
+ if (ts->temp_local)
+ dead_temps[i] = 0;
+ else
+ dead_temps[i] = 1;
+ ts++;
+ }
+}
+
+/* Liveness analysis: update the op_dead_iargs array to tell if a
+ given input argument is dead. Instructions updating dead
+ temporaries are removed. */
+static void tcg_liveness_analysis(TCGContext *s)
+{
+ int i, op_index, op, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
+ TCGArg *args;
+ const TCGOpDef *def;
+ uint8_t *dead_temps;
+ unsigned int dead_iargs;
+
+ gen_opc_ptr++; /* skip end */
+
+ nb_ops = gen_opc_ptr - gen_opc_buf;
+
+ /* XXX: make it really dynamic */
+ s->op_dead_iargs = tcg_malloc(OPC_BUF_SIZE * sizeof(uint16_t));
+
+ dead_temps = tcg_malloc(s->nb_temps);
+ memset(dead_temps, 1, s->nb_temps);
+
+ args = gen_opparam_ptr;
+ op_index = nb_ops - 1;
+ while (op_index >= 0) {
+ op = gen_opc_buf[op_index];
+ def = &tcg_op_defs[op];
+ switch(op) {
+ case INDEX_op_call:
+ {
+ int call_flags;
+
+ nb_args = args[-1];
+ args -= nb_args;
+ nb_iargs = args[0] & 0xffff;
+ nb_oargs = args[0] >> 16;
+ args++;
+ call_flags = args[nb_oargs + nb_iargs];
+
+ /* pure functions can be removed if their result is not
+ used */
+ if (call_flags & TCG_CALL_PURE) {
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ if (!dead_temps[arg])
+ goto do_not_remove_call;
+ }
+ tcg_set_nop(s, gen_opc_buf + op_index,
+ args - 1, nb_args);
+ } else {
+ do_not_remove_call:
+
+ /* output args are dead */
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ dead_temps[arg] = 1;
+ }
+
+ /* globals are live (they may be used by the call) */
+ memset(dead_temps, 0, s->nb_globals);
+
+ /* input args are live */
+ dead_iargs = 0;
+ for(i = 0; i < nb_iargs; i++) {
+ arg = args[i + nb_oargs];
+ if (arg != TCG_CALL_DUMMY_ARG) {
+ if (dead_temps[arg]) {
+ dead_iargs |= (1 << i);
+ }
+ dead_temps[arg] = 0;
+ }
+ }
+ s->op_dead_iargs[op_index] = dead_iargs;
+ }
+ args--;
+ }
+ break;
+ case INDEX_op_set_label:
+ args--;
+ /* mark end of basic block */
+ tcg_la_bb_end(s, dead_temps);
+ break;
+ case INDEX_op_debug_insn_start:
+ args -= def->nb_args;
+ break;
+ case INDEX_op_nopn:
+ nb_args = args[-1];
+ args -= nb_args;
+ break;
+ case INDEX_op_discard:
+ args--;
+ /* mark the temporary as dead */
+ dead_temps[args[0]] = 1;
+ break;
+ case INDEX_op_end:
+ break;
+ /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
+ default:
+ if (op > INDEX_op_end) {
+ args -= def->nb_args;
+ nb_iargs = def->nb_iargs;
+ nb_oargs = def->nb_oargs;
+
+ /* Test if the operation can be removed because all
+ its outputs are dead. We assume that nb_oargs == 0
+ implies side effects */
+ if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ if (!dead_temps[arg])
+ goto do_not_remove;
+ }
+ tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args);
+#ifdef CONFIG_PROFILER
+ s->del_op_count++;
+#endif
+ } else {
+ do_not_remove:
+
+ /* output args are dead */
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ dead_temps[arg] = 1;
+ }
+
+ /* if end of basic block, update */
+ if (def->flags & TCG_OPF_BB_END) {
+ tcg_la_bb_end(s, dead_temps);
+ } else if (def->flags & TCG_OPF_CALL_CLOBBER) {
+ /* globals are live */
+ memset(dead_temps, 0, s->nb_globals);
+ }
+
+ /* input args are live */
+ dead_iargs = 0;
+ for(i = 0; i < nb_iargs; i++) {
+ arg = args[i + nb_oargs];
+ if (dead_temps[arg]) {
+ dead_iargs |= (1 << i);
+ }
+ dead_temps[arg] = 0;
+ }
+ s->op_dead_iargs[op_index] = dead_iargs;
+ }
+ } else {
+ /* legacy dyngen operations */
+ args -= def->nb_args;
+ /* mark end of basic block */
+ tcg_la_bb_end(s, dead_temps);
+ }
+ break;
+ }
+ op_index--;
+ }
+
+ if (args != gen_opparam_buf)
+ tcg_abort();
+}
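+
+/* Example of the effect (illustrative): given
+
+       movi_i32 tmp0,$0x1
+       add_i32 tmp1,tmp0,tmp0
+       end
+
+ tmp1 is never read afterwards, so the add_i32 is rewritten to a nop,
+ which in turn leaves tmp0 dead and removes the movi_i32 as well. */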
+#else
+/* dummy liveness analysis */
+static void tcg_liveness_analysis(TCGContext *s)
+{
+ int nb_ops;
+ nb_ops = gen_opc_ptr - gen_opc_buf;
+
+ s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t));
+ memset(s->op_dead_iargs, 0, nb_ops * sizeof(uint16_t));
+}
+#endif
+
+#ifndef NDEBUG
+static void dump_regs(TCGContext *s)
+{
+ TCGTemp *ts;
+ int i;
+ char buf[64];
+
+ for(i = 0; i < s->nb_temps; i++) {
+ ts = &s->temps[i];
+ printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
+ switch(ts->val_type) {
+ case TEMP_VAL_REG:
+ printf("%s", tcg_target_reg_names[ts->reg]);
+ break;
+ case TEMP_VAL_MEM:
+ printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
+ break;
+ case TEMP_VAL_CONST:
+ printf("$0x%" TCG_PRIlx, ts->val);
+ break;
+ case TEMP_VAL_DEAD:
+ printf("D");
+ break;
+ default:
+ printf("???");
+ break;
+ }
+ printf("\n");
+ }
+
+ for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
+ if (s->reg_to_temp[i] >= 0) {
+ printf("%s: %s\n",
+ tcg_target_reg_names[i],
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
+ }
+ }
+}
+
+static void check_regs(TCGContext *s)
+{
+ int reg, k;
+ TCGTemp *ts;
+ char buf[64];
+
+ for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
+ k = s->reg_to_temp[reg];
+ if (k >= 0) {
+ ts = &s->temps[k];
+ if (ts->val_type != TEMP_VAL_REG ||
+ ts->reg != reg) {
+ printf("Inconsistency for register %s:\n",
+ tcg_target_reg_names[reg]);
+ goto fail;
+ }
+ }
+ }
+ for(k = 0; k < s->nb_temps; k++) {
+ ts = &s->temps[k];
+ if (ts->val_type == TEMP_VAL_REG &&
+ !ts->fixed_reg &&
+ s->reg_to_temp[ts->reg] != k) {
+ printf("Inconsistency for temp %s:\n",
+ tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
+ fail:
+ printf("reg state:\n");
+ dump_regs(s);
+ tcg_abort();
+ }
+ }
+}
+#endif
+
+static void temp_allocate_frame(TCGContext *s, int temp)
+{
+ TCGTemp *ts;
+ ts = &s->temps[temp];
+ s->current_frame_offset = (s->current_frame_offset + sizeof(tcg_target_long) - 1) & ~(sizeof(tcg_target_long) - 1);
+ if (s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end)
+ tcg_abort();
+ ts->mem_offset = s->current_frame_offset;
+ ts->mem_reg = s->frame_reg;
+ ts->mem_allocated = 1;
+ s->current_frame_offset += sizeof(tcg_target_long);
+}
+
+/* free register 'reg' by spilling the corresponding temporary if necessary */
+static void tcg_reg_free(TCGContext *s, int reg)
+{
+ TCGTemp *ts;
+ int temp;
+
+ temp = s->reg_to_temp[reg];
+ if (temp != -1) {
+ ts = &s->temps[temp];
+ assert(ts->val_type == TEMP_VAL_REG);
+ if (!ts->mem_coherent) {
+ if (!ts->mem_allocated)
+ temp_allocate_frame(s, temp);
+ tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ }
+ ts->val_type = TEMP_VAL_MEM;
+ s->reg_to_temp[reg] = -1;
+ }
+}
+
+/* Allocate a register belonging to reg1 & ~reg2 */
+static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
+{
+ int i, reg;
+ TCGRegSet reg_ct;
+
+ tcg_regset_andnot(reg_ct, reg1, reg2);
+
+ /* first try free registers */
+ for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
+ reg = tcg_target_reg_alloc_order[i];
+ if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
+ return reg;
+ }
+
+ /* XXX: do better spill choice */
+ for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
+ reg = tcg_target_reg_alloc_order[i];
+ if (tcg_regset_test_reg(reg_ct, reg)) {
+ tcg_reg_free(s, reg);
+ return reg;
+ }
+ }
+
+ tcg_abort();
+}
+
+/* save a temporary to memory. 'allocated_regs' is used in case a
+ temporary register needs to be allocated to store a constant. */
+static void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
+{
+ TCGTemp *ts;
+ int reg;
+
+ ts = &s->temps[temp];
+ if (!ts->fixed_reg) {
+ switch(ts->val_type) {
+ case TEMP_VAL_REG:
+ tcg_reg_free(s, ts->reg);
+ break;
+ case TEMP_VAL_DEAD:
+ ts->val_type = TEMP_VAL_MEM;
+ break;
+ case TEMP_VAL_CONST:
+ reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
+ allocated_regs);
+ if (!ts->mem_allocated)
+ temp_allocate_frame(s, temp);
+ tcg_out_movi(s, ts->type, reg, ts->val);
+ tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ ts->val_type = TEMP_VAL_MEM;
+ break;
+ case TEMP_VAL_MEM:
+ break;
+ default:
+ tcg_abort();
+ }
+ }
+}
+
+/* save globals to their canonical location and assume they can be
+ modified by the following code. 'allocated_regs' is used in case a
+ temporary register needs to be allocated to store a constant. */
+static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
+{
+ int i;
+
+ for(i = 0; i < s->nb_globals; i++) {
+ temp_save(s, i, allocated_regs);
+ }
+}
+
+/* at the end of a basic block, we assume all temporaries are dead and
+ all globals are stored at their canonical location. */
+static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
+{
+ TCGTemp *ts;
+ int i;
+
+ for(i = s->nb_globals; i < s->nb_temps; i++) {
+ ts = &s->temps[i];
+ if (ts->temp_local) {
+ temp_save(s, i, allocated_regs);
+ } else {
+ if (ts->val_type == TEMP_VAL_REG) {
+ s->reg_to_temp[ts->reg] = -1;
+ }
+ ts->val_type = TEMP_VAL_DEAD;
+ }
+ }
+
+ save_globals(s, allocated_regs);
+}
+
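+/* bit n of 'dead_iargs' is set when input argument n of the current op
+ is dead after the op (as computed by the liveness analysis) */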
+#define IS_DEAD_IARG(n) ((dead_iargs >> (n)) & 1)
+
+static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args)
+{
+ TCGTemp *ots;
+ tcg_target_ulong val;
+
+ ots = &s->temps[args[0]];
+ val = args[1];
+
+ if (ots->fixed_reg) {
+ /* for fixed registers, we do not do any constant
+ propagation */
+ tcg_out_movi(s, ots->type, ots->reg, val);
+ } else {
+ /* The movi is not explicitly generated here */
+ if (ots->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ots->reg] = -1;
+ ots->val_type = TEMP_VAL_CONST;
+ ots->val = val;
+ }
+}
+
+static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
+ const TCGArg *args,
+ unsigned int dead_iargs)
+{
+ TCGTemp *ts, *ots;
+ int reg;
+ const TCGArgConstraint *arg_ct;
+
+ ots = &s->temps[args[0]];
+ ts = &s->temps[args[1]];
+ arg_ct = &def->args_ct[0];
+
+ /* XXX: always mark arg dead if IS_DEAD_IARG(0) */
+ if (ts->val_type == TEMP_VAL_REG) {
+ if (IS_DEAD_IARG(0) && !ts->fixed_reg && !ots->fixed_reg) {
+ /* the mov can be suppressed */
+ if (ots->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ots->reg] = -1;
+ reg = ts->reg;
+ s->reg_to_temp[reg] = -1;
+ ts->val_type = TEMP_VAL_DEAD;
+ } else {
+ if (ots->val_type == TEMP_VAL_REG) {
+ reg = ots->reg;
+ } else {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
+ }
+ if (ts->reg != reg) {
+ tcg_out_mov(s, reg, ts->reg);
+ }
+ }
+ } else if (ts->val_type == TEMP_VAL_MEM) {
+ if (ots->val_type == TEMP_VAL_REG) {
+ reg = ots->reg;
+ } else {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
+ }
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ } else if (ts->val_type == TEMP_VAL_CONST) {
+ if (ots->fixed_reg) {
+ reg = ots->reg;
+ tcg_out_movi(s, ots->type, reg, ts->val);
+ } else {
+ /* propagate constant */
+ if (ots->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ots->reg] = -1;
+ ots->val_type = TEMP_VAL_CONST;
+ ots->val = ts->val;
+ return;
+ }
+ } else {
+ tcg_abort();
+ }
+ s->reg_to_temp[reg] = args[0];
+ ots->reg = reg;
+ ots->val_type = TEMP_VAL_REG;
+ ots->mem_coherent = 0;
+}
+
+static void tcg_reg_alloc_op(TCGContext *s,
+ const TCGOpDef *def, int opc,
+ const TCGArg *args,
+ unsigned int dead_iargs)
+{
+ TCGRegSet allocated_regs;
+ int i, k, nb_iargs, nb_oargs, reg;
+ TCGArg arg;
+ const TCGArgConstraint *arg_ct;
+ TCGTemp *ts;
+ TCGArg new_args[TCG_MAX_OP_ARGS];
+ int const_args[TCG_MAX_OP_ARGS];
+
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+
+ /* copy constants */
+ memcpy(new_args + nb_oargs + nb_iargs,
+ args + nb_oargs + nb_iargs,
+ sizeof(TCGArg) * def->nb_cargs);
+
+ /* satisfy input constraints */
+ tcg_regset_set(allocated_regs, s->reserved_regs);
+ for(k = 0; k < nb_iargs; k++) {
+ i = def->sorted_args[nb_oargs + k];
+ arg = args[i];
+ arg_ct = &def->args_ct[i];
+ ts = &s->temps[arg];
+ if (ts->val_type == TEMP_VAL_MEM) {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
+ ts->mem_coherent = 1;
+ s->reg_to_temp[reg] = arg;
+ } else if (ts->val_type == TEMP_VAL_CONST) {
+ if (tcg_target_const_match(ts->val, arg_ct)) {
+ /* constant is OK for instruction */
+ const_args[i] = 1;
+ new_args[i] = ts->val;
+ goto iarg_end;
+ } else {
+ /* need to move to a register */
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ tcg_out_movi(s, ts->type, reg, ts->val);
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
+ ts->mem_coherent = 0;
+ s->reg_to_temp[reg] = arg;
+ }
+ }
+ assert(ts->val_type == TEMP_VAL_REG);
+ if (arg_ct->ct & TCG_CT_IALIAS) {
+ if (ts->fixed_reg) {
+ /* if fixed register, we must allocate a new register
+ if the alias is not the same register */
+ if (arg != args[arg_ct->alias_index])
+ goto allocate_in_reg;
+ } else {
+ /* if the input is aliased to an output and if it is
+ not dead after the instruction, we must allocate
+ a new register and move it */
+ if (!IS_DEAD_IARG(i - nb_oargs))
+ goto allocate_in_reg;
+ }
+ }
+ reg = ts->reg;
+ if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
+ /* nothing to do : the constraint is satisfied */
+ } else {
+ allocate_in_reg:
+ /* allocate a new register matching the constraint
+ and move the temporary register into it */
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ tcg_out_mov(s, reg, ts->reg);
+ }
+ new_args[i] = reg;
+ const_args[i] = 0;
+ tcg_regset_set_reg(allocated_regs, reg);
+ iarg_end: ;
+ }
+
+ if (def->flags & TCG_OPF_BB_END) {
+ tcg_reg_alloc_bb_end(s, allocated_regs);
+ } else {
+ /* mark dead temporaries and free the associated registers */
+ for(i = 0; i < nb_iargs; i++) {
+ arg = args[nb_oargs + i];
+ if (IS_DEAD_IARG(i)) {
+ ts = &s->temps[arg];
+ if (!ts->fixed_reg) {
+ if (ts->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ts->reg] = -1;
+ ts->val_type = TEMP_VAL_DEAD;
+ }
+ }
+ }
+
+ if (def->flags & TCG_OPF_CALL_CLOBBER) {
+ /* XXX: permit generic clobber register list ? */
+ for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
+ if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
+ tcg_reg_free(s, reg);
+ }
+ }
+ /* XXX: for load/store we could do that only for the slow path
+ (i.e. when a memory callback is called) */
+
+ /* store globals and free the associated registers (we assume the insn
+ can modify any global). */
+ save_globals(s, allocated_regs);
+ }
+
+ /* satisfy the output constraints */
+ tcg_regset_set(allocated_regs, s->reserved_regs);
+ for(k = 0; k < nb_oargs; k++) {
+ i = def->sorted_args[k];
+ arg = args[i];
+ arg_ct = &def->args_ct[i];
+ ts = &s->temps[arg];
+ if (arg_ct->ct & TCG_CT_ALIAS) {
+ reg = new_args[arg_ct->alias_index];
+ } else {
+ /* if fixed register, we try to use it */
+ reg = ts->reg;
+ if (ts->fixed_reg &&
+ tcg_regset_test_reg(arg_ct->u.regs, reg)) {
+ goto oarg_end;
+ }
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ }
+ tcg_regset_set_reg(allocated_regs, reg);
+ /* if a fixed register is used, then a move will be done afterwards */
+ if (!ts->fixed_reg) {
+ if (ts->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ts->reg] = -1;
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
+ /* temp value is modified, so the value kept in memory is
+ potentially not the same */
+ ts->mem_coherent = 0;
+ s->reg_to_temp[reg] = arg;
+ }
+ oarg_end:
+ new_args[i] = reg;
+ }
+ }
+
+ /* emit instruction */
+ tcg_out_op(s, opc, new_args, const_args);
+
+ /* move the outputs in the correct register if needed */
+ for(i = 0; i < nb_oargs; i++) {
+ ts = &s->temps[args[i]];
+ reg = new_args[i];
+ if (ts->fixed_reg && ts->reg != reg) {
+ tcg_out_mov(s, ts->reg, reg);
+ }
+ }
+}
+
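+/* STACK_DIR() flips the sign of stack adjustments on hosts whose stack
+ grows upward, so the call code below can be written for the usual
+ downward-growing case */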
+#ifdef TCG_TARGET_STACK_GROWSUP
+#define STACK_DIR(x) (-(x))
+#else
+#define STACK_DIR(x) (x)
+#endif
+
+static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
+ int opc, const TCGArg *args,
+ unsigned int dead_iargs)
+{
+ int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
+ TCGArg arg, func_arg;
+ TCGTemp *ts;
+ tcg_target_long stack_offset, call_stack_size, func_addr;
+ int const_func_arg, allocate_args;
+ TCGRegSet allocated_regs;
+ const TCGArgConstraint *arg_ct;
+
+ arg = *args++;
+
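+ /* the first argument word packs the number of outputs (high 16 bits)
+ and inputs (low 16 bits); the last input is the function pointer,
+ so it is not a real call parameter */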
+ nb_oargs = arg >> 16;
+ nb_iargs = arg & 0xffff;
+ nb_params = nb_iargs - 1;
+
+ flags = args[nb_oargs + nb_iargs];
+
+ nb_regs = tcg_target_get_call_iarg_regs_count(flags);
+ if (nb_regs > nb_params)
+ nb_regs = nb_params;
+
+ /* assign stack slots first */
+ /* XXX: preallocate call stack */
+ call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
+ call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
+ ~(TCG_TARGET_STACK_ALIGN - 1);
+ allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
+ if (allocate_args) {
+ tcg_out_addi(s, TCG_REG_CALL_STACK, -STACK_DIR(call_stack_size));
+ }
+
+ stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
+ for(i = nb_regs; i < nb_params; i++) {
+ arg = args[nb_oargs + i];
+#ifdef TCG_TARGET_STACK_GROWSUP
+ stack_offset -= sizeof(tcg_target_long);
+#endif
+ if (arg != TCG_CALL_DUMMY_ARG) {
+ ts = &s->temps[arg];
+ if (ts->val_type == TEMP_VAL_REG) {
+ tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
+ } else if (ts->val_type == TEMP_VAL_MEM) {
+ reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
+ s->reserved_regs);
+ /* XXX: not correct if reading values from the stack */
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
+ } else if (ts->val_type == TEMP_VAL_CONST) {
+ reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
+ s->reserved_regs);
+ /* XXX: sign extend may be needed on some targets */
+ tcg_out_movi(s, ts->type, reg, ts->val);
+ tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
+ } else {
+ tcg_abort();
+ }
+ }
+#ifndef TCG_TARGET_STACK_GROWSUP
+ stack_offset += sizeof(tcg_target_long);
+#endif
+ }
+
+ /* assign input registers */
+ tcg_regset_set(allocated_regs, s->reserved_regs);
+ for(i = 0; i < nb_regs; i++) {
+ arg = args[nb_oargs + i];
+ if (arg != TCG_CALL_DUMMY_ARG) {
+ ts = &s->temps[arg];
+ reg = tcg_target_call_iarg_regs[i];
+ tcg_reg_free(s, reg);
+ if (ts->val_type == TEMP_VAL_REG) {
+ if (ts->reg != reg) {
+ tcg_out_mov(s, reg, ts->reg);
+ }
+ } else if (ts->val_type == TEMP_VAL_MEM) {
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ } else if (ts->val_type == TEMP_VAL_CONST) {
+ /* XXX: sign extend ? */
+ tcg_out_movi(s, ts->type, reg, ts->val);
+ } else {
+ tcg_abort();
+ }
+ tcg_regset_set_reg(allocated_regs, reg);
+ }
+ }
+
+ /* assign function address */
+ func_arg = args[nb_oargs + nb_iargs - 1];
+ arg_ct = &def->args_ct[0];
+ ts = &s->temps[func_arg];
+ func_addr = ts->val;
+ const_func_arg = 0;
+ if (ts->val_type == TEMP_VAL_MEM) {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
+ func_arg = reg;
+ tcg_regset_set_reg(allocated_regs, reg);
+ } else if (ts->val_type == TEMP_VAL_REG) {
+ reg = ts->reg;
+ if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ tcg_out_mov(s, reg, ts->reg);
+ }
+ func_arg = reg;
+ tcg_regset_set_reg(allocated_regs, reg);
+ } else if (ts->val_type == TEMP_VAL_CONST) {
+ if (tcg_target_const_match(func_addr, arg_ct)) {
+ const_func_arg = 1;
+ func_arg = func_addr;
+ } else {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
+ tcg_out_movi(s, ts->type, reg, func_addr);
+ func_arg = reg;
+ tcg_regset_set_reg(allocated_regs, reg);
+ }
+ } else {
+ tcg_abort();
+ }
+
+ /* mark dead temporaries and free the associated registers */
+ for(i = 0; i < nb_iargs; i++) {
+ arg = args[nb_oargs + i];
+ if (IS_DEAD_IARG(i)) {
+ ts = &s->temps[arg];
+ if (!ts->fixed_reg) {
+ if (ts->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ts->reg] = -1;
+ ts->val_type = TEMP_VAL_DEAD;
+ }
+ }
+ }
+
+ /* clobber call registers */
+ for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
+ if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
+ tcg_reg_free(s, reg);
+ }
+ }
+
+ /* store globals and free the associated registers (we assume the call
+ can modify any global). */
+ save_globals(s, allocated_regs);
+
+ tcg_out_op(s, opc, &func_arg, &const_func_arg);
+
+ if (allocate_args) {
+ tcg_out_addi(s, TCG_REG_CALL_STACK, STACK_DIR(call_stack_size));
+ }
+
+ /* assign output registers and emit moves if needed */
+ for(i = 0; i < nb_oargs; i++) {
+ arg = args[i];
+ ts = &s->temps[arg];
+ reg = tcg_target_call_oarg_regs[i];
+ assert(s->reg_to_temp[reg] == -1);
+ if (ts->fixed_reg) {
+ if (ts->reg != reg) {
+ tcg_out_mov(s, ts->reg, reg);
+ }
+ } else {
+ if (ts->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ts->reg] = -1;
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
+ ts->mem_coherent = 0;
+ s->reg_to_temp[reg] = arg;
+ }
+ }
+
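+ /* number of opparam words consumed: the packed count word, the
+ outputs, the inputs (including the function pointer) and the
+ constant args */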
+ return nb_iargs + nb_oargs + def->nb_cargs + 1;
+}
+
+#ifdef CONFIG_PROFILER
+
+static int64_t dyngen_table_op_count[NB_OPS];
+
+void dump_op_count(void)
+{
+ int i;
+ FILE *f;
+ f = fopen("/tmp/op1.log", "w");
+ for(i = 0; i < INDEX_op_end; i++) {
+ fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, dyngen_table_op_count[i]);
+ }
+ fclose(f);
+ f = fopen("/tmp/op2.log", "w");
+ for(i = INDEX_op_end; i < NB_OPS; i++) {
+ fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, dyngen_table_op_count[i]);
+ }
+ fclose(f);
+}
+#endif
+
+
+static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
+ long search_pc)
+{
+ int opc, op_index;
+ const TCGOpDef *def;
+ unsigned int dead_iargs;
+ const TCGArg *args;
+
+#ifdef DEBUG_DISAS
+ if (unlikely(loglevel & CPU_LOG_TB_OP)) {
+ fprintf(logfile, "OP:\n");
+ tcg_dump_ops(s, logfile);
+ fprintf(logfile, "\n");
+ }
+#endif
+
+#ifdef CONFIG_PROFILER
+ s->la_time -= profile_getclock();
+#endif
+ tcg_liveness_analysis(s);
+#ifdef CONFIG_PROFILER
+ s->la_time += profile_getclock();
+#endif
+
+#ifdef DEBUG_DISAS
+ if (unlikely(loglevel & CPU_LOG_TB_OP_OPT)) {
+ fprintf(logfile, "OP after la:\n");
+ tcg_dump_ops(s, logfile);
+ fprintf(logfile, "\n");
+ }
+#endif
+
+ tcg_reg_alloc_start(s);
+
+ s->code_buf = gen_code_buf;
+ s->code_ptr = gen_code_buf;
+
+ args = gen_opparam_buf;
+ op_index = 0;
+
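+ /* main loop: allocate registers and emit host code for each op until
+ INDEX_op_end is reached */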
+ for(;;) {
+ opc = gen_opc_buf[op_index];
+#ifdef CONFIG_PROFILER
+ dyngen_table_op_count[opc]++;
+#endif
+ def = &tcg_op_defs[opc];
+#if 0
+ printf("%s: %d %d %d\n", def->name,
+ def->nb_oargs, def->nb_iargs, def->nb_cargs);
+ // dump_regs(s);
+#endif
+ switch(opc) {
+ case INDEX_op_mov_i32:
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_mov_i64:
+#endif
+ dead_iargs = s->op_dead_iargs[op_index];
+ tcg_reg_alloc_mov(s, def, args, dead_iargs);
+ break;
+ case INDEX_op_movi_i32:
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_movi_i64:
+#endif
+ tcg_reg_alloc_movi(s, args);
+ break;
+ case INDEX_op_debug_insn_start:
+ /* debug instruction */
+ break;
+ case INDEX_op_nop:
+ case INDEX_op_nop1:
+ case INDEX_op_nop2:
+ case INDEX_op_nop3:
+ break;
+ case INDEX_op_nopn:
+ args += args[0];
+ goto next;
+ case INDEX_op_discard:
+ {
+ TCGTemp *ts;
+ ts = &s->temps[args[0]];
+ /* mark the temporary as dead */
+ if (!ts->fixed_reg) {
+ if (ts->val_type == TEMP_VAL_REG)
+ s->reg_to_temp[ts->reg] = -1;
+ ts->val_type = TEMP_VAL_DEAD;
+ }
+ }
+ break;
+ case INDEX_op_set_label:
+ tcg_reg_alloc_bb_end(s, s->reserved_regs);
+ tcg_out_label(s, args[0], (long)s->code_ptr);
+ break;
+ case INDEX_op_call:
+ dead_iargs = s->op_dead_iargs[op_index];
+ args += tcg_reg_alloc_call(s, def, opc, args, dead_iargs);
+ goto next;
+ case INDEX_op_end:
+ goto the_end;
+
+#ifdef CONFIG_DYNGEN_OP
+ case 0 ... INDEX_op_end - 1:
+ /* legacy dyngen ops */
+#ifdef CONFIG_PROFILER
+ s->old_op_count++;
+#endif
+ tcg_reg_alloc_bb_end(s, s->reserved_regs);
+ if (search_pc >= 0) {
+ s->code_ptr += def->copy_size;
+ args += def->nb_args;
+ } else {
+ args = dyngen_op(s, opc, args);
+ }
+ goto next;
+#endif
+ default:
+ /* Note: in order to speed up the code, it would be much
+ faster to have specialized register allocator functions for
+ some common argument patterns */
+ dead_iargs = s->op_dead_iargs[op_index];
+ tcg_reg_alloc_op(s, def, opc, args, dead_iargs);
+ break;
+ }
+ args += def->nb_args;
+ next:
+ if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
+ return op_index;
+ }
+ op_index++;
+#ifndef NDEBUG
+ check_regs(s);
+#endif
+ }
+ the_end:
+ return -1;
+}
+
+int dyngen_code(TCGContext *s, uint8_t *gen_code_buf)
+{
+#ifdef CONFIG_PROFILER
+ {
+ int n;
+ n = (gen_opc_ptr - gen_opc_buf);
+ s->op_count += n;
+ if (n > s->op_count_max)
+ s->op_count_max = n;
+
+ s->temp_count += s->nb_temps;
+ if (s->nb_temps > s->temp_count_max)
+ s->temp_count_max = s->nb_temps;
+ }
+#endif
+
+ tcg_gen_code_common(s, gen_code_buf, -1);
+
+ /* flush instruction cache */
+ flush_icache_range((unsigned long)gen_code_buf,
+ (unsigned long)s->code_ptr);
+ return s->code_ptr - gen_code_buf;
+}
+
+/* Return the index of the micro operation such that the pc after it is
+ less than 'offset' bytes from the start of the TB. The contents of
+ gen_code_buf must not be changed, though writing the same values is ok.
+ Return -1 if not found. */
+int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
+{
+ return tcg_gen_code_common(s, gen_code_buf, offset);
+}
+
+#ifdef CONFIG_PROFILER
+void tcg_dump_info(FILE *f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
+{
+ TCGContext *s = &tcg_ctx;
+ int64_t tot;
+
+ tot = s->interm_time + s->code_time;
+ cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
+ tot, tot / 2.4e9);
+ cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
+ s->tb_count,
+ s->tb_count1 - s->tb_count,
+ s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
+ cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
+ s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
+ cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
+ s->op_count ? (double)s->old_op_count / s->op_count * 100.0 : 0);
+ cpu_fprintf(f, "deleted ops/TB %0.2f\n",
+ s->tb_count ?
+ (double)s->del_op_count / s->tb_count : 0);
+ cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
+ s->tb_count ?
+ (double)s->temp_count / s->tb_count : 0,
+ s->temp_count_max);
+
+ cpu_fprintf(f, "cycles/op %0.1f\n",
+ s->op_count ? (double)tot / s->op_count : 0);
+ cpu_fprintf(f, "cycles/in byte %0.1f\n",
+ s->code_in_len ? (double)tot / s->code_in_len : 0);
+ cpu_fprintf(f, "cycles/out byte %0.1f\n",
+ s->code_out_len ? (double)tot / s->code_out_len : 0);
+ if (tot == 0)
+ tot = 1;
+ cpu_fprintf(f, " gen_interm time %0.1f%%\n",
+ (double)s->interm_time / tot * 100.0);
+ cpu_fprintf(f, " gen_code time %0.1f%%\n",
+ (double)s->code_time / tot * 100.0);
+ cpu_fprintf(f, "liveness/code time %0.1f%%\n",
+ (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
+ cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
+ s->restore_count);
+ cpu_fprintf(f, " avg cycles %0.1f\n",
+ s->restore_count ? (double)s->restore_time / s->restore_count : 0);
+ {
+ extern void dump_op_count(void);
+ dump_op_count();
+ }
+}
+#else
+void tcg_dump_info(FILE *f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
+{
+ cpu_fprintf(f, "[TCG profiler not compiled]\n");
+}
+#endif
diff --git a/tcg/tcg.h b/tcg/tcg.h
new file mode 100644
index 0000000..bc5b902
--- /dev/null
+++ b/tcg/tcg.h
@@ -0,0 +1,421 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "tcg-target.h"
+
+#if TCG_TARGET_REG_BITS == 32
+typedef int32_t tcg_target_long;
+typedef uint32_t tcg_target_ulong;
+#define TCG_PRIlx PRIx32
+#define TCG_PRIld PRId32
+#elif TCG_TARGET_REG_BITS == 64
+typedef int64_t tcg_target_long;
+typedef uint64_t tcg_target_ulong;
+#define TCG_PRIlx PRIx64
+#define TCG_PRIld PRId64
+#else
+#error unsupported
+#endif
+
+#if TCG_TARGET_NB_REGS <= 32
+typedef uint32_t TCGRegSet;
+#elif TCG_TARGET_NB_REGS <= 64
+typedef uint64_t TCGRegSet;
+#else
+#error unsupported
+#endif
+
+enum {
+#define DEF(s, n, copy_size) INDEX_op_ ## s,
+#include "tcg-opc.h"
+#undef DEF
+ NB_OPS,
+};
+
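+/* a TCGRegSet is a bitmask with one bit per host register */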
+#define tcg_regset_clear(d) (d) = 0
+#define tcg_regset_set(d, s) (d) = (s)
+#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
+#define tcg_regset_set_reg(d, r) (d) |= 1 << (r)
+#define tcg_regset_reset_reg(d, r) (d) &= ~(1 << (r))
+#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
+#define tcg_regset_or(d, a, b) (d) = (a) | (b)
+#define tcg_regset_and(d, a, b) (d) = (a) & (b)
+#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
+#define tcg_regset_not(d, a) (d) = ~(a)
+
+typedef struct TCGRelocation {
+ struct TCGRelocation *next;
+ int type;
+ uint8_t *ptr;
+ tcg_target_long addend;
+} TCGRelocation;
+
+typedef struct TCGLabel {
+ int has_value;
+ union {
+ tcg_target_ulong value;
+ TCGRelocation *first_reloc;
+ } u;
+} TCGLabel;
+
+typedef struct TCGPool {
+ struct TCGPool *next;
+ int size;
+ uint8_t data[0] __attribute__ ((aligned));
+} TCGPool;
+
+#define TCG_POOL_CHUNK_SIZE 32768
+
+#define TCG_MAX_LABELS 512
+
+#define TCG_MAX_TEMPS 512
+
+/* when the size of the arguments of a called function is smaller than
+ this value, they are statically allocated in the TB stack frame */
+#define TCG_STATIC_CALL_ARGS_SIZE 128
+
+typedef int TCGType;
+
+#define TCG_TYPE_I32 0
+#define TCG_TYPE_I64 1
+#define TCG_TYPE_COUNT 2 /* number of different types */
+
+#if TCG_TARGET_REG_BITS == 32
+#define TCG_TYPE_PTR TCG_TYPE_I32
+#else
+#define TCG_TYPE_PTR TCG_TYPE_I64
+#endif
+
+typedef tcg_target_ulong TCGArg;
+
+/* Define a type and accessor macros for variables. Using a struct is
+ nice because it gives some level of type safety. Ideally the compiler
+ would be able to see through all this. However, in practice this is
+ not true, especially on targets with braindamaged ABIs (e.g. i386).
+ We use plain int by default to avoid this runtime overhead.
+ Users of tcg_gen_* don't need to know about any of this, and should
+ treat TCGv as an opaque type. */
+
+//#define DEBUG_TCGV 1
+
+#ifdef DEBUG_TCGV
+
+typedef struct
+{
+ int n;
+} TCGv;
+
+#define MAKE_TCGV(i) __extension__ \
+ ({ TCGv make_tcgv_tmp = {i}; make_tcgv_tmp;})
+#define GET_TCGV(t) ((t).n)
+#if TCG_TARGET_REG_BITS == 32
+#define TCGV_HIGH(t) MAKE_TCGV(GET_TCGV(t) + 1)
+#endif
+
+#else /* !DEBUG_TCGV */
+
+typedef int TCGv;
+#define MAKE_TCGV(x) (x)
+#define GET_TCGV(t) (t)
+#if TCG_TARGET_REG_BITS == 32
+#define TCGV_HIGH(t) ((t) + 1)
+#endif
+
+#endif /* DEBUG_TCGV */
+
+/* Dummy definition to avoid compiler warnings. */
+#define TCGV_UNUSED(x) x = MAKE_TCGV(-1)
+
+/* call flags */
+#define TCG_CALL_TYPE_MASK 0x000f
+#define TCG_CALL_TYPE_STD 0x0000 /* standard C call */
+#define TCG_CALL_TYPE_REGPARM_1 0x0001 /* i386 style regparm call (1 reg) */
+#define TCG_CALL_TYPE_REGPARM_2 0x0002 /* i386 style regparm call (2 regs) */
+#define TCG_CALL_TYPE_REGPARM 0x0003 /* i386 style regparm call (3 regs) */
+/* A pure function only reads its arguments and global variables and
+ cannot raise exceptions. Hence a call to a pure function can be
+ safely suppressed if the return value is not used. */
+#define TCG_CALL_PURE 0x0010
+
+/* used to align parameters */
+#define TCG_CALL_DUMMY_TCGV MAKE_TCGV(-1)
+#define TCG_CALL_DUMMY_ARG ((TCGArg)(-1))
+
+typedef enum {
+ TCG_COND_EQ,
+ TCG_COND_NE,
+ TCG_COND_LT,
+ TCG_COND_GE,
+ TCG_COND_LE,
+ TCG_COND_GT,
+ /* unsigned */
+ TCG_COND_LTU,
+ TCG_COND_GEU,
+ TCG_COND_LEU,
+ TCG_COND_GTU,
+} TCGCond;
+
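+/* state of a temporary's value during register allocation */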
+#define TEMP_VAL_DEAD 0
+#define TEMP_VAL_REG 1
+#define TEMP_VAL_MEM 2
+#define TEMP_VAL_CONST 3
+
+/* XXX: optimize memory layout */
+typedef struct TCGTemp {
+ TCGType base_type;
+ TCGType type;
+ int val_type;
+ int reg;
+ tcg_target_long val;
+ int mem_reg;
+ tcg_target_long mem_offset;
+ unsigned int fixed_reg:1;
+ unsigned int mem_coherent:1;
+ unsigned int mem_allocated:1;
+ unsigned int temp_local:1; /* If true, the temp is saved across
+ basic blocks. Otherwise, it is not
+ preserved across basic blocks. */
+ unsigned int temp_allocated:1; /* never used for code gen */
+ /* index of next free temp of same base type, -1 if end */
+ int next_free_temp;
+ const char *name;
+} TCGTemp;
+
+typedef struct TCGHelperInfo {
+ tcg_target_ulong func;
+ const char *name;
+} TCGHelperInfo;
+
+typedef struct TCGContext TCGContext;
+
+struct TCGContext {
+ uint8_t *pool_cur, *pool_end;
+ TCGPool *pool_first, *pool_current;
+ TCGLabel *labels;
+ int nb_labels;
+ TCGTemp *temps; /* globals first, temps after */
+ int nb_globals;
+ int nb_temps;
+ /* index of free temps, -1 if none */
+ int first_free_temp[TCG_TYPE_COUNT * 2];
+
+ /* goto_tb support */
+ uint8_t *code_buf;
+ unsigned long *tb_next;
+ uint16_t *tb_next_offset;
+ uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
+
+ /* liveness analysis */
+ uint16_t *op_dead_iargs; /* for each operation, bit n tells whether
+ input argument n is dead after the op */
+
+ /* tells which temporary a given register currently holds. It does
+ not take fixed registers into account */
+ int reg_to_temp[TCG_TARGET_NB_REGS];
+ TCGRegSet reserved_regs;
+ tcg_target_long current_frame_offset;
+ tcg_target_long frame_start;
+ tcg_target_long frame_end;
+ int frame_reg;
+
+ uint8_t *code_ptr;
+ TCGTemp static_temps[TCG_MAX_TEMPS];
+
+ TCGHelperInfo *helpers;
+ int nb_helpers;
+ int allocated_helpers;
+ int helpers_sorted;
+
+#ifdef CONFIG_PROFILER
+ /* profiling info */
+ int64_t tb_count1;
+ int64_t tb_count;
+ int64_t op_count; /* total insn count */
+ int op_count_max; /* max insn per TB */
+ int64_t temp_count;
+ int temp_count_max;
+ int64_t old_op_count;
+ int64_t del_op_count;
+ int64_t code_in_len;
+ int64_t code_out_len;
+ int64_t interm_time;
+ int64_t code_time;
+ int64_t la_time;
+ int64_t restore_count;
+ int64_t restore_time;
+#endif
+};
+
+extern TCGContext tcg_ctx;
+extern uint16_t *gen_opc_ptr;
+extern TCGArg *gen_opparam_ptr;
+extern uint16_t gen_opc_buf[];
+extern TCGArg gen_opparam_buf[];
+
+/* pool based memory allocation */
+
+void *tcg_malloc_internal(TCGContext *s, int size);
+void tcg_pool_reset(TCGContext *s);
+void tcg_pool_delete(TCGContext *s);
+
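+/* fast path allocator: round the size up to 'long' alignment and bump
+ the current pool pointer; fall back to tcg_malloc_internal() when the
+ current pool chunk is exhausted */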
+static inline void *tcg_malloc(int size)
+{
+ TCGContext *s = &tcg_ctx;
+ uint8_t *ptr, *ptr_end;
+ size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
+ ptr = s->pool_cur;
+ ptr_end = ptr + size;
+ if (unlikely(ptr_end > s->pool_end)) {
+ return tcg_malloc_internal(&tcg_ctx, size);
+ } else {
+ s->pool_cur = ptr_end;
+ return ptr;
+ }
+}
+
+void tcg_context_init(TCGContext *s);
+void tcg_func_start(TCGContext *s);
+
+int dyngen_code(TCGContext *s, uint8_t *gen_code_buf);
+int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset);
+
+void tcg_set_frame(TCGContext *s, int reg,
+ tcg_target_long start, tcg_target_long size);
+TCGv tcg_global_reg_new(TCGType type, int reg, const char *name);
+TCGv tcg_global_reg2_new_hack(TCGType type, int reg1, int reg2,
+ const char *name);
+TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset,
+ const char *name);
+TCGv tcg_temp_new_internal(TCGType type, int temp_local);
+static inline TCGv tcg_temp_new(TCGType type)
+{
+ return tcg_temp_new_internal(type, 0);
+}
+static inline TCGv tcg_temp_local_new(TCGType type)
+{
+ return tcg_temp_new_internal(type, 1);
+}
+void tcg_temp_free(TCGv arg);
+char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGv arg);
+void tcg_dump_info(FILE *f,
+ int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
+
+#define TCG_CT_ALIAS 0x80
+#define TCG_CT_IALIAS 0x40
+#define TCG_CT_REG 0x01
+#define TCG_CT_CONST 0x02 /* any constant of register size */
+
+typedef struct TCGArgConstraint {
+ uint16_t ct;
+ uint8_t alias_index;
+ union {
+ TCGRegSet regs;
+ } u;
+} TCGArgConstraint;
+
+#define TCG_MAX_OP_ARGS 16
+
+#define TCG_OPF_BB_END 0x01 /* instruction defines the end of a basic
+ block */
+#define TCG_OPF_CALL_CLOBBER 0x02 /* instruction clobbers call registers
+ and potentially updates globals */
+#define TCG_OPF_SIDE_EFFECTS 0x04 /* instruction has side effects: it
+ cannot be removed even if its
+ outputs are not used */
+
+typedef struct TCGOpDef {
+ const char *name;
+ uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
+ uint8_t flags;
+ uint16_t copy_size;
+ TCGArgConstraint *args_ct;
+ int *sorted_args;
+} TCGOpDef;
+
+typedef struct TCGTargetOpDef {
+ int op;
+ const char *args_ct_str[TCG_MAX_OP_ARGS];
+} TCGTargetOpDef;
+
+extern TCGOpDef tcg_op_defs[];
+
+void tcg_target_init(TCGContext *s);
+void tcg_target_qemu_prologue(TCGContext *s);
+
+#define tcg_abort() \
+do {\
+ fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
+ abort();\
+} while (0)
+
+void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
+
+void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags,
+ unsigned int nb_rets, const TCGv *rets,
+ unsigned int nb_params, const TCGv *args1);
+void tcg_gen_shifti_i64(TCGv ret, TCGv arg1,
+ int c, int right, int arith);
+
+/* only used for debugging purposes */
+void tcg_register_helper(void *func, const char *name);
+#define TCG_HELPER(func) tcg_register_helper(func, #func)
+const char *tcg_helper_get_name(TCGContext *s, void *func);
+void tcg_dump_ops(TCGContext *s, FILE *outfile);
+
+void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
+TCGv tcg_const_i32(int32_t val);
+TCGv tcg_const_i64(int64_t val);
+
+#if TCG_TARGET_REG_BITS == 32
+#define tcg_const_ptr tcg_const_i32
+#define tcg_add_ptr tcg_add_i32
+#define tcg_sub_ptr tcg_sub_i32
+#else
+#define tcg_const_ptr tcg_const_i64
+#define tcg_add_ptr tcg_add_i64
+#define tcg_sub_ptr tcg_sub_i64
+#endif
+
+void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
+ int label_index, long addend);
+const TCGArg *tcg_gen_code_op(TCGContext *s, int opc, const TCGArg *args1,
+ unsigned int dead_iargs);
+
+const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr);
+
+/* tcg-runtime.c */
+int64_t tcg_helper_shl_i64(int64_t arg1, int64_t arg2);
+int64_t tcg_helper_shr_i64(int64_t arg1, int64_t arg2);
+int64_t tcg_helper_sar_i64(int64_t arg1, int64_t arg2);
+int64_t tcg_helper_div_i64(int64_t arg1, int64_t arg2);
+int64_t tcg_helper_rem_i64(int64_t arg1, int64_t arg2);
+uint64_t tcg_helper_divu_i64(uint64_t arg1, uint64_t arg2);
+uint64_t tcg_helper_remu_i64(uint64_t arg1, uint64_t arg2);
+
+extern uint8_t code_gen_prologue[];
+#if defined(__powerpc__) && !defined(__powerpc64__)
+#define tcg_qemu_tb_exec(tb_ptr) \
+ ((long REGPARM __attribute__ ((longcall)) (*)(void *))code_gen_prologue)(tb_ptr)
+#else
+#define tcg_qemu_tb_exec(tb_ptr) ((long REGPARM (*)(void *))code_gen_prologue)(tb_ptr)
+#endif
diff --git a/tcg/x86_64/tcg-target.c b/tcg/x86_64/tcg-target.c
new file mode 100644
index 0000000..304a0c3
--- /dev/null
+++ b/tcg/x86_64/tcg-target.c
@@ -0,0 +1,1307 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%rax",
+ "%rcx",
+ "%rdx",
+ "%rbx",
+ "%rsp",
+ "%rbp",
+ "%rsi",
+ "%rdi",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+ "%r15",
+};
+
+int tcg_target_reg_alloc_order[] = {
+ TCG_REG_RDI,
+ TCG_REG_RSI,
+ TCG_REG_RDX,
+ TCG_REG_RCX,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_RAX,
+ TCG_REG_R10,
+ TCG_REG_R11,
+
+ TCG_REG_RBP,
+ TCG_REG_RBX,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+};
+
+const int tcg_target_call_iarg_regs[6] = {
+ TCG_REG_RDI,
+ TCG_REG_RSI,
+ TCG_REG_RDX,
+ TCG_REG_RCX,
+ TCG_REG_R8,
+ TCG_REG_R9,
+};
+
+const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_RAX,
+ TCG_REG_RDX
+};
+
+static uint8_t *tb_ret_addr;
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch(type) {
+ case R_X86_64_32:
+ if (value != (uint32_t)value)
+ tcg_abort();
+ *(uint32_t *)code_ptr = value;
+ break;
+ case R_X86_64_32S:
+ if (value != (int32_t)value)
+ tcg_abort();
+ *(uint32_t *)code_ptr = value;
+ break;
+ case R_386_PC32:
+ value -= (long)code_ptr;
+ if (value != (int32_t)value)
+ tcg_abort();
+ *(uint32_t *)code_ptr = value;
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 6;
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch(ct_str[0]) {
+ case 'a':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_RAX);
+ break;
+ case 'b':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_RBX);
+ break;
+ case 'c':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_RCX);
+ break;
+ case 'd':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_RDX);
+ break;
+ case 'S':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_RSI);
+ break;
+ case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, TCG_REG_RDI);
+ break;
+ case 'q':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xf);
+ break;
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffff);
+ break;
+ case 'L': /* qemu_ld/st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
+ break;
+ case 'e':
+ ct->ct |= TCG_CT_CONST_S32;
+ break;
+ case 'Z':
+ ct->ct |= TCG_CT_CONST_U32;
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val)
+ return 1;
+ else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val)
+ return 1;
+ else
+ return 0;
+}
+
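+/* x86 ALU operation codes, used both in the register-register opcode
+ (0x01 | (op << 3)) and as the ModRM reg field of the 0x81/0x83
+ immediate forms */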
+#define ARITH_ADD 0
+#define ARITH_OR 1
+#define ARITH_ADC 2
+#define ARITH_SBB 3
+#define ARITH_AND 4
+#define ARITH_SUB 5
+#define ARITH_XOR 6
+#define ARITH_CMP 7
+
+#define SHIFT_SHL 4
+#define SHIFT_SHR 5
+#define SHIFT_SAR 7
+
+#define JCC_JMP (-1)
+#define JCC_JO 0x0
+#define JCC_JNO 0x1
+#define JCC_JB 0x2
+#define JCC_JAE 0x3
+#define JCC_JE 0x4
+#define JCC_JNE 0x5
+#define JCC_JBE 0x6
+#define JCC_JA 0x7
+#define JCC_JS 0x8
+#define JCC_JNS 0x9
+#define JCC_JP 0xa
+#define JCC_JNP 0xb
+#define JCC_JL 0xc
+#define JCC_JGE 0xd
+#define JCC_JLE 0xe
+#define JCC_JG 0xf
+
+#define P_EXT 0x100 /* 0x0f opcode prefix */
+#define P_REXW 0x200 /* set rex.w = 1 */
+#define P_REXB 0x400 /* force rex use for byte registers */
+
+static const uint8_t tcg_cond_to_jcc[10] = {
+ [TCG_COND_EQ] = JCC_JE,
+ [TCG_COND_NE] = JCC_JNE,
+ [TCG_COND_LT] = JCC_JL,
+ [TCG_COND_GE] = JCC_JGE,
+ [TCG_COND_LE] = JCC_JLE,
+ [TCG_COND_GT] = JCC_JG,
+ [TCG_COND_LTU] = JCC_JB,
+ [TCG_COND_GEU] = JCC_JAE,
+ [TCG_COND_LEU] = JCC_JBE,
+ [TCG_COND_GTU] = JCC_JA,
+};
+
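+/* emit an optional REX prefix (REX.W from P_REXW, REX.R/X/B from the
+ high bits of r, x and rm, forced by P_REXB), the optional 0x0f escape
+ byte (P_EXT) and finally the opcode byte itself */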
+static inline void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
+{
+ int rex;
+ rex = ((opc >> 6) & 0x8) | ((r >> 1) & 0x4) |
+ ((x >> 2) & 2) | ((rm >> 3) & 1);
+ if (rex || (opc & P_REXB)) {
+ tcg_out8(s, rex | 0x40);
+ }
+ if (opc & P_EXT)
+ tcg_out8(s, 0x0f);
+ tcg_out8(s, opc);
+}
+
+static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
+{
+ tcg_out_opc(s, opc, r, rm, 0);
+ tcg_out8(s, 0xc0 | ((r & 7) << 3) | (rm & 7));
+}
+
+/* rm < 0 means no base register; (-rm - 1) gives the number of
+ immediate bytes that follow (used to compute the instruction-pointer
+ relative displacement) */
+static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
+ tcg_target_long offset)
+{
+ if (rm < 0) {
+ tcg_target_long val;
+ tcg_out_opc(s, opc, r, 0, 0);
+ val = offset - ((tcg_target_long)s->code_ptr + 5 + (-rm - 1));
+ if (val == (int32_t)val) {
+ /* eip relative */
+ tcg_out8(s, 0x05 | ((r & 7) << 3));
+ tcg_out32(s, val);
+ } else if (offset == (int32_t)offset) {
+ tcg_out8(s, 0x04 | ((r & 7) << 3));
+ tcg_out8(s, 0x25); /* sib */
+ tcg_out32(s, offset);
+ } else {
+ tcg_abort();
+ }
+ } else if (offset == 0 && (rm & 7) != TCG_REG_RBP) {
+ tcg_out_opc(s, opc, r, rm, 0);
+ if ((rm & 7) == TCG_REG_RSP) {
+ tcg_out8(s, 0x04 | ((r & 7) << 3));
+ tcg_out8(s, 0x24);
+ } else {
+ tcg_out8(s, 0x00 | ((r & 7) << 3) | (rm & 7));
+ }
+ } else if ((int8_t)offset == offset) {
+ tcg_out_opc(s, opc, r, rm, 0);
+ if ((rm & 7) == TCG_REG_RSP) {
+ tcg_out8(s, 0x44 | ((r & 7) << 3));
+ tcg_out8(s, 0x24);
+ } else {
+ tcg_out8(s, 0x40 | ((r & 7) << 3) | (rm & 7));
+ }
+ tcg_out8(s, offset);
+ } else {
+ tcg_out_opc(s, opc, r, rm, 0);
+ if ((rm & 7) == TCG_REG_RSP) {
+ tcg_out8(s, 0x84 | ((r & 7) << 3));
+ tcg_out8(s, 0x24);
+ } else {
+ tcg_out8(s, 0x80 | ((r & 7) << 3) | (rm & 7));
+ }
+ tcg_out32(s, offset);
+ }
+}
+
+#if defined(CONFIG_SOFTMMU)
+/* XXX: incomplete. index must be different from ESP */
+static void tcg_out_modrm_offset2(TCGContext *s, int opc, int r, int rm,
+ int index, int shift,
+ tcg_target_long offset)
+{
+ int mod;
+ if (rm == -1)
+ tcg_abort();
+ if (offset == 0 && (rm & 7) != TCG_REG_RBP) {
+ mod = 0;
+ } else if (offset == (int8_t)offset) {
+ mod = 0x40;
+ } else if (offset == (int32_t)offset) {
+ mod = 0x80;
+ } else {
+ tcg_abort();
+ }
+ if (index == -1) {
+ tcg_out_opc(s, opc, r, rm, 0);
+ if ((rm & 7) == TCG_REG_RSP) {
+ tcg_out8(s, mod | ((r & 7) << 3) | 0x04);
+ tcg_out8(s, 0x04 | (rm & 7));
+ } else {
+ tcg_out8(s, mod | ((r & 7) << 3) | (rm & 7));
+ }
+ } else {
+ tcg_out_opc(s, opc, r, rm, index);
+ tcg_out8(s, mod | ((r & 7) << 3) | 0x04);
+ tcg_out8(s, (shift << 6) | ((index & 7) << 3) | (rm & 7));
+ }
+ if (mod == 0x40) {
+ tcg_out8(s, offset);
+ } else if (mod == 0x80) {
+ tcg_out32(s, offset);
+ }
+}
+#endif
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out_modrm(s, 0x8b | P_REXW, ret, arg);
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ if (arg == 0) {
+ tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret); /* xor r0,r0 */
+ } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
+ tcg_out_opc(s, 0xb8 + (ret & 7), 0, ret, 0); /* movl $imm32, reg */
+ tcg_out32(s, arg);
+ } else if (arg == (int32_t)arg) {
+ tcg_out_modrm(s, 0xc7 | P_REXW, 0, ret); /* movq $simm32, reg (sign-extended) */
+ tcg_out32(s, arg);
+ } else {
+ tcg_out_opc(s, (0xb8 + (ret & 7)) | P_REXW, 0, ret, 0); /* movabsq $imm64, reg */
+ tcg_out32(s, arg);
+ tcg_out32(s, arg >> 32);
+ }
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2); /* movl */
+ else
+ tcg_out_modrm_offset(s, 0x8b | P_REXW, ret, arg1, arg2); /* movq */
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2); /* movl */
+ else
+ tcg_out_modrm_offset(s, 0x89 | P_REXW, arg, arg1, arg2); /* movq */
+}
+
+static inline void tgen_arithi32(TCGContext *s, int c, int r0, int32_t val)
+{
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, 0x83, c, r0);
+ tcg_out8(s, val);
+ } else if (c == ARITH_AND && val == 0xffu) {
+ /* movzbl */
+ tcg_out_modrm(s, 0xb6 | P_EXT | P_REXB, r0, r0);
+ } else if (c == ARITH_AND && val == 0xffffu) {
+ /* movzwl */
+ tcg_out_modrm(s, 0xb7 | P_EXT, r0, r0);
+ } else {
+ tcg_out_modrm(s, 0x81, c, r0);
+ tcg_out32(s, val);
+ }
+}
+
+static inline void tgen_arithi64(TCGContext *s, int c, int r0, int64_t val)
+{
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, 0x83 | P_REXW, c, r0);
+ tcg_out8(s, val);
+ } else if (c == ARITH_AND && val == 0xffu) {
+ /* movzbl */
+ tcg_out_modrm(s, 0xb6 | P_EXT | P_REXW, r0, r0);
+ } else if (c == ARITH_AND && val == 0xffffu) {
+ /* movzwl */
+ tcg_out_modrm(s, 0xb7 | P_EXT | P_REXW, r0, r0);
+ } else if (c == ARITH_AND && val == 0xffffffffu) {
+ /* 32-bit mov zero extends */
+ tcg_out_modrm(s, 0x8b, r0, r0);
+ } else if (val == (int32_t)val) {
+ tcg_out_modrm(s, 0x81 | P_REXW, c, r0);
+ tcg_out32(s, val);
+ } else if (c == ARITH_AND && val == (uint32_t)val) {
+ tcg_out_modrm(s, 0x81, c, r0);
+ tcg_out32(s, val);
+ } else {
+ tcg_abort();
+ }
+}
+
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val != 0)
+ tgen_arithi64(s, ARITH_ADD, reg, val);
+}
+
+static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
+{
+ int32_t val, val1;
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value) {
+ val = l->u.value - (tcg_target_long)s->code_ptr;
+ val1 = val - 2;
+ if ((int8_t)val1 == val1) {
+ if (opc == -1)
+ tcg_out8(s, 0xeb);
+ else
+ tcg_out8(s, 0x70 + opc);
+ tcg_out8(s, val1);
+ } else {
+ if (opc == -1) {
+ tcg_out8(s, 0xe9);
+ tcg_out32(s, val - 5);
+ } else {
+ tcg_out8(s, 0x0f);
+ tcg_out8(s, 0x80 + opc);
+ tcg_out32(s, val - 6);
+ }
+ }
+ } else {
+ if (opc == -1) {
+ tcg_out8(s, 0xe9);
+ } else {
+ tcg_out8(s, 0x0f);
+ tcg_out8(s, 0x80 + opc);
+ }
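+ /* the label is not resolved yet: record a pc-relative relocation and
+ reserve the 4 byte displacement */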
+ tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
+ s->code_ptr += 4;
+ }
+}
+
+static void tcg_out_brcond(TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index, int rexw)
+{
+ if (const_arg2) {
+ if (arg2 == 0) {
+ /* test r, r */
+ tcg_out_modrm(s, 0x85 | rexw, arg1, arg1);
+ } else {
+ if (rexw)
+ tgen_arithi64(s, ARITH_CMP, arg1, arg2);
+ else
+ tgen_arithi32(s, ARITH_CMP, arg1, arg2);
+ }
+ } else {
+ tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3) | rexw, arg2, arg1);
+ }
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
+#if defined(CONFIG_SOFTMMU)
+ uint8_t *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ r0 = TCG_REG_RDI;
+ r1 = TCG_REG_RSI;
+
+#if TARGET_LONG_BITS == 32
+ rexw = 0;
+#else
+ rexw = P_REXW;
+#endif
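+ /* softmmu: compute the TLB index from the address, compare the cached
+ tag against the page address, and call the load helper on a miss;
+ on a hit the TLB addend is added and the access is done inline */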
+#if defined(CONFIG_SOFTMMU)
+ /* mov */
+ tcg_out_modrm(s, 0x8b | rexw, r1, addr_reg);
+
+ /* mov */
+ tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);
+
+ tcg_out_modrm(s, 0xc1 | rexw, 5, r1); /* shr $x, r1 */
+ tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+
+ tcg_out_modrm(s, 0x81 | rexw, 4, r0); /* andl $x, r0 */
+ tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
+ tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
+ tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ /* lea offset(r1, env), r1 */
+ tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_read));
+
+ /* cmp 0(r1), r0 */
+ tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);
+
+ /* mov */
+ tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);
+
+ /* je label1 */
+ tcg_out8(s, 0x70 + JCC_JE);
+ label1_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* XXX: move that code at the end of the TB */
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RSI, mem_index);
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+
+ switch(opc) {
+ case 0 | 4:
+ /* movsbq */
+ tcg_out_modrm(s, 0xbe | P_EXT | P_REXW, data_reg, TCG_REG_RAX);
+ break;
+ case 1 | 4:
+ /* movswq */
+ tcg_out_modrm(s, 0xbf | P_EXT | P_REXW, data_reg, TCG_REG_RAX);
+ break;
+ case 2 | 4:
+ /* movslq */
+ tcg_out_modrm(s, 0x63 | P_REXW, data_reg, TCG_REG_RAX);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ default:
+ /* movl */
+ tcg_out_modrm(s, 0x8b, data_reg, TCG_REG_RAX);
+ break;
+ case 3:
+ tcg_out_mov(s, data_reg, TCG_REG_RAX);
+ break;
+ }
+
+ /* jmp label2 */
+ tcg_out8(s, 0xeb);
+ label2_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* label1: */
+ *label1_ptr = s->code_ptr - label1_ptr - 1;
+
+ /* add x(r1), r0 */
+ tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_read));
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 1;
+#else
+ bswap = 0;
+#endif
+ switch(opc) {
+ case 0:
+ /* movzbl */
+ tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
+ break;
+ case 0 | 4:
+ /* movsbX */
+ tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, 0);
+ break;
+ case 1:
+ /* movzwl */
+ tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
+ if (bswap) {
+ /* rolw $8, data_reg */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm(s, 0xc1, 0, data_reg);
+ tcg_out8(s, 8);
+ }
+ break;
+ case 1 | 4:
+ if (bswap) {
+ /* movzwl */
+ tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
+ /* rolw $8, data_reg */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm(s, 0xc1, 0, data_reg);
+ tcg_out8(s, 8);
+
+ /* movswX data_reg, data_reg */
+ tcg_out_modrm(s, 0xbf | P_EXT | rexw, data_reg, data_reg);
+ } else {
+ /* movswX */
+ tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, 0);
+ }
+ break;
+ case 2:
+ /* movl (r0), data_reg */
+ tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
+ if (bswap) {
+ /* bswap */
+ tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
+ }
+ break;
+ case 2 | 4:
+ if (bswap) {
+ /* movl (r0), data_reg */
+ tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
+ /* bswap */
+ tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
+ /* movslq */
+ tcg_out_modrm(s, 0x63 | P_REXW, data_reg, data_reg);
+ } else {
+ /* movslq */
+ tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, 0);
+ }
+ break;
+ case 3:
+ /* movq (r0), data_reg */
+ tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, 0);
+ if (bswap) {
+ /* bswap */
+ tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT | P_REXW, 0, data_reg, 0);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = s->code_ptr - label2_ptr - 1;
+#endif
+}
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
+#if defined(CONFIG_SOFTMMU)
+ uint8_t *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+ s_bits = opc;
+
+ r0 = TCG_REG_RDI;
+ r1 = TCG_REG_RSI;
+
+#if TARGET_LONG_BITS == 32
+ rexw = 0;
+#else
+ rexw = P_REXW;
+#endif
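+ /* softmmu: same TLB lookup as the load path, but the tag is compared
+ against addr_write and the store helper is called on a miss */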
+#if defined(CONFIG_SOFTMMU)
+ /* mov */
+ tcg_out_modrm(s, 0x8b | rexw, r1, addr_reg);
+
+ /* mov */
+ tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);
+
+ tcg_out_modrm(s, 0xc1 | rexw, 5, r1); /* shr $x, r1 */
+ tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+
+ tcg_out_modrm(s, 0x81 | rexw, 4, r0); /* andl $x, r0 */
+ tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
+ tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
+ tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ /* lea offset(r1, env), r1 */
+ tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_write));
+
+ /* cmp 0(r1), r0 */
+ tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);
+
+ /* mov */
+ tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);
+
+ /* je label1 */
+ tcg_out8(s, 0x70 + JCC_JE);
+ label1_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* XXX: move that code at the end of the TB */
+ switch(opc) {
+ case 0:
+ /* movzbl */
+ tcg_out_modrm(s, 0xb6 | P_EXT | P_REXB, TCG_REG_RSI, data_reg);
+ break;
+ case 1:
+ /* movzwl */
+ tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_RSI, data_reg);
+ break;
+ case 2:
+ /* movl */
+ tcg_out_modrm(s, 0x8b, TCG_REG_RSI, data_reg);
+ break;
+ default:
+ case 3:
+ tcg_out_mov(s, TCG_REG_RSI, data_reg);
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
+ (tcg_target_long)s->code_ptr - 4);
+
+ /* jmp label2 */
+ tcg_out8(s, 0xeb);
+ label2_ptr = s->code_ptr;
+ s->code_ptr++;
+
+ /* label1: */
+ *label1_ptr = s->code_ptr - label1_ptr - 1;
+
+ /* add x(r1), r0 */
+ tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_write));
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 1;
+#else
+ bswap = 0;
+#endif
+ switch(opc) {
+ case 0:
+ /* movb */
+ tcg_out_modrm_offset(s, 0x88 | P_REXB, data_reg, r0, 0);
+ break;
+ case 1:
+ if (bswap) {
+ tcg_out_modrm(s, 0x8b, r1, data_reg); /* movl */
+ tcg_out8(s, 0x66); /* rolw $8, %ecx */
+ tcg_out_modrm(s, 0xc1, 0, r1);
+ tcg_out8(s, 8);
+ data_reg = r1;
+ }
+ /* movw */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
+ break;
+ case 2:
+ if (bswap) {
+ tcg_out_modrm(s, 0x8b, r1, data_reg); /* movl */
+ /* bswap data_reg */
+ tcg_out_opc(s, (0xc8 + r1) | P_EXT, 0, r1, 0);
+ data_reg = r1;
+ }
+ /* movl */
+ tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out_mov(s, r1, data_reg);
+ /* bswap data_reg */
+ tcg_out_opc(s, (0xc8 + r1) | P_EXT | P_REXW, 0, r1, 0);
+ data_reg = r1;
+ }
+ /* movq */
+ tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, 0);
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = s->code_ptr - label2_ptr - 1;
+#endif
+}
+
+static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch(opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, args[0]);
+ tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
+ tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+ tcg_out8(s, 0xe9); /* jmp im */
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ tcg_out32(s, 0);
+ } else {
+ /* indirect jump method */
+ /* jmp Ev */
+ tcg_out_modrm_offset(s, 0xff, 4, -1,
+ (tcg_target_long)(s->tb_next +
+ args[0]));
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ if (const_args[0]) {
+ tcg_out8(s, 0xe8);
+ tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
+ } else {
+ tcg_out_modrm(s, 0xff, 2, args[0]);
+ }
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0]) {
+ tcg_out8(s, 0xe9);
+ tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
+ } else {
+ tcg_out_modrm(s, 0xff, 4, args[0]);
+ }
+ break;
+ case INDEX_op_br:
+ tcg_out_jxx(s, JCC_JMP, args[0]);
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
+ break;
+ case INDEX_op_movi_i64:
+ tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
+ break;
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ /* movzbl */
+ tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld8s_i32:
+ /* movsbl */
+ tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld8s_i64:
+ /* movsbq */
+ tcg_out_modrm_offset(s, 0xbe | P_EXT | P_REXW, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ /* movzwl */
+ tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16s_i32:
+ /* movswl */
+ tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16s_i64:
+ /* movswq */
+ tcg_out_modrm_offset(s, 0xbf | P_EXT | P_REXW, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32u_i64:
+ /* movl */
+ tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld32s_i64:
+ /* movslq */
+ tcg_out_modrm_offset(s, 0x63 | P_REXW, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i64:
+ /* movq */
+ tcg_out_modrm_offset(s, 0x8b | P_REXW, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ /* movb */
+ tcg_out_modrm_offset(s, 0x88 | P_REXB, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ /* movw */
+ tcg_out8(s, 0x66);
+ tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ /* movl */
+ tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i64:
+ /* movq */
+ tcg_out_modrm_offset(s, 0x89 | P_REXW, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_sub_i32:
+ c = ARITH_SUB;
+ goto gen_arith32;
+ case INDEX_op_and_i32:
+ c = ARITH_AND;
+ goto gen_arith32;
+ case INDEX_op_or_i32:
+ c = ARITH_OR;
+ goto gen_arith32;
+ case INDEX_op_xor_i32:
+ c = ARITH_XOR;
+ goto gen_arith32;
+ case INDEX_op_add_i32:
+ c = ARITH_ADD;
+ gen_arith32:
+ if (const_args[2]) {
+ tgen_arithi32(s, c, args[0], args[2]);
+ } else {
+ tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
+ }
+ break;
+
+ case INDEX_op_sub_i64:
+ c = ARITH_SUB;
+ goto gen_arith64;
+ case INDEX_op_and_i64:
+ c = ARITH_AND;
+ goto gen_arith64;
+ case INDEX_op_or_i64:
+ c = ARITH_OR;
+ goto gen_arith64;
+ case INDEX_op_xor_i64:
+ c = ARITH_XOR;
+ goto gen_arith64;
+ case INDEX_op_add_i64:
+ c = ARITH_ADD;
+ gen_arith64:
+ if (const_args[2]) {
+ tgen_arithi64(s, c, args[0], args[2]);
+ } else {
+ tcg_out_modrm(s, 0x01 | (c << 3) | P_REXW, args[2], args[0]);
+ }
+ break;
+
+ case INDEX_op_mul_i32:
+ if (const_args[2]) {
+ int32_t val;
+ val = args[2];
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, 0x6b, args[0], args[0]);
+ tcg_out8(s, val);
+ } else {
+ tcg_out_modrm(s, 0x69, args[0], args[0]);
+ tcg_out32(s, val);
+ }
+ } else {
+ tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
+ }
+ break;
+ case INDEX_op_mul_i64:
+ if (const_args[2]) {
+ int32_t val;
+ val = args[2];
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, 0x6b | P_REXW, args[0], args[0]);
+ tcg_out8(s, val);
+ } else {
+ tcg_out_modrm(s, 0x69 | P_REXW, args[0], args[0]);
+ tcg_out32(s, val);
+ }
+ } else {
+ tcg_out_modrm(s, 0xaf | P_EXT | P_REXW, args[0], args[2]);
+ }
+ break;
+ case INDEX_op_div2_i32:
+ tcg_out_modrm(s, 0xf7, 7, args[4]);
+ break;
+ case INDEX_op_divu2_i32:
+ tcg_out_modrm(s, 0xf7, 6, args[4]);
+ break;
+ case INDEX_op_div2_i64:
+ tcg_out_modrm(s, 0xf7 | P_REXW, 7, args[4]);
+ break;
+ case INDEX_op_divu2_i64:
+ tcg_out_modrm(s, 0xf7 | P_REXW, 6, args[4]);
+ break;
+
+ case INDEX_op_shl_i32:
+ c = SHIFT_SHL;
+ gen_shift32:
+ if (const_args[2]) {
+ if (args[2] == 1) {
+ tcg_out_modrm(s, 0xd1, c, args[0]);
+ } else {
+ tcg_out_modrm(s, 0xc1, c, args[0]);
+ tcg_out8(s, args[2]);
+ }
+ } else {
+ tcg_out_modrm(s, 0xd3, c, args[0]);
+ }
+ break;
+ case INDEX_op_shr_i32:
+ c = SHIFT_SHR;
+ goto gen_shift32;
+ case INDEX_op_sar_i32:
+ c = SHIFT_SAR;
+ goto gen_shift32;
+
+ case INDEX_op_shl_i64:
+ c = SHIFT_SHL;
+ gen_shift64:
+ if (const_args[2]) {
+ if (args[2] == 1) {
+ tcg_out_modrm(s, 0xd1 | P_REXW, c, args[0]);
+ } else {
+ tcg_out_modrm(s, 0xc1 | P_REXW, c, args[0]);
+ tcg_out8(s, args[2]);
+ }
+ } else {
+ tcg_out_modrm(s, 0xd3 | P_REXW, c, args[0]);
+ }
+ break;
+ case INDEX_op_shr_i64:
+ c = SHIFT_SHR;
+ goto gen_shift64;
+ case INDEX_op_sar_i64:
+ c = SHIFT_SAR;
+ goto gen_shift64;
+
+ case INDEX_op_brcond_i32:
+ tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
+ args[3], 0);
+ break;
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
+ args[3], P_REXW);
+ break;
+
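+ /* bswap is 0F C8+rd: the register number is folded into the low
+ three bits of the opcode byte. */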
+ case INDEX_op_bswap_i32:
+ tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT, 0, args[0], 0);
+ break;
+ case INDEX_op_bswap_i64:
+ tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT | P_REXW, 0, args[0], 0);
+ break;
+
+ case INDEX_op_neg_i32:
+ tcg_out_modrm(s, 0xf7, 3, args[0]);
+ break;
+ case INDEX_op_neg_i64:
+ tcg_out_modrm(s, 0xf7 | P_REXW, 3, args[0]);
+ break;
+
+ case INDEX_op_ext8s_i32:
+ tcg_out_modrm(s, 0xbe | P_EXT | P_REXB, args[0], args[1]);
+ break;
+ case INDEX_op_ext16s_i32:
+ tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]);
+ break;
+ case INDEX_op_ext8s_i64:
+ tcg_out_modrm(s, 0xbe | P_EXT | P_REXW, args[0], args[1]);
+ break;
+ case INDEX_op_ext16s_i64:
+ tcg_out_modrm(s, 0xbf | P_EXT | P_REXW, args[0], args[1]);
+ break;
+ case INDEX_op_ext32s_i64:
+ tcg_out_modrm(s, 0x63 | P_REXW, args[0], args[1]);
+ break;
+
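+ /* The third argument of tcg_out_qemu_ld/st encodes log2 of the access
+ size in bits 0-1; bit 2 requests sign extension of the loaded value. */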
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld(s, args, 2 | 4);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, 3);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, 3);
+ break;
+
+ default:
+ tcg_abort();
+ }
+}
+
+static int tcg_target_callee_save_regs[] = {
+ TCG_REG_RBP,
+ TCG_REG_RBX,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ /* TCG_REG_R14, */ /* currently used for the global env (TCG_AREG0),
+ so there is no need to save it here */
+ TCG_REG_R15,
+};
+
+static inline void tcg_out_push(TCGContext *s, int reg)
+{
+ tcg_out_opc(s, (0x50 + (reg & 7)), 0, reg, 0);
+}
+
+static inline void tcg_out_pop(TCGContext *s, int reg)
+{
+ tcg_out_opc(s, (0x58 + (reg & 7)), 0, reg, 0);
+}
+
+/* Generate global QEMU prologue and epilogue code */
+void tcg_target_qemu_prologue(TCGContext *s)
+{
+ int i, frame_size, push_size, stack_addend;
+
+ /* TB prologue */
+ /* save all callee saved registers */
+ for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_push(s, tcg_target_callee_save_regs[i]);
+ }
+ /* reserve some stack space */
+ push_size = 8 + ARRAY_SIZE(tcg_target_callee_save_regs) * 8;
+ frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
+ frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
+ ~(TCG_TARGET_STACK_ALIGN - 1);
+ stack_addend = frame_size - push_size;
+ tcg_out_addi(s, TCG_REG_RSP, -stack_addend);
+
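+ /* The host address of the translated block is passed in the first
+ argument register, so entering a TB is just an indirect jump. */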
+ tcg_out_modrm(s, 0xff, 4, TCG_REG_RDI); /* jmp *%rdi */
+
+ /* TB epilogue */
+ tb_ret_addr = s->code_ptr;
+ tcg_out_addi(s, TCG_REG_RSP, stack_addend);
+ for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
+ tcg_out_pop(s, tcg_target_callee_save_regs[i]);
+ }
+ tcg_out8(s, 0xc3); /* ret */
+}
+
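+/* Operand constraints used below: "r" any register, "0"/"1" alias of
+output operand 0/1, "a"/"c"/"d" fixed RAX/RCX/RDX, "L" a register
+usable by the qemu_ld/st slow path, "i" any immediate, "e"/"Z" a
+sign-/zero-extended 32-bit immediate (see target_parse_constraint
+earlier in this file). */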
+static const TCGTargetOpDef x86_64_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } }, /* XXX: might need a specific constant constraint */
+ { INDEX_op_jmp, { "ri" } }, /* XXX: might need a specific constant constraint */
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "0", "ri" } },
+ { INDEX_op_mul_i32, { "r", "0", "ri" } },
+ { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_sub_i32, { "r", "0", "ri" } },
+ { INDEX_op_and_i32, { "r", "0", "ri" } },
+ { INDEX_op_or_i32, { "r", "0", "ri" } },
+ { INDEX_op_xor_i32, { "r", "0", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "0", "ci" } },
+ { INDEX_op_shr_i32, { "r", "0", "ci" } },
+ { INDEX_op_sar_i32, { "r", "0", "ci" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+
+ { INDEX_op_add_i64, { "r", "0", "re" } },
+ { INDEX_op_mul_i64, { "r", "0", "re" } },
+ { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_sub_i64, { "r", "0", "re" } },
+ { INDEX_op_and_i64, { "r", "0", "reZ" } },
+ { INDEX_op_or_i64, { "r", "0", "re" } },
+ { INDEX_op_xor_i64, { "r", "0", "re" } },
+
+ { INDEX_op_shl_i64, { "r", "0", "ci" } },
+ { INDEX_op_shr_i64, { "r", "0", "ci" } },
+ { INDEX_op_sar_i64, { "r", "0", "ci" } },
+
+ { INDEX_op_brcond_i64, { "r", "re" } },
+
+ { INDEX_op_bswap_i32, { "r", "0" } },
+ { INDEX_op_bswap_i64, { "r", "0" } },
+
+ { INDEX_op_neg_i32, { "r", "0" } },
+ { INDEX_op_neg_i64, { "r", "0" } },
+
+ { INDEX_op_ext8s_i32, { "r", "r"} },
+ { INDEX_op_ext16s_i32, { "r", "r"} },
+ { INDEX_op_ext8s_i64, { "r", "r"} },
+ { INDEX_op_ext16s_i64, { "r", "r"} },
+ { INDEX_op_ext32s_i64, { "r", "r"} },
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L" } },
+
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ /* fail safe */
+ if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
+ tcg_abort();
+
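+ /* all 16 general purpose registers can hold 32-bit and 64-bit values */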
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
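+ /* registers clobbered across calls in the SysV AMD64 calling convention */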
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_RDI) |
+ (1 << TCG_REG_RSI) |
+ (1 << TCG_REG_RDX) |
+ (1 << TCG_REG_RCX) |
+ (1 << TCG_REG_R8) |
+ (1 << TCG_REG_R9) |
+ (1 << TCG_REG_RAX) |
+ (1 << TCG_REG_R10) |
+ (1 << TCG_REG_R11));
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RSP);
+
+ tcg_add_target_add_op_defs(x86_64_op_defs);
+}
diff --git a/tcg/x86_64/tcg-target.h b/tcg/x86_64/tcg-target.h
new file mode 100644
index 0000000..9a0cca0
--- /dev/null
+++ b/tcg/x86_64/tcg-target.h
@@ -0,0 +1,77 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_X86_64 1
+
+#define TCG_TARGET_REG_BITS 64
+//#define TCG_TARGET_WORDS_BIGENDIAN
+
+#define TCG_TARGET_NB_REGS 16
+
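+/* Register numbers follow the hardware encoding: RAX..RDI are 0..7,
+R8..R15 need a REX prefix. */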
+enum {
+ TCG_REG_RAX = 0,
+ TCG_REG_RCX,
+ TCG_REG_RDX,
+ TCG_REG_RBX,
+ TCG_REG_RSP,
+ TCG_REG_RBP,
+ TCG_REG_RSI,
+ TCG_REG_RDI,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+};
+
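+/* extra constant constraints: the value must fit in a sign- or
+zero-extended 32-bit immediate */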
+#define TCG_CT_CONST_S32 0x100
+#define TCG_CT_CONST_U32 0x200
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_RSP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* optional instructions */
+#define TCG_TARGET_HAS_bswap_i32
+#define TCG_TARGET_HAS_bswap_i64
+#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_neg_i64
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_ext8s_i64
+#define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+
+/* Note: must be synced with dyngen-exec.h */
+#define TCG_AREG0 TCG_REG_R14
+#define TCG_AREG1 TCG_REG_R15
+#define TCG_AREG2 TCG_REG_R12
+#define TCG_AREG3 TCG_REG_R13
+
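+/* x86 keeps the instruction cache coherent with data stores, so newly
+generated code needs no explicit flush. */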
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+}