path: root/tcg
author    Marcus Comstedt <marcus@mc.pp.se>    2010-10-16 14:01:07 +0200
committer Marcus Comstedt <marcus@mc.pp.se>    2011-01-27 23:04:37 +0100
commit    74bdaadb718584b216e29c13b9e1226c9f77205e (patch)
tree      d12373d106a4e3e8dd19acdde539935975b3db93 /tcg
parent    316669d58104cb260e2ffa1848f24547b71af49c (diff)
Upstream: Integrate TCG changes for remaining arches
Commit b93177278 integrated upstream's TCG changes for i386 and x86_64.
This commit pulls in the equivalent changes for the remaining arches.

Change-Id: I88a8a0e6124de841bbccba5840b19dd5612a56ed
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/arm/tcg-target.c    1708
-rw-r--r--  tcg/arm/tcg-target.h      99
-rw-r--r--  tcg/hppa/tcg-target.c    974
-rw-r--r--  tcg/hppa/tcg-target.h    203
-rw-r--r--  tcg/ppc/tcg-target.c    1836
-rw-r--r--  tcg/ppc/tcg-target.h      97
-rw-r--r--  tcg/ppc64/tcg-target.c  1703
-rw-r--r--  tcg/ppc64/tcg-target.h   104
-rw-r--r--  tcg/sparc/tcg-target.c  1563
-rw-r--r--  tcg/sparc/tcg-target.h   146
10 files changed, 8433 insertions(+), 0 deletions(-)
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
new file mode 100644
index 0000000..f8d626d
--- /dev/null
+++ b/tcg/arm/tcg-target.c
@@ -0,0 +1,1708 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Andrzej Zaborowski
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef NDEBUG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%r0",
+ "%r1",
+ "%r2",
+ "%r3",
+ "%r4",
+ "%r5",
+ "%r6",
+ "%r7",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+};
+
+static const int tcg_target_call_iarg_regs[4] = {
+ TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
+};
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R0, TCG_REG_R1
+};
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ switch (type) {
+ case R_ARM_ABS32:
+ *(uint32_t *) code_ptr = value;
+ break;
+
+ case R_ARM_CALL:
+ case R_ARM_JUMP24:
+ default:
+ tcg_abort();
+
+ case R_ARM_PC24:
+ *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
+ (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
+ break;
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 4;
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'I':
+ ct->ct |= TCG_CT_CONST_ARM;
+ break;
+
+ case 'r':
+#ifndef CONFIG_SOFTMMU
+ case 'd':
+ case 'D':
+ case 'x':
+ case 'X':
+#endif
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ break;
+
+#ifdef CONFIG_SOFTMMU
+ /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
+ case 'x':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+
+ /* qemu_ld64 data_reg */
+ case 'd':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ /* r1 is still needed to load data_reg2, so don't use it. */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+
+ /* qemu_ld/st64 data_reg2 */
+ case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ /* r0, r1 and optionally r2 will be overwritten by the address
+ * and the low word of data, so don't use these. */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+# if TARGET_LONG_BITS == 64
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
+# endif
+ break;
+
+# if TARGET_LONG_BITS == 64
+ /* qemu_ld/st addr_reg2 */
+ case 'X':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ /* r0 will be overwritten by the low word of base, so don't use it. */
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+# endif
+#endif
+
+ case '1':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ break;
+
+ case '2':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
+ break;
+
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+
+ return 0;
+}
+
+static inline uint32_t rotl(uint32_t val, int n)
+{
+ return (val << n) | (val >> (32 - n));
+}
+
+/* ARM immediates for ALU instructions are made of an unsigned 8-bit
+   value right-rotated by an even amount between 0 and 30. */
+static inline int encode_imm(uint32_t imm)
+{
+ int shift;
+
+ /* simple case, only lower bits */
+ if ((imm & ~0xff) == 0)
+ return 0;
+ /* then try a simple even shift */
+ shift = ctz32(imm) & ~1;
+ if (((imm >> shift) & ~0xff) == 0)
+ return 32 - shift;
+ /* now try harder with rotations */
+ if ((rotl(imm, 2) & ~0xff) == 0)
+ return 2;
+ if ((rotl(imm, 4) & ~0xff) == 0)
+ return 4;
+ if ((rotl(imm, 6) & ~0xff) == 0)
+ return 6;
+ /* imm can't be encoded */
+ return -1;
+}
+
+static inline int check_fit_imm(uint32_t imm)
+{
+ return encode_imm(imm) >= 0;
+}
+
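As a worked example of the operand2 format that encode_imm() targets, here is a minimal standalone sketch (illustrative only, not part of the patch): it re-encodes 0x0000ff00 the way the ALU-immediate emitters below do, then decodes it the way the CPU would. ror32() is a local helper, and the constant is arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    /* Rotate right, avoiding the undefined 32-bit shift when n == 0. */
    static uint32_t ror32(uint32_t v, int n)
    {
        n &= 31;
        return n ? (v >> n) | (v << (32 - n)) : v;
    }

    int main(void)
    {
        uint32_t imm = 0x0000ff00;  /* arbitrary example constant */
        int rot = 24;               /* what encode_imm(0x0000ff00) returns */

        /* Bits [7:0] hold rotl(imm, rot); bits [11:8] hold rot/2.  The CPU
         * rotates the 8-bit value right by rot to recover the constant. */
        uint32_t op2 = ror32(imm, 32 - rot) | (rot << 7);
        uint32_t decoded = ror32(op2 & 0xff, 2 * ((op2 >> 8) & 0xf));

        printf("op2=0x%03x decodes to 0x%08x\n", op2, decoded);
        return 0;
    }
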
+/* Test if a constant matches the constraint.
+ * TODO: define constraints for:
+ *
+ * ldr/str offset: between -0xfff and 0xfff
+ * ldrh/strh offset: between -0xff and 0xff
+ * mov operand2: values represented with x << (2 * y), x < 0x100
+ * add, sub, eor...: ditto
+ */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
+ return 1;
+ else
+ return 0;
+}
+
+enum arm_data_opc_e {
+ ARITH_AND = 0x0,
+ ARITH_EOR = 0x1,
+ ARITH_SUB = 0x2,
+ ARITH_RSB = 0x3,
+ ARITH_ADD = 0x4,
+ ARITH_ADC = 0x5,
+ ARITH_SBC = 0x6,
+ ARITH_RSC = 0x7,
+ ARITH_TST = 0x8,
+ ARITH_CMP = 0xa,
+ ARITH_CMN = 0xb,
+ ARITH_ORR = 0xc,
+ ARITH_MOV = 0xd,
+ ARITH_BIC = 0xe,
+ ARITH_MVN = 0xf,
+};
+
+#define TO_CPSR(opc) \
+ ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
+
+#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
+#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
+#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
+#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
+#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
+#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
+#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
+#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
+
+enum arm_cond_code_e {
+ COND_EQ = 0x0,
+ COND_NE = 0x1,
+ COND_CS = 0x2, /* Unsigned greater or equal */
+ COND_CC = 0x3, /* Unsigned less than */
+ COND_MI = 0x4, /* Negative */
+ COND_PL = 0x5, /* Zero or greater */
+ COND_VS = 0x6, /* Overflow */
+ COND_VC = 0x7, /* No overflow */
+ COND_HI = 0x8, /* Unsigned greater than */
+ COND_LS = 0x9, /* Unsigned less or equal */
+ COND_GE = 0xa,
+ COND_LT = 0xb,
+ COND_GT = 0xc,
+ COND_LE = 0xd,
+ COND_AL = 0xe,
+};
+
+static const uint8_t tcg_cond_to_arm_cond[10] = {
+ [TCG_COND_EQ] = COND_EQ,
+ [TCG_COND_NE] = COND_NE,
+ [TCG_COND_LT] = COND_LT,
+ [TCG_COND_GE] = COND_GE,
+ [TCG_COND_LE] = COND_LE,
+ [TCG_COND_GT] = COND_GT,
+ /* unsigned */
+ [TCG_COND_LTU] = COND_CC,
+ [TCG_COND_GEU] = COND_CS,
+ [TCG_COND_LEU] = COND_LS,
+ [TCG_COND_GTU] = COND_HI,
+};
+
+static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
+{
+ tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+}
+
+static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
+{
+ tcg_out32(s, (cond << 28) | 0x0a000000 |
+ (((offset - 8) >> 2) & 0x00ffffff));
+}
+
+static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ tcg_out8(s, (cond << 4) | 0x0a);
+ s->code_ptr += 3;
+#else
+ s->code_ptr += 3;
+ tcg_out8(s, (cond << 4) | 0x0a);
+#endif
+}
+
+static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
+{
+ tcg_out32(s, (cond << 28) | 0x0b000000 |
+ (((offset - 8) >> 2) & 0x00ffffff));
+}
+
+static inline void tcg_out_dat_reg(TCGContext *s,
+ int cond, int opc, int rd, int rn, int rm, int shift)
+{
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
+ (rn << 16) | (rd << 12) | shift | rm);
+}
+
+static inline void tcg_out_dat_reg2(TCGContext *s,
+ int cond, int opc0, int opc1, int rd0, int rd1,
+ int rn0, int rn1, int rm0, int rm1, int shift)
+{
+ if (rd0 == rn1 || rd0 == rm1) {
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
+ (rn0 << 16) | (8 << 12) | shift | rm0);
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
+ (rn1 << 16) | (rd1 << 12) | shift | rm1);
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
+ } else {
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
+ (rn0 << 16) | (rd0 << 12) | shift | rm0);
+ tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
+ (rn1 << 16) | (rd1 << 12) | shift | rm1);
+ }
+}
+
+static inline void tcg_out_dat_imm(TCGContext *s,
+ int cond, int opc, int rd, int rn, int im)
+{
+ tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
+ (rn << 16) | (rd << 12) | im);
+}
+
+static inline void tcg_out_movi32(TCGContext *s,
+ int cond, int rd, int32_t arg)
+{
+ int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
+
+    /* TODO: This is very suboptimal; we could easily have a constant
+     * pool somewhere after all the instructions. */
+
+ if (arg < 0 && arg > -0x100)
+ return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
+
+ if (offset < 0x100 && offset > -0x100)
+ return offset >= 0 ?
+ tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
+ tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
+
+#ifdef __ARM_ARCH_7A__
+ /* use movw/movt */
+ /* movw */
+ tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
+ | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
+ if (arg & 0xffff0000)
+ /* movt */
+ tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
+ | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
+#else
+ tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
+ if (arg & 0x0000ff00)
+ tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
+ ((arg >> 8) & 0xff) | 0xc00);
+ if (arg & 0x00ff0000)
+ tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
+ ((arg >> 16) & 0xff) | 0x800);
+ if (arg & 0xff000000)
+ tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
+ ((arg >> 24) & 0xff) | 0x400);
+#endif
+}
+
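To make the pre-ARMv7 fallback above concrete, the following standalone sketch (illustrative only; the constant is arbitrary) prints the effective MOV/ORR sequence, one byte per instruction, that the 0xc00/0x800/0x400 rotate fields encode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t arg = 0x12345678;

        /* The rotate fields 0xc00, 0x800 and 0x400 mean "rotate right by
         * 24, 16 and 8", placing the 8-bit payload at bytes 1, 2 and 3. */
        printf("mov  rd, #0x%08x\n", arg & 0x000000ff);
        if (arg & 0x0000ff00)
            printf("orr  rd, rd, #0x%08x\n", arg & 0x0000ff00);
        if (arg & 0x00ff0000)
            printf("orr  rd, rd, #0x%08x\n", arg & 0x00ff0000);
        if (arg & 0xff000000)
            printf("orr  rd, rd, #0x%08x\n", arg & 0xff000000);
        return 0;
    }
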
+static inline void tcg_out_mul32(TCGContext *s,
+ int cond, int rd, int rs, int rm)
+{
+ if (rd != rm)
+ tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
+ (rs << 8) | 0x90 | rm);
+ else if (rd != rs)
+ tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
+ (rm << 8) | 0x90 | rs);
+ else {
+ tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
+ (rs << 8) | 0x90 | rm);
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ rd, 0, 8, SHIFT_IMM_LSL(0));
+ }
+}
+
+static inline void tcg_out_umull32(TCGContext *s,
+ int cond, int rd0, int rd1, int rs, int rm)
+{
+ if (rd0 != rm && rd1 != rm)
+ tcg_out32(s, (cond << 28) | 0x800090 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
+ else if (rd0 != rs && rd1 != rs)
+ tcg_out32(s, (cond << 28) | 0x800090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
+ else {
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
+ tcg_out32(s, (cond << 28) | 0x800098 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8));
+ }
+}
+
+static inline void tcg_out_smull32(TCGContext *s,
+ int cond, int rd0, int rd1, int rs, int rm)
+{
+ if (rd0 != rm && rd1 != rm)
+ tcg_out32(s, (cond << 28) | 0xc00090 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
+ else if (rd0 != rs && rd1 != rs)
+ tcg_out32(s, (cond << 28) | 0xc00090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
+ else {
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
+ tcg_out32(s, (cond << 28) | 0xc00098 |
+ (rd1 << 16) | (rd0 << 12) | (rs << 8));
+ }
+}
+
+static inline void tcg_out_ld32_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05900000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05100000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_st32_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05800000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05000000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_ld32_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07900000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st32_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07800000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+/* Register pre-increment with base writeback. */
+static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07b00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07a00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01d000b0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x015000b0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_st16u_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01c000b0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x014000b0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x019000b0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st16u_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x018000b0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01d000f0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x015000f0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_st16s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01c000f0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x014000f0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x019000f0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st16s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x018000f0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld8_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05d00000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05500000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_st8_12(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x05c00000 |
+ (rn << 16) | (rd << 12) | (im & 0xfff));
+ else
+ tcg_out32(s, (cond << 28) | 0x05400000 |
+ (rn << 16) | (rd << 12) | ((-im) & 0xfff));
+}
+
+static inline void tcg_out_ld8_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07d00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st8_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x07c00000 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01d000d0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x015000d0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_st8s_8(TCGContext *s, int cond,
+ int rd, int rn, tcg_target_long im)
+{
+ if (im >= 0)
+ tcg_out32(s, (cond << 28) | 0x01c000d0 |
+ (rn << 16) | (rd << 12) |
+ ((im & 0xf0) << 4) | (im & 0xf));
+ else
+ tcg_out32(s, (cond << 28) | 0x014000d0 |
+ (rn << 16) | (rd << 12) |
+ (((-im) & 0xf0) << 4) | ((-im) & 0xf));
+}
+
+static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x019000d0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_st8s_r(TCGContext *s, int cond,
+ int rd, int rn, int rm)
+{
+ tcg_out32(s, (cond << 28) | 0x018000d0 |
+ (rn << 16) | (rd << 12) | rm);
+}
+
+static inline void tcg_out_ld32u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld32_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_st32(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_st32_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld16u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld16u_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld16s(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld16s_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_st16u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_st16u_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld8u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld8_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_ld8s(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_ld8s_8(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_st8u(TCGContext *s, int cond,
+ int rd, int rn, int32_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, cond, TCG_REG_R8, offset);
+ tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
+ } else
+ tcg_out_st8_12(s, cond, rd, rn, offset);
+}
+
+static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
+{
+ int32_t val;
+
+ val = addr - (tcg_target_long) s->code_ptr;
+ if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
+ tcg_out_b(s, cond, val);
+ else {
+#if 1
+ tcg_abort();
+#else
+ if (cond == COND_AL) {
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
+ } else {
+ tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
+ tcg_out_dat_reg(s, cond, ARITH_ADD,
+ 15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
+ }
+#endif
+ }
+}
+
+static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
+{
+ int32_t val;
+
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
+#endif
+
+ val = addr - (tcg_target_long) s->code_ptr;
+ if (val < 0x01fffffd && val > -0x01fffffd)
+ tcg_out_bl(s, cond, val);
+ else {
+#if 1
+ tcg_abort();
+#else
+ if (cond == COND_AL) {
+ tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
+ } else {
+ tcg_out_movi32(s, cond, TCG_REG_R9, addr);
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
+ tcg_out_bx(s, cond, TCG_REG_R9);
+ }
+#endif
+ }
+
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
+#endif
+}
+
+static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
+{
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
+#endif
+ /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
+ tcg_out_bx(s, cond, arg);
+#ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
+#endif
+}
+
+static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value)
+ tcg_out_goto(s, cond, l->u.value);
+ else if (cond == COND_AL) {
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
+ s->code_ptr += 4;
+ } else {
+ /* Probably this should be preferred even for COND_AL... */
+ tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
+ tcg_out_b_noaddr(s, cond);
+ }
+}
+
+static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
+ void *helper_div, void *helper_rem, int shift)
+{
+ int div_reg = args[0];
+ int rem_reg = args[1];
+
+ /* stmdb sp!, { r0 - r3, ip, lr } */
+ /* (Note that we need an even number of registers as per EABI) */
+ tcg_out32(s, (cond << 28) | 0x092d500f);
+
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
+
+ tcg_out_call(s, cond, (uint32_t) helper_div);
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));
+
+ /* ldmia sp, { r0 - r3, fp, lr } */
+ tcg_out32(s, (cond << 28) | 0x089d500f);
+
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
+
+ tcg_out_call(s, cond, (uint32_t) helper_rem);
+
+ tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));
+
+ /* ldr r0, [sp], #4 */
+ if (rem_reg != 0 && div_reg != 0)
+ tcg_out32(s, (cond << 28) | 0x04bd0004);
+ /* ldr r1, [sp], #4 */
+ if (rem_reg != 1 && div_reg != 1)
+ tcg_out32(s, (cond << 28) | 0x04bd1004);
+ /* ldr r2, [sp], #4 */
+ if (rem_reg != 2 && div_reg != 2)
+ tcg_out32(s, (cond << 28) | 0x04bd2004);
+ /* ldr r3, [sp], #4 */
+ if (rem_reg != 3 && div_reg != 3)
+ tcg_out32(s, (cond << 28) | 0x04bd3004);
+ /* ldr ip, [sp], #4 */
+ if (rem_reg != 12 && div_reg != 12)
+ tcg_out32(s, (cond << 28) | 0x04bdc004);
+ /* ldr lr, [sp], #4 */
+ if (rem_reg != 14 && div_reg != 14)
+ tcg_out32(s, (cond << 28) | 0x04bde004);
+}
+
+#ifdef CONFIG_SOFTMMU
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
+
+static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
+ const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2;
+#ifdef CONFIG_SOFTMMU
+ int mem_index, s_bits;
+# if TARGET_LONG_BITS == 64
+ int addr_reg2;
+# endif
+ uint32_t *label_ptr;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#ifdef CONFIG_SOFTMMU
+# if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+# endif
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ /* Should generate something like the following:
+ * shr r8, addr_reg, #TARGET_PAGE_BITS
+ * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
+ * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
+ */
+# if CPU_TLB_BITS > 8
+# error "CPU_TLB_BITS > 8 breaks the 8-bit AND immediate used above"
+# endif
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ tcg_out_dat_imm(s, COND_AL, ARITH_AND,
+ 0, 8, CPU_TLB_SIZE - 1);
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
+ 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+    /* In the
+     * ldr r1, [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
+     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
+     * unlikely to exceed them otherwise, so emit an
+     * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+     * first.
+     */
+ if (mem_index)
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
+ (mem_index << (TLB_SHIFT & 1)) |
+ ((16 - (TLB_SHIFT >> 1)) << 8));
+ tcg_out_ld32_12(s, COND_AL, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_read));
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
+ 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ /* Check alignment. */
+ if (s_bits)
+ tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
+ 0, addr_reg, (1 << s_bits) - 1);
+# if TARGET_LONG_BITS == 64
+ /* XXX: possibly we could use a block data load or writeback in
+ * the first access. */
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
+ 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
+# endif
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addend));
+
+ switch (opc) {
+ case 0:
+ tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 0 | 4:
+ tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1:
+ tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1 | 4:
+ tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 2:
+ default:
+ tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 3:
+ tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
+ tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
+ break;
+ }
+
+ label_ptr = (void *) s->code_ptr;
+ tcg_out_b(s, COND_EQ, 8);
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
+# endif
+
+    /* TODO: move this code to where the constant pool will be */
+ if (addr_reg)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, addr_reg, SHIFT_IMM_LSL(0));
+# if TARGET_LONG_BITS == 32
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
+# else
+ if (addr_reg2 != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+# endif
+ tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
+ (tcg_target_long) s->code_ptr);
+
+ switch (opc) {
+ case 0 | 4:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, 0, SHIFT_IMM_LSL(24));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_ASR(24));
+ break;
+ case 1 | 4:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, 0, SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_ASR(16));
+ break;
+ case 0:
+ case 1:
+ case 2:
+ default:
+ if (data_reg)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_LSL(0));
+ break;
+ case 3:
+ if (data_reg != 0)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg, 0, 0, SHIFT_IMM_LSL(0));
+ if (data_reg2 != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ data_reg2, 0, 1, SHIFT_IMM_LSL(0));
+ break;
+ }
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
+# endif
+
+ *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
+#else /* !CONFIG_SOFTMMU */
+ if (GUEST_BASE) {
+ uint32_t offset = GUEST_BASE;
+ int i;
+ int rot;
+
+ while (offset) {
+ i = ctz32(offset) & ~1;
+ rot = ((32 - i) << 7) & 0xf00;
+
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
+ ((offset >> i) & 0xff) | rot);
+ addr_reg = 8;
+ offset &= ~(0xff << i);
+ }
+ }
+ switch (opc) {
+ case 0:
+ tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 0 | 4:
+ tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1:
+ tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1 | 4:
+ tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 2:
+ default:
+ tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 3:
+        /* TODO: use a block load -
+         * requires checking whether data_reg2 > data_reg or vice versa */
+ if (data_reg == addr_reg) {
+ tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
+ tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
+ } else {
+ tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
+ tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
+ }
+ break;
+ }
+#endif
+}
+
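The mem_index immediate built above is dense; this standalone sketch decodes it to show that it is simply mem_index scaled to the byte offset of tlb_table[mem_index]. The CPU_TLB_BITS and CPU_TLB_ENTRY_BITS values here are assumptions for the demo (the real ones come from the target headers):

    #include <stdint.h>
    #include <stdio.h>

    #define CPU_TLB_BITS       8   /* assumed for the demo */
    #define CPU_TLB_ENTRY_BITS 4   /* assumed: log2(sizeof(CPUTLBEntry)) */
    #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

    int main(void)
    {
        int mem_index = 2;
        uint32_t op2 = (mem_index << (TLB_SHIFT & 1))
                     | ((16 - (TLB_SHIFT >> 1)) << 8);
        uint32_t val = op2 & 0xff;
        int rot = 2 * ((op2 >> 8) & 0xf);       /* rotate-right amount */
        uint32_t imm = (val >> rot) | (val << (32 - rot));

        /* Both lines print the same value: the byte offset of
         * tlb_table[mem_index] inside CPUState. */
        printf("decoded immediate:      0x%08x\n", imm);
        printf("mem_index << TLB_SHIFT: 0x%08x\n",
               (uint32_t)mem_index << TLB_SHIFT);
        return 0;
    }
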
+static inline void tcg_out_qemu_st(TCGContext *s, int cond,
+ const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2;
+#ifdef CONFIG_SOFTMMU
+ int mem_index, s_bits;
+# if TARGET_LONG_BITS == 64
+ int addr_reg2;
+# endif
+ uint32_t *label_ptr;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#ifdef CONFIG_SOFTMMU
+# if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+# endif
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ /* Should generate something like the following:
+ * shr r8, addr_reg, #TARGET_PAGE_BITS
+ * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
+ * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
+ */
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+ tcg_out_dat_imm(s, COND_AL, ARITH_AND,
+ 0, 8, CPU_TLB_SIZE - 1);
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
+ 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
+    /* In the
+     * ldr r1, [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
+     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
+     * unlikely to exceed them otherwise, so emit an
+     * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+     * first.
+     */
+ if (mem_index)
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
+ (mem_index << (TLB_SHIFT & 1)) |
+ ((16 - (TLB_SHIFT >> 1)) << 8));
+ tcg_out_ld32_12(s, COND_AL, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_write));
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
+ 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+ /* Check alignment. */
+ if (s_bits)
+ tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
+ 0, addr_reg, (1 << s_bits) - 1);
+# if TARGET_LONG_BITS == 64
+ /* XXX: possibly we could use a block data load or writeback in
+ * the first access. */
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addr_write)
+ + 4);
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
+ 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
+# endif
+ tcg_out_ld32_12(s, COND_EQ, 1, 0,
+ offsetof(CPUState, tlb_table[0][0].addend));
+
+ switch (opc) {
+ case 0:
+ tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 0 | 4:
+ tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1:
+ tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 1 | 4:
+ tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 2:
+ default:
+ tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
+ break;
+ case 3:
+ tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
+ tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
+ break;
+ }
+
+ label_ptr = (void *) s->code_ptr;
+ tcg_out_b(s, COND_EQ, 8);
+
+    /* TODO: move this code to where the constant pool will be */
+ if (addr_reg)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 0, 0, addr_reg, SHIFT_IMM_LSL(0));
+# if TARGET_LONG_BITS == 32
+ switch (opc) {
+ case 0:
+ tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+ break;
+ case 1:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, data_reg, SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, 1, SHIFT_IMM_LSR(16));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+ break;
+ case 2:
+ if (data_reg != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, data_reg, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
+ break;
+ case 3:
+ if (data_reg != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, data_reg, SHIFT_IMM_LSL(0));
+ if (data_reg2 != 2)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg2, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ }
+# else
+ if (addr_reg2 != 1)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
+ switch (opc) {
+ case 0:
+ tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ case 1:
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg, SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, 2, SHIFT_IMM_LSR(16));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ case 2:
+ if (data_reg != 2)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg, SHIFT_IMM_LSL(0));
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
+ break;
+ case 3:
+ tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
+ tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
+ if (data_reg != 2)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 2, 0, data_reg, SHIFT_IMM_LSL(0));
+ if (data_reg2 != 3)
+ tcg_out_dat_reg(s, cond, ARITH_MOV,
+ 3, 0, data_reg2, SHIFT_IMM_LSL(0));
+ break;
+ }
+# endif
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
+# endif
+
+ tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
+ (tcg_target_long) s->code_ptr);
+# if TARGET_LONG_BITS == 64
+ if (opc == 3)
+ tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
+# endif
+
+# ifdef SAVE_LR
+ tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
+# endif
+
+ *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
+#else /* !CONFIG_SOFTMMU */
+ if (GUEST_BASE) {
+ uint32_t offset = GUEST_BASE;
+ int i;
+ int rot;
+
+ while (offset) {
+ i = ctz32(offset) & ~1;
+ rot = ((32 - i) << 7) & 0xf00;
+
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
+ ((offset >> i) & 0xff) | rot);
+ addr_reg = 8;
+ offset &= ~(0xff << i);
+ }
+ }
+ switch (opc) {
+ case 0:
+ tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 0 | 4:
+ tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1:
+ tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 1 | 4:
+ tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 2:
+ default:
+ tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
+ break;
+ case 3:
+        /* TODO: use a block store -
+         * requires checking whether data_reg2 > data_reg or vice versa */
+ tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
+ tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
+ break;
+ }
+#endif
+}
+
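The GUEST_BASE loops in the two functions above peel one even-aligned byte off the base per emitted ADD, so that every chunk fits an ARM rotated immediate. A standalone sketch of the same decomposition (the base value is made up, and ctz32() is assumed to behave like QEMU's helper):

    #include <stdint.h>
    #include <stdio.h>

    static int ctz32(uint32_t v) { return __builtin_ctz(v); }  /* v != 0 */

    int main(void)
    {
        uint32_t offset = 0x12340000;   /* hypothetical GUEST_BASE */

        while (offset) {
            int i = ctz32(offset) & ~1;              /* even bit position */
            uint32_t chunk = offset & (0xffu << i);  /* one rotatable byte */
            printf("add  r8, rX, #0x%08x\n", chunk);
            offset &= ~(0xffu << i);
        }
        return 0;
    }
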
+static uint8_t *tb_ret_addr;
+
+static inline void tcg_out_op(TCGContext *s, int opc,
+ const TCGArg *args, const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+#ifdef SAVE_LR
+ if (args[0] >> 8)
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
+ else
+ tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
+ if (args[0] >> 8)
+ tcg_out32(s, args[0]);
+#else
+ {
+ uint8_t *ld_ptr = s->code_ptr;
+ if (args[0] >> 8)
+ tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
+ else
+ tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
+ tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
+ if (args[0] >> 8) {
+ *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
+ tcg_out32(s, args[0]);
+ }
+ }
+#endif
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* Direct jump method */
+#if defined(USE_DIRECT_JUMP)
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ tcg_out_b(s, COND_AL, 8);
+#else
+ tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ tcg_out32(s, 0);
+#endif
+ } else {
+ /* Indirect jump method */
+#if 1
+ c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
+ if (c > 0xfff || c < -0xfff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_R0,
+ (tcg_target_long) (s->tb_next + args[0]));
+ tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
+ } else
+ tcg_out_ld32_12(s, COND_AL, 15, 15, c);
+#else
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
+ tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
+ tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
+#endif
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ if (const_args[0])
+ tcg_out_call(s, COND_AL, args[0]);
+ else
+ tcg_out_callr(s, COND_AL, args[0]);
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0])
+ tcg_out_goto(s, COND_AL, args[0]);
+ else
+ tcg_out_bx(s, COND_AL, args[0]);
+ break;
+ case INDEX_op_br:
+ tcg_out_goto_label(s, COND_AL, args[0]);
+ break;
+
+ case INDEX_op_ld8u_i32:
+ tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16u_i32:
+ tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i32:
+ tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st8_i32:
+ tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st16_i32:
+ tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i32:
+ tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
+ break;
+
+ case INDEX_op_mov_i32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[1], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi32(s, COND_AL, args[0], args[1]);
+ break;
+ case INDEX_op_add_i32:
+ c = ARITH_ADD;
+ goto gen_arith;
+ case INDEX_op_sub_i32:
+ c = ARITH_SUB;
+ goto gen_arith;
+ case INDEX_op_and_i32:
+ c = ARITH_AND;
+ goto gen_arith;
+ case INDEX_op_or_i32:
+ c = ARITH_ORR;
+ goto gen_arith;
+ case INDEX_op_xor_i32:
+ c = ARITH_EOR;
+ /* Fall through. */
+ gen_arith:
+ if (const_args[2]) {
+ int rot;
+ rot = encode_imm(args[2]);
+ tcg_out_dat_imm(s, COND_AL, c,
+ args[0], args[1], rotl(args[2], rot) | (rot << 7));
+ } else
+ tcg_out_dat_reg(s, COND_AL, c,
+ args[0], args[1], args[2], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_add2_i32:
+ tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
+ args[0], args[1], args[2], args[3],
+ args[4], args[5], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_sub2_i32:
+ tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
+ args[0], args[1], args[2], args[3],
+ args[4], args[5], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_neg_i32:
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
+ break;
+ case INDEX_op_not_i32:
+ tcg_out_dat_reg(s, COND_AL,
+ ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
+ break;
+ case INDEX_op_mul_i32:
+ tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_mulu2_i32:
+ tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
+ break;
+ case INDEX_op_div2_i32:
+ tcg_out_div_helper(s, COND_AL, args,
+ tcg_helper_div_i64, tcg_helper_rem_i64,
+ SHIFT_IMM_ASR(31));
+ break;
+ case INDEX_op_divu2_i32:
+ tcg_out_div_helper(s, COND_AL, args,
+ tcg_helper_divu_i64, tcg_helper_remu_i64,
+ SHIFT_IMM_LSR(31));
+ break;
+ /* XXX: Perhaps args[2] & 0x1f is wrong */
+ case INDEX_op_shl_i32:
+ c = const_args[2] ?
+ SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
+ goto gen_shift32;
+ case INDEX_op_shr_i32:
+ c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
+ SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
+ goto gen_shift32;
+ case INDEX_op_sar_i32:
+ c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
+ SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
+ /* Fall through. */
+ gen_shift32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
+ break;
+
+ case INDEX_op_brcond_i32:
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
+ args[0], args[1], SHIFT_IMM_LSL(0));
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
+ break;
+ case INDEX_op_brcond2_i32:
+ /* The resulting conditions are:
+ * TCG_COND_EQ --> a0 == a2 && a1 == a3,
+ * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
+ * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
+ * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
+ * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
+ * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
+ */
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
+ args[1], args[3], SHIFT_IMM_LSL(0));
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
+ args[0], args[2], SHIFT_IMM_LSL(0));
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, COND_AL, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, COND_AL, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, COND_AL, args, 2);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, COND_AL, args, 3);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, COND_AL, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, COND_AL, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, COND_AL, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, COND_AL, args, 3);
+ break;
+
+ case INDEX_op_ext8s_i32:
+#ifdef __ARM_ARCH_7A__
+ /* sxtb */
+ tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]);
+#else
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[1], SHIFT_IMM_LSL(24));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[0], SHIFT_IMM_ASR(24));
+#endif
+ break;
+ case INDEX_op_ext16s_i32:
+#ifdef __ARM_ARCH_7A__
+ /* sxth */
+ tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]);
+#else
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[1], SHIFT_IMM_LSL(16));
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
+ args[0], 0, args[0], SHIFT_IMM_ASR(16));
+#endif
+ break;
+
+ default:
+ tcg_abort();
+ }
+}
+
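The brcond2_i32 case above turns a 64-bit comparison into a cmp on the high words followed by a conditional cmp on the low words. A C model of the resulting predicate for the unsigned less-than case (a sketch; the other conditions listed in the comment follow the same pattern):

    #include <stdint.h>
    #include <stdio.h>

    /* Operands follow the TCG argument order: (a1:a0) vs (a3:a2), low
     * word first.  Models "cmp a1, a3; cmpeq a0, a2; b<cond> label". */
    static int brcond2_ltu(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
    {
        if (a1 != a3)
            return a1 < a3;   /* decided by the high-word cmp */
        return a0 < a2;       /* decided by the conditional low-word cmp */
    }

    int main(void)
    {
        printf("%d\n", brcond2_ltu(1, 0, 0, 1));   /* 1 */
        printf("%d\n", brcond2_ltu(0, 1, 1, 0));   /* 0 */
        return 0;
    }
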
+static const TCGTargetOpDef arm_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ /* TODO: "r", "r", "ri" */
+ { INDEX_op_add_i32, { "r", "r", "rI" } },
+ { INDEX_op_sub_i32, { "r", "r", "rI" } },
+ { INDEX_op_mul_i32, { "r", "r", "r" } },
+ { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
+ { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
+ { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
+ { INDEX_op_and_i32, { "r", "r", "rI" } },
+ { INDEX_op_or_i32, { "r", "r", "rI" } },
+ { INDEX_op_xor_i32, { "r", "r", "rI" } },
+ { INDEX_op_neg_i32, { "r", "r" } },
+ { INDEX_op_not_i32, { "r", "r" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "r" } },
+
+ /* TODO: "r", "r", "r", "r", "ri", "ri" */
+ { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld32u, { "r", "x", "X" } },
+ { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },
+
+ { INDEX_op_qemu_st8, { "x", "x", "X" } },
+ { INDEX_op_qemu_st16, { "x", "x", "X" } },
+ { INDEX_op_qemu_st32, { "x", "x", "X" } },
+ { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
+
+ { INDEX_op_ext8s_i32, { "r", "r" } },
+ { INDEX_op_ext16s_i32, { "r", "r" } },
+
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ /* fail safe */
+ if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
+ tcg_abort();
+
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
+ ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ ((2 << TCG_REG_R3) - 1) |
+ (1 << TCG_REG_R12) | (1 << TCG_REG_R14));
+
+ tcg_regset_clear(s->reserved_regs);
+#ifdef SAVE_LR
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
+#endif
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
+
+ tcg_add_target_add_op_defs(arm_op_defs);
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ tcg_out_st32(s, COND_AL, arg, arg1, arg2);
+}
+
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val > 0)
+ if (val < 0x100)
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
+ else
+ tcg_abort();
+ else if (val < 0) {
+ if (val > -0x100)
+ tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
+ else
+ tcg_abort();
+ }
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ tcg_out_movi32(s, COND_AL, ret, arg);
+}
+
+void tcg_target_qemu_prologue(TCGContext *s)
+{
+ /* stmdb sp!, { r9 - r11, lr } */
+ tcg_out32(s, (COND_AL << 28) | 0x092d4e00);
+
+ tcg_out_bx(s, COND_AL, TCG_REG_R0);
+ tb_ret_addr = s->code_ptr;
+
+ /* ldmia sp!, { r9 - r11, pc } */
+ tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
+}
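For context, a hedged sketch of how the prologue just emitted is entered from C (the names below are illustrative stand-ins; in QEMU this role is played by tcg_qemu_tb_exec): the caller passes the address of a translation block's code, "bx r0" jumps into it, and exit_tb eventually branches back to tb_ret_addr, whose ldmia restores r9-r11 and returns with the value left in r0.

    #include <stdint.h>

    typedef uintptr_t (*prologue_fn)(void *tb_code);

    uintptr_t run_tb(void *prologue, void *tb_code)
    {
        /* AAPCS: tb_code arrives in r0, so the "bx r0" above transfers
         * control straight into the translated code; the "ldmia sp!,
         * { r9 - r11, pc }" at tb_ret_addr returns to us here. */
        return ((prologue_fn)prologue)(tb_code);
    }
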
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
new file mode 100644
index 0000000..5eac7bf
--- /dev/null
+++ b/tcg/arm/tcg-target.h
@@ -0,0 +1,99 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ * Copyright (c) 2008 Andrzej Zaborowski
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_ARM 1
+
+#define TCG_TARGET_REG_BITS 32
+#undef TCG_TARGET_WORDS_BIGENDIAN
+#undef TCG_TARGET_HAS_div_i32
+#undef TCG_TARGET_HAS_div_i64
+#undef TCG_TARGET_HAS_bswap32_i32
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_neg_i32
+#undef TCG_TARGET_HAS_neg_i64
+#define TCG_TARGET_HAS_not_i32
+#undef TCG_TARGET_STACK_GROWSUP
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+};
+
+#define TCG_TARGET_NB_REGS 15
+
+#define TCG_CT_CONST_ARM 0x100
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R13
+#define TCG_TARGET_STACK_ALIGN 8
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* optional instructions */
+// #define TCG_TARGET_HAS_div_i32
+// #define TCG_TARGET_HAS_rot_i32
+// #define TCG_TARGET_HAS_ext8s_i32
+// #define TCG_TARGET_HAS_ext16s_i32
+// #define TCG_TARGET_HAS_ext8u_i32
+// #define TCG_TARGET_HAS_ext16u_i32
+// #define TCG_TARGET_HAS_bswap16_i32
+// #define TCG_TARGET_HAS_bswap32_i32
+// #define TCG_TARGET_HAS_not_i32
+// #define TCG_TARGET_HAS_neg_i32
+// #define TCG_TARGET_HAS_andc_i32
+// #define TCG_TARGET_HAS_orc_i32
+
+#define TCG_TARGET_HAS_GUEST_BASE
+
+enum {
+ /* Note: must be synced with dyngen-exec.h */
+ TCG_AREG0 = TCG_REG_R7,
+ TCG_AREG1 = TCG_REG_R4,
+ TCG_AREG2 = TCG_REG_R5,
+};
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+#if QEMU_GNUC_PREREQ(4, 1)
+ __builtin___clear_cache((char *) start, (char *) stop);
+#else
+ register unsigned long _beg __asm ("a1") = start;
+ register unsigned long _end __asm ("a2") = stop;
+ register unsigned long _flg __asm ("a3") = 0;
+ __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
+#endif
+}
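A usage note on flush_icache_range() (a sketch; finish_block() and its arguments are illustrative names, not QEMU API): ARM's instruction and data caches are not coherent, so newly generated code must be flushed before it is executed.

    #include <stdint.h>

    /* Assume the inline above is visible via this header. */
    extern void flush_icache_range(unsigned long start, unsigned long stop);

    void finish_block(uint8_t *buf, uint8_t *buf_end)
    {
        /* Instructions were just stored to [buf, buf_end) through the
         * D-cache; make them visible to the I-cache before branching. */
        flush_icache_range((unsigned long)buf, (unsigned long)buf_end);
    }
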
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
new file mode 100644
index 0000000..4677971
--- /dev/null
+++ b/tcg/hppa/tcg-target.c
@@ -0,0 +1,974 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef NDEBUG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%r0",
+ "%r1",
+ "%rp",
+ "%r3",
+ "%r4",
+ "%r5",
+ "%r6",
+ "%r7",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+ "%r15",
+ "%r16",
+ "%r17",
+ "%r18",
+ "%r19",
+ "%r20",
+ "%r21",
+ "%r22",
+ "%r23",
+ "%r24",
+ "%r25",
+ "%r26",
+ "%dp",
+ "%ret0",
+ "%ret1",
+ "%sp",
+ "%r31",
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+
+ TCG_REG_R17,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+};
+
+static const int tcg_target_call_iarg_regs[4] = {
+ TCG_REG_R26,
+ TCG_REG_R25,
+ TCG_REG_R24,
+ TCG_REG_R23,
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_RET0,
+ TCG_REG_RET1,
+};
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ switch (type) {
+ case R_PARISC_PCREL17F:
+ hppa_patch17f((uint32_t *)code_ptr, value, addend);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 4;
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'L': /* qemu_ld/st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+
+ /* TODO */
+
+ return 0;
+}
+
+#define INSN_OP(x) ((x) << 26)
+#define INSN_EXT3BR(x) ((x) << 13)
+#define INSN_EXT3SH(x) ((x) << 10)
+#define INSN_EXT4(x) ((x) << 6)
+#define INSN_EXT5(x) (x)
+#define INSN_EXT6(x) ((x) << 6)
+#define INSN_EXT7(x) ((x) << 6)
+#define INSN_EXT8A(x) ((x) << 6)
+#define INSN_EXT8B(x) ((x) << 5)
+#define INSN_T(x) (x)
+#define INSN_R1(x) ((x) << 16)
+#define INSN_R2(x) ((x) << 21)
+#define INSN_DEP_LEN(x) (32 - (x))
+#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
+#define INSN_SHDEP_P(x) ((x) << 5)
+#define INSN_COND(x) ((x) << 13)
+
+#define COND_NEVER 0
+#define COND_EQUAL 1
+#define COND_LT 2
+#define COND_LTEQ 3
+#define COND_LTU 4
+#define COND_LTUEQ 5
+#define COND_SV 6
+#define COND_OD 7
+
+
+/* Logical ADD */
+#define ARITH_ADD (INSN_OP(0x02) | INSN_EXT6(0x28))
+#define ARITH_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
+#define ARITH_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
+#define ARITH_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
+#define ARITH_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
+
+#define SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
+#define VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
+#define DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
+#define ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
+#define ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
+#define EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
+#define EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
+#define VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
+
+#define SUBI (INSN_OP(0x25))
+#define MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
+
+#define BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
+#define BLE_SR4 (INSN_OP(0x39) | (1 << 13))
+#define BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
+#define BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
+#define LDIL (INSN_OP(0x08))
+#define LDO (INSN_OP(0x0d))
+
+#define LDB (INSN_OP(0x10))
+#define LDH (INSN_OP(0x11))
+#define LDW (INSN_OP(0x12))
+#define LDWM (INSN_OP(0x13))
+
+#define STB (INSN_OP(0x18))
+#define STH (INSN_OP(0x19))
+#define STW (INSN_OP(0x1a))
+#define STWM (INSN_OP(0x1b))
+
+#define COMBT (INSN_OP(0x20))
+#define COMBF (INSN_OP(0x22))
+
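+/* PA-RISC "low sign extension": many immediate fields keep the sign bit in
+ * the least significant bit of the field, with the remaining bits shifted
+ * up by one; this builds such a field at bit position 'start'. */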
+static int lowsignext(uint32_t val, int start, int length)
+{
+ return (((val << 1) & ~(~0 << length)) |
+ ((val >> (length - 1)) & 1)) << start;
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ /* PA1.1 defines COPY as OR r,0,t */
+ tcg_out32(s, ARITH_OR | INSN_T(ret) | INSN_R1(arg) | INSN_R2(TCG_REG_R0));
+
+ /* PA2.0 defines COPY as LDO 0(r),t
+ * but hppa-dis.c is unaware of this definition */
+ /* tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(arg) | reassemble_14(0)); */
+}
+
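+/* Constants in the 13-bit positive range are materialized with a single LDO
+ * off %r0; anything larger is built as LDIL (left 21 bits) followed by LDO
+ * of the right 11 bits. */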
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ if (arg == (arg & 0x1fff)) {
+ tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(TCG_REG_R0) |
+ reassemble_14(arg));
+ } else {
+ tcg_out32(s, LDIL | INSN_R2(ret) |
+ reassemble_21(lrsel((uint32_t)arg, 0)));
+ if (arg & 0x7ff)
+ tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(ret) |
+ reassemble_14(rrsel((uint32_t)arg, 0)));
+ }
+}
+
+static inline void tcg_out_ld_raw(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ tcg_out32(s, LDIL | INSN_R2(ret) |
+ reassemble_21(lrsel((uint32_t)arg, 0)));
+ tcg_out32(s, LDW | INSN_R1(ret) | INSN_R2(ret) |
+ reassemble_14(rrsel((uint32_t)arg, 0)));
+}
+
+static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ tcg_out_ld_raw(s, ret, arg);
+}
+
+static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset,
+ int op)
+{
+ if (offset == (offset & 0xfff))
+ tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) |
+ reassemble_14(offset));
+ else {
+ fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset);
+ tcg_abort();
+ }
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ fprintf(stderr, "unimplemented %s\n", __func__);
+ tcg_abort();
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ fprintf(stderr, "unimplemented %s\n", __func__);
+ tcg_abort();
+}
+
+static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
+{
+ tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
+}
+
+static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
+ tcg_target_long val, int op)
+{
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R20, val);
+ tcg_out_arith(s, t, r1, TCG_REG_R20, op);
+}
+
+static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
+}
+
+static inline void tcg_out_nop(TCGContext *s)
+{
+ tcg_out32(s, ARITH_OR | INSN_T(TCG_REG_R0) | INSN_R1(TCG_REG_R0) |
+ INSN_R2(TCG_REG_R0));
+}
+
+static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg) {
+ tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
+}
+
+static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg) {
+ tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
+}
+
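+/* Byte swaps built from SHD (shift a register pair) and DEP (deposit a bit
+ * field): bswap16 copies the low byte above the high byte and shifts the
+ * pair down by 8; bswap32 rotates by 16 with SHD, patches one byte with
+ * DEP, and a final SHD of the original and rotated words reverses all four
+ * bytes. */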
+static inline void tcg_out_bswap16(TCGContext *s, int ret, int arg) {
+ if (ret != arg)
+ tcg_out_mov(s, ret, arg);
+ tcg_out32(s, DEP | INSN_R2(ret) | INSN_R1(ret) |
+ INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
+ tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(TCG_REG_R0) |
+ INSN_R2(ret) | INSN_SHDEP_CP(8));
+}
+
+static inline void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp) {
+ tcg_out32(s, SHD | INSN_T(temp) | INSN_R1(arg) |
+ INSN_R2(arg) | INSN_SHDEP_CP(16));
+ tcg_out32(s, DEP | INSN_R2(temp) | INSN_R1(temp) |
+ INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
+ tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(arg) |
+ INSN_R2(temp) | INSN_SHDEP_CP(8));
+}
+
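+/* Calls are emitted as LDIL + BLE so any 32-bit target is reachable.  BLE
+ * leaves the return address in %r31, which is copied into %rp where the
+ * callee expects it; __canonicalize_funcptr_for_compare (from libgcc)
+ * resolves a possibly descriptor-based function pointer to the actual code
+ * address. */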
+static inline void tcg_out_call(TCGContext *s, void *func)
+{
+ uint32_t val = (uint32_t)__canonicalize_funcptr_for_compare(func);
+ tcg_out32(s, LDIL | INSN_R2(TCG_REG_R20) |
+ reassemble_21(lrsel(val, 0)));
+ tcg_out32(s, BLE_SR4 | INSN_R2(TCG_REG_R20) |
+ reassemble_17(rrsel(val, 0) >> 2));
+ tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
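+/* Guest memory loads.  With CONFIG_SOFTMMU this emits an inline TLB lookup:
+ * on a tag match the cached addend is added to the guest address and the
+ * access is done directly, otherwise control falls through to a call to the
+ * __ld*_mmu helper.  The COMBT/COMBF compare-and-branch instructions have
+ * delay slots, hence the mov/nop following each of them.  64-bit accesses
+ * (opc == 3) are not implemented and abort. */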
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label3_ptr;
+#endif
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ r0 = TCG_REG_R26;
+ r1 = TCG_REG_R25;
+
+#if defined(CONFIG_SOFTMMU)
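+ /* r1 becomes the byte offset of this page's entry in tlb_table[mem_index];
+ * r0 keeps the page-aligned address plus the low alignment bits, so an
+ * unaligned access never matches the tag and takes the slow path. */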
+ tcg_out_mov(s, r1, addr_reg);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+ tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
+ INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
+
+ tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
+ ARITH_AND);
+
+ tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
+ tcg_out_arithi(s, r1, r1,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_read),
+ ARITH_ADD);
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);
+
+#if TARGET_LONG_BITS == 32
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+#else
+ /* if not equal, jump to label3 */
+ label3_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);
+
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_nop(s); /* delay slot */
+
+ /* label3: */
+ *label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
+#endif
+
+#if TARGET_LONG_BITS == 32
+ tcg_out_mov(s, TCG_REG_R26, addr_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R25, mem_index);
+#else
+ tcg_out_mov(s, TCG_REG_R26, addr_reg);
+ tcg_out_mov(s, TCG_REG_R25, addr_reg2);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
+#endif
+
+ tcg_out_call(s, qemu_ld_helpers[s_bits]);
+
+ switch(opc) {
+ case 0 | 4:
+ tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
+ break;
+ case 1 | 4:
+ tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ default:
+ tcg_out_mov(s, data_reg, TCG_REG_RET0);
+ break;
+ case 3:
+ tcg_abort();
+ tcg_out_mov(s, data_reg, TCG_REG_RET0);
+ tcg_out_mov(s, data_reg2, TCG_REG_RET1);
+ break;
+ }
+
+ /* jump to label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);
+
+ /* label1: */
+ *label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);
+
+ tcg_out_arithi(s, TCG_REG_R20, r1,
+ offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_read),
+ ARITH_ADD);
+ tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
+ tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out_ldst(s, data_reg, r0, 0, LDB);
+ break;
+ case 0 | 4:
+ tcg_out_ldst(s, data_reg, r0, 0, LDB);
+ tcg_out_ext8s(s, data_reg, data_reg);
+ break;
+ case 1:
+ tcg_out_ldst(s, data_reg, r0, 0, LDH);
+ if (bswap)
+ tcg_out_bswap16(s, data_reg, data_reg);
+ break;
+ case 1 | 4:
+ tcg_out_ldst(s, data_reg, r0, 0, LDH);
+ if (bswap)
+ tcg_out_bswap16(s, data_reg, data_reg);
+ tcg_out_ext16s(s, data_reg, data_reg);
+ break;
+ case 2:
+ tcg_out_ldst(s, data_reg, r0, 0, LDW);
+ if (bswap)
+ tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
+ break;
+ case 3:
+ tcg_abort();
+ if (!bswap) {
+ tcg_out_ldst(s, data_reg, r0, 0, LDW);
+ tcg_out_ldst(s, data_reg2, r0, 4, LDW);
+ } else {
+ tcg_out_ldst(s, data_reg, r0, 4, LDW);
+ tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
+ tcg_out_ldst(s, data_reg2, r0, 0, LDW);
+ tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
+#endif
+}
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label3_ptr;
+#endif
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0; /* suppress warning */
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+
+ s_bits = opc;
+
+ r0 = TCG_REG_R26;
+ r1 = TCG_REG_R25;
+
+#if defined(CONFIG_SOFTMMU)
+ tcg_out_mov(s, r1, addr_reg);
+
+ tcg_out_mov(s, r0, addr_reg);
+
+ tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
+ INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
+
+ tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
+ ARITH_AND);
+
+ tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
+ tcg_out_arithi(s, r1, r1,
+ offsetof(CPUState, tlb_table[mem_index][0].addr_write),
+ ARITH_ADD);
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);
+
+#if TARGET_LONG_BITS == 32
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+#else
+ /* if not equal, jump to label3 */
+ label3_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_mov(s, r0, addr_reg); /* delay slot */
+
+ tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);
+
+ /* if equal, jump to label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
+ INSN_COND(COND_EQUAL));
+ tcg_out_nop(s); /* delay slot */
+
+ /* label3: */
+ *label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
+#endif
+
+ tcg_out_mov(s, TCG_REG_R26, addr_reg);
+#if TARGET_LONG_BITS == 64
+ tcg_out_mov(s, TCG_REG_R25, addr_reg2);
+ if (opc == 3) {
+ tcg_abort();
+ tcg_out_mov(s, TCG_REG_R24, data_reg);
+ tcg_out_mov(s, TCG_REG_R23, data_reg2);
+ /* TODO: push mem_index */
+ tcg_abort();
+ } else {
+ switch(opc) {
+ case 0:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
+ break;
+ case 1:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
+ break;
+ case 2:
+ tcg_out_mov(s, TCG_REG_R24, data_reg);
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
+ }
+#else
+ if (opc == 3) {
+ tcg_abort();
+ tcg_out_mov(s, TCG_REG_R25, data_reg);
+ tcg_out_mov(s, TCG_REG_R24, data_reg2);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
+ } else {
+ switch(opc) {
+ case 0:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
+ break;
+ case 1:
+ tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
+ INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
+ break;
+ case 2:
+ tcg_out_mov(s, TCG_REG_R25, data_reg);
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
+ }
+#endif
+ tcg_out_call(s, qemu_st_helpers[s_bits]);
+
+ /* jump to label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);
+
+ /* label1: */
+ *label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);
+
+ tcg_out_arithi(s, TCG_REG_R20, r1,
+ offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_write),
+ ARITH_ADD);
+ tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
+ tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
+#else
+ r0 = addr_reg;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out_ldst(s, data_reg, r0, 0, STB);
+ break;
+ case 1:
+ if (bswap) {
+ tcg_out_bswap16(s, TCG_REG_R20, data_reg);
+ data_reg = TCG_REG_R20;
+ }
+ tcg_out_ldst(s, data_reg, r0, 0, STH);
+ break;
+ case 2:
+ if (bswap) {
+ tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
+ data_reg = TCG_REG_R20;
+ }
+ tcg_out_ldst(s, data_reg, r0, 0, STW);
+ break;
+ case 3:
+ tcg_abort();
+ if (!bswap) {
+ tcg_out_ldst(s, data_reg, r0, 0, STW);
+ tcg_out_ldst(s, data_reg2, r0, 4, STW);
+ } else {
+ tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
+ tcg_out_ldst(s, TCG_REG_R20, r0, 4, STW);
+ tcg_out_bswap32(s, TCG_REG_R20, data_reg2, TCG_REG_R20);
+ tcg_out_ldst(s, TCG_REG_R20, r0, 0, STW);
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
+#endif
+}
+
+static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, args[0]);
+ tcg_out32(s, BV_N | INSN_R2(TCG_REG_R18));
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+ fprintf(stderr, "goto_tb direct\n");
+ tcg_abort();
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R20, args[0]);
+ tcg_out32(s, BV_N | INSN_R2(TCG_REG_R20));
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ } else {
+ /* indirect jump method */
+ tcg_out_ld_ptr(s, TCG_REG_R20,
+ (tcg_target_long)(s->tb_next + args[0]));
+ tcg_out32(s, BV_N | INSN_R2(TCG_REG_R20));
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ tcg_out32(s, BLE_SR4 | INSN_R2(args[0]));
+ tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
+ break;
+ case INDEX_op_jmp:
+ fprintf(stderr, "unimplemented jmp\n");
+ tcg_abort();
+ break;
+ case INDEX_op_br:
+ fprintf(stderr, "unimplemented br\n");
+ tcg_abort();
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
+ break;
+
+ case INDEX_op_ld8u_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDB);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDB);
+ tcg_out_ext8s(s, args[0], args[0]);
+ break;
+ case INDEX_op_ld16u_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDH);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDH);
+ tcg_out_ext16s(s, args[0], args[0]);
+ break;
+ case INDEX_op_ld_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDW);
+ break;
+
+ case INDEX_op_st8_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], STB);
+ break;
+ case INDEX_op_st16_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], STH);
+ break;
+ case INDEX_op_st_i32:
+ tcg_out_ldst(s, args[0], args[1], args[2], STW);
+ break;
+
+ case INDEX_op_sub_i32:
+ c = ARITH_SUB;
+ goto gen_arith;
+ case INDEX_op_and_i32:
+ c = ARITH_AND;
+ goto gen_arith;
+ case INDEX_op_or_i32:
+ c = ARITH_OR;
+ goto gen_arith;
+ case INDEX_op_xor_i32:
+ c = ARITH_XOR;
+ goto gen_arith;
+ case INDEX_op_add_i32:
+ c = ARITH_ADD;
+ goto gen_arith;
+
+ case INDEX_op_shl_i32:
+ tcg_out32(s, SUBI | INSN_R1(TCG_REG_R20) | INSN_R2(args[2]) |
+ lowsignext(0x1f, 0, 11));
+ tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R20));
+ tcg_out32(s, ZVDEP | INSN_R2(args[0]) | INSN_R1(args[1]) |
+ INSN_DEP_LEN(32));
+ break;
+ case INDEX_op_shr_i32:
+ tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(args[2]));
+ tcg_out32(s, VSHD | INSN_T(args[0]) | INSN_R1(TCG_REG_R0) |
+ INSN_R2(args[1]));
+ break;
+ case INDEX_op_sar_i32:
+ tcg_out32(s, SUBI | INSN_R1(TCG_REG_R20) | INSN_R2(args[2]) |
+ lowsignext(0x1f, 0, 11));
+ tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R20));
+ tcg_out32(s, VEXTRS | INSN_R1(args[0]) | INSN_R2(args[1]) |
+ INSN_DEP_LEN(32));
+ break;
+
+ case INDEX_op_mul_i32:
+ fprintf(stderr, "unimplemented mul\n");
+ tcg_abort();
+ break;
+ case INDEX_op_mulu2_i32:
+ fprintf(stderr, "unimplemented mulu2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_div2_i32:
+ fprintf(stderr, "unimplemented div2\n");
+ tcg_abort();
+ break;
+ case INDEX_op_divu2_i32:
+ fprintf(stderr, "unimplemented divu2\n");
+ tcg_abort();
+ break;
+
+ case INDEX_op_brcond_i32:
+ fprintf(stderr, "unimplemented brcond\n");
+ tcg_abort();
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+
+ default:
+ fprintf(stderr, "unknown opcode 0x%x\n", opc);
+ tcg_abort();
+ }
+ return;
+
+gen_arith:
+ tcg_out_arith(s, args[0], args[1], args[2], c);
+}
+
+static const TCGTargetOpDef hppa_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+
+ { INDEX_op_call, { "r" } },
+ { INDEX_op_jmp, { "r" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "r" } },
+ { INDEX_op_and_i32, { "r", "r", "r" } },
+ { INDEX_op_or_i32, { "r", "r", "r" } },
+ { INDEX_op_xor_i32, { "r", "r", "r" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "r" } },
+ { INDEX_op_shr_i32, { "r", "r", "r" } },
+ { INDEX_op_sar_i32, { "r", "r", "r" } },
+
+ { INDEX_op_brcond_i32, { "r", "r" } },
+
+#if TARGET_LONG_BITS == 32
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L" } },
+#else
+ { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
+#endif
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R20) |
+ (1 << TCG_REG_R21) |
+ (1 << TCG_REG_R22) |
+ (1 << TCG_REG_R23) |
+ (1 << TCG_REG_R24) |
+ (1 << TCG_REG_R25) |
+ (1 << TCG_REG_R26));
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
+
+ tcg_add_target_add_op_defs(hppa_op_defs);
+}
diff --git a/tcg/hppa/tcg-target.h b/tcg/hppa/tcg-target.h
new file mode 100644
index 0000000..7ab6f0c
--- /dev/null
+++ b/tcg/hppa/tcg-target.h
@@ -0,0 +1,203 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#define TCG_TARGET_HPPA 1
+
+#if defined(_PA_RISC1_1)
+#define TCG_TARGET_REG_BITS 32
+#else
+#error unsupported
+#endif
+
+#define TCG_TARGET_WORDS_BIGENDIAN
+
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_RP,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_DP,
+ TCG_REG_RET0,
+ TCG_REG_RET1,
+ TCG_REG_SP,
+ TCG_REG_R31,
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_STACK_GROWSUP
+
+/* optional instructions */
+//#define TCG_TARGET_HAS_ext8s_i32
+//#define TCG_TARGET_HAS_ext16s_i32
+//#define TCG_TARGET_HAS_bswap16_i32
+//#define TCG_TARGET_HAS_bswap32_i32
+
+/* Note: must be synced with dyngen-exec.h */
+#define TCG_AREG0 TCG_REG_R17
+#define TCG_AREG1 TCG_REG_R14
+#define TCG_AREG2 TCG_REG_R15
+
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ start &= ~31;
+ while (start <= stop)
+ {
+ asm volatile ("fdc 0(%0)\n"
+ "sync\n"
+ "fic 0(%%sr4, %0)\n"
+ "sync\n"
+ : : "r"(start) : "memory");
+ start += 32;
+ }
+}
+
+/* supplied by libgcc */
+extern void *__canonicalize_funcptr_for_compare(void *);
+
+/* Field selection types defined by hppa */
+#define rnd(x) (((x)+0x1000)&~0x1fff)
+/* lsel: select left 21 bits */
+#define lsel(v,a) (((v)+(a))>>11)
+/* rsel: select right 11 bits */
+#define rsel(v,a) (((v)+(a))&0x7ff)
+/* lrsel with rounding of addend to nearest 8k */
+#define lrsel(v,a) (((v)+rnd(a))>>11)
+/* rrsel with rounding of addend to nearest 8k */
+#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
+
+#define mask(x,sz) ((x) & ~((1<<(sz))-1))
+
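+/* PA-RISC scatters immediate and displacement bits within the instruction
+ * word; the reassemble_* helpers permute a plain 12/14/17/21-bit value into
+ * the encoded bit layout. */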
+static inline int reassemble_12(int as12)
+{
+ return (((as12 & 0x800) >> 11) |
+ ((as12 & 0x400) >> 8) |
+ ((as12 & 0x3ff) << 3));
+}
+
+static inline int reassemble_14(int as14)
+{
+ return (((as14 & 0x1fff) << 1) |
+ ((as14 & 0x2000) >> 13));
+}
+
+static inline int reassemble_17(int as17)
+{
+ return (((as17 & 0x10000) >> 16) |
+ ((as17 & 0x0f800) << 5) |
+ ((as17 & 0x00400) >> 8) |
+ ((as17 & 0x003ff) << 3));
+}
+
+static inline int reassemble_21(int as21)
+{
+ return (((as21 & 0x100000) >> 20) |
+ ((as21 & 0x0ffe00) >> 8) |
+ ((as21 & 0x000180) << 7) |
+ ((as21 & 0x00007c) << 14) |
+ ((as21 & 0x000003) << 12));
+}
+
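+/* The hppa_patch* helpers rewrite the immediate field of an already emitted
+ * instruction in place, after applying the appropriate lsel/rsel field
+ * selection to the target value. */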
+static inline void hppa_patch21l(uint32_t *insn, int val, int addend)
+{
+ val = lrsel(val, addend);
+ *insn = mask(*insn, 21) | reassemble_21(val);
+}
+
+static inline void hppa_patch14r(uint32_t *insn, int val, int addend)
+{
+ val = rrsel(val, addend);
+ *insn = mask(*insn, 14) | reassemble_14(val);
+}
+
+static inline void hppa_patch17r(uint32_t *insn, int val, int addend)
+{
+ val = rrsel(val, addend);
+ *insn = (*insn & ~0x1f1ffd) | reassemble_17(val);
+}
+
+
+static inline void hppa_patch21l_dprel(uint32_t *insn, int val, int addend)
+{
+ register unsigned int dp asm("r27");
+ hppa_patch21l(insn, val - dp, addend);
+}
+
+static inline void hppa_patch14r_dprel(uint32_t *insn, int val, int addend)
+{
+ register unsigned int dp asm("r27");
+ hppa_patch14r(insn, val - dp, addend);
+}
+
+static inline void hppa_patch17f(uint32_t *insn, int val, int addend)
+{
+ int dot = (int)insn & ~0x3;
+ int v = ((val + addend) - dot - 8) / 4;
+ if (v >= (1 << 16) || v < -(1 << 16)) {
+ printf("cannot fit branch to offset %d [%08x->%08x]\n", v, dot, val);
+ abort();
+ }
+ *insn = (*insn & ~0x1f1ffd) | reassemble_17(v);
+}
+
+static inline void hppa_load_imm21l(uint32_t *insn, int val, int addend)
+{
+ /* Transform addil L'sym(%dp) to ldil L'val, %r1 */
+ *insn = 0x20200000 | reassemble_21(lrsel(val, 0));
+}
+
+static inline void hppa_load_imm14r(uint32_t *insn, int val, int addend)
+{
+ /* Transform ldw R'sym(%r1), %rN to ldo R'sym(%r1), %rN */
+ hppa_patch14r(insn, val, addend);
+ /* HACK */
+ if (addend == 0)
+ *insn = (*insn & ~0xfc000000) | (0x0d << 26);
+}
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
new file mode 100644
index 0000000..96cc461
--- /dev/null
+++ b/tcg/ppc/tcg-target.c
@@ -0,0 +1,1836 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+static uint8_t *tb_ret_addr;
+
+#ifdef _CALL_DARWIN
+#define LINKAGE_AREA_SIZE 24
+#define LR_OFFSET 8
+#elif defined _CALL_AIX
+#define LINKAGE_AREA_SIZE 52
+#define LR_OFFSET 8
+#else
+#define LINKAGE_AREA_SIZE 8
+#define LR_OFFSET 4
+#endif
+
+#define FAST_PATH
+#if TARGET_PHYS_ADDR_BITS <= 32
+#define ADDEND_OFFSET 0
+#else
+#define ADDEND_OFFSET 4
+#endif
+
+#ifndef GUEST_BASE
+#define GUEST_BASE 0
+#endif
+
+#ifdef CONFIG_USE_GUEST_BASE
+#define TCG_GUEST_BASE_REG 30
+#else
+#define TCG_GUEST_BASE_REG 0
+#endif
+
+#ifndef NDEBUG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "r0",
+ "r1",
+ "rp",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+ "r28",
+ "r29",
+ "r30",
+ "r31"
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31,
+#ifdef _CALL_DARWIN
+ TCG_REG_R2,
+#endif
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+#ifndef _CALL_DARWIN
+ TCG_REG_R11,
+#endif
+ TCG_REG_R12,
+#ifndef _CALL_SYSV
+ TCG_REG_R13,
+#endif
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R3,
+ TCG_REG_R4
+};
+
+static const int tcg_target_callee_save_regs[] = {
+#ifdef _CALL_DARWIN
+ TCG_REG_R11,
+ TCG_REG_R13,
+#endif
+#ifdef _CALL_AIX
+ TCG_REG_R13,
+#endif
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ /* TCG_REG_R27, */ /* currently used for the global env, so no
+ need to save */
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
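+/* R_PPC_REL24 and R_PPC_REL14 patch the displacement fields of b and bc
+ * instructions (bits 2..25 and 2..15 respectively; the low two bits are the
+ * AA/LK flags).  The *_val helpers also check that the PC-relative
+ * displacement actually fits the field. */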
+static uint32_t reloc_pc24_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if ((disp << 6) >> 6 != disp)
+ tcg_abort ();
+
+ return disp & 0x3fffffc;
+}
+
+static void reloc_pc24 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0x3fffffc)
+ | reloc_pc24_val (pc, target);
+}
+
+static uint16_t reloc_pc14_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if (disp != (int16_t) disp)
+ tcg_abort ();
+
+ return disp & 0xfffc;
+}
+
+static void reloc_pc14 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0xfffc)
+ | reloc_pc14_val (pc, target);
+}
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch (type) {
+ case R_PPC_REL14:
+ reloc_pc14 (code_ptr, value);
+ break;
+ case R_PPC_REL24:
+ reloc_pc24 (code_ptr, value);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of register used for input function arguments */
+static int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return ARRAY_SIZE (tcg_target_call_iarg_regs);
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'A': case 'B': case 'C': case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
+ break;
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+#ifdef CONFIG_SOFTMMU
+ case 'L': /* qemu_ld constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ break;
+ case 'K': /* qemu_st[8..32] constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
+#if TARGET_LONG_BITS == 64
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
+#endif
+ break;
+ case 'M': /* qemu_st64 constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R7);
+ break;
+#else
+ case 'L':
+ case 'K':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'M':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
+ break;
+#endif
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ return 0;
+}
+
+#define OPCD(opc) ((opc)<<26)
+#define XO31(opc) (OPCD(31)|((opc)<<1))
+#define XO19(opc) (OPCD(19)|((opc)<<1))
+
+#define B OPCD(18)
+#define BC OPCD(16)
+#define LBZ OPCD(34)
+#define LHZ OPCD(40)
+#define LHA OPCD(42)
+#define LWZ OPCD(32)
+#define STB OPCD(38)
+#define STH OPCD(44)
+#define STW OPCD(36)
+
+#define ADDIC OPCD(12)
+#define ADDI OPCD(14)
+#define ADDIS OPCD(15)
+#define ORI OPCD(24)
+#define ORIS OPCD(25)
+#define XORI OPCD(26)
+#define XORIS OPCD(27)
+#define ANDI OPCD(28)
+#define ANDIS OPCD(29)
+#define MULLI OPCD( 7)
+#define CMPLI OPCD(10)
+#define CMPI OPCD(11)
+
+#define LWZU OPCD(33)
+#define STWU OPCD(37)
+
+#define RLWINM OPCD(21)
+#define RLWNM OPCD(23)
+
+#define BCLR XO19( 16)
+#define BCCTR XO19(528)
+#define CRAND XO19(257)
+#define CRANDC XO19(129)
+#define CRNAND XO19(225)
+#define CROR XO19(449)
+#define CRNOR XO19( 33)
+
+#define EXTSB XO31(954)
+#define EXTSH XO31(922)
+#define ADD XO31(266)
+#define ADDE XO31(138)
+#define ADDC XO31( 10)
+#define AND XO31( 28)
+#define SUBF XO31( 40)
+#define SUBFC XO31( 8)
+#define SUBFE XO31(136)
+#define OR XO31(444)
+#define XOR XO31(316)
+#define MULLW XO31(235)
+#define MULHWU XO31( 11)
+#define DIVW XO31(491)
+#define DIVWU XO31(459)
+#define CMP XO31( 0)
+#define CMPL XO31( 32)
+#define LHBRX XO31(790)
+#define LWBRX XO31(534)
+#define STHBRX XO31(918)
+#define STWBRX XO31(662)
+#define MFSPR XO31(339)
+#define MTSPR XO31(467)
+#define SRAWI XO31(824)
+#define NEG XO31(104)
+#define MFCR XO31( 19)
+#define CNTLZW XO31( 26)
+#define NOR XO31(124)
+#define ANDC XO31( 60)
+#define ORC XO31(412)
+
+#define LBZX XO31( 87)
+#define LHZX XO31(279)
+#define LHAX XO31(343)
+#define LWZX XO31( 23)
+#define STBX XO31(215)
+#define STHX XO31(407)
+#define STWX XO31(151)
+
+#define SPR(a,b) ((((a)<<5)|(b))<<11)
+#define LR SPR(8, 0)
+#define CTR SPR(9, 0)
+
+#define SLW XO31( 24)
+#define SRW XO31(536)
+#define SRAW XO31(792)
+
+#define TW XO31(4)
+#define TRAP (TW | TO (31))
+
+#define RT(r) ((r)<<21)
+#define RS(r) ((r)<<21)
+#define RA(r) ((r)<<16)
+#define RB(r) ((r)<<11)
+#define TO(t) ((t)<<21)
+#define SH(s) ((s)<<11)
+#define MB(b) ((b)<<6)
+#define ME(e) ((e)<<1)
+#define BO(o) ((o)<<21)
+
+#define LK 1
+
+#define TAB(t,a,b) (RT(t) | RA(a) | RB(b))
+#define SAB(s,a,b) (RS(s) | RA(a) | RB(b))
+
+#define BF(n) ((n)<<23)
+#define BI(n, c) (((c)+((n)*4))<<16)
+#define BT(n, c) (((c)+((n)*4))<<21)
+#define BA(n, c) (((c)+((n)*4))<<16)
+#define BB(n, c) (((c)+((n)*4))<<11)
+
+#define BO_COND_TRUE BO (12)
+#define BO_COND_FALSE BO (4)
+#define BO_ALWAYS BO (20)
+
+enum {
+ CR_LT,
+ CR_GT,
+ CR_EQ,
+ CR_SO
+};
+
+static const uint32_t tcg_to_bc[10] = {
+ [TCG_COND_EQ] = BC | BI (7, CR_EQ) | BO_COND_TRUE,
+ [TCG_COND_NE] = BC | BI (7, CR_EQ) | BO_COND_FALSE,
+ [TCG_COND_LT] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GE] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LE] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GT] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+ [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+};
+
+static void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out32 (s, OR | SAB (arg, ret, arg));
+}
+
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ if (arg == (int16_t) arg)
+ tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
+ else {
+ tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
+ if (arg & 0xffff)
+ tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
+ }
+}
+
+static void tcg_out_ldst (TCGContext *s, int ret, int addr,
+ int offset, int op1, int op2)
+{
+ if (offset == (int16_t) offset)
+ tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, offset);
+ tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
+ }
+}
+
+static void tcg_out_b (TCGContext *s, int mask, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) s->code_ptr;
+ if ((disp << 6) >> 6 == disp)
+ tcg_out32 (s, B | (disp & 0x3fffffc) | mask);
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, (tcg_target_long) target);
+ tcg_out32 (s, MTSPR | RS (0) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | mask);
+ }
+}
+
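+/* On _CALL_AIX a function pointer addresses a descriptor: word 0 is the
+ * code address (moved into CTR) and word 1 the callee's TOC, loaded into r2
+ * before the bctrl.  Other ABIs use a plain bl, or mtlr/blrl for an
+ * indirect call. */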
+static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
+{
+#ifdef _CALL_AIX
+ int reg;
+
+ if (const_arg) {
+ reg = 2;
+ tcg_out_movi (s, TCG_TYPE_I32, reg, arg);
+ }
+ else reg = arg;
+
+ tcg_out32 (s, LWZ | RT (0) | RA (reg));
+ tcg_out32 (s, MTSPR | RA (0) | CTR);
+ tcg_out32 (s, LWZ | RT (2) | RA (reg) | 4);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | LK);
+#else
+ if (const_arg) {
+ tcg_out_b (s, LK, arg);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (arg) | LR);
+ tcg_out32 (s, BCLR | BO_ALWAYS | LK);
+ }
+#endif
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
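+/* Guest loads.  With CONFIG_SOFTMMU the rlwinm extracts the TLB index from
+ * the address and scales it to a byte offset; the lwzu both loads the
+ * addr_read comparator and leaves the entry's address in r0, so the addend
+ * can later be fetched relative to r0.  On a match the bc skips ahead to
+ * the fast path; otherwise the code falls through into the helper call.
+ * The fast path performs the access inline, using l{h,w}brx when the guest
+ * endianness differs from the host's. */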
+static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, data_reg, data_reg2, r0, r1, rbase, mem_index, s_bits, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r2;
+ void *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0;
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+ s_bits = opc & 3;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+ rbase = 0;
+
+ tcg_out32 (s, (RLWINM
+ | RA (r0)
+ | RS (addr_reg)
+ | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ | MB (32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS))
+ | ME (31 - CPU_TLB_ENTRY_BITS)
+ )
+ );
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0));
+ tcg_out32 (s, (LWZU
+ | RT (r1)
+ | RA (r0)
+ | offsetof (CPUState, tlb_table[mem_index][0].addr_read)
+ )
+ );
+ tcg_out32 (s, (RLWINM
+ | RA (r2)
+ | RS (addr_reg)
+ | SH (0)
+ | MB ((32 - s_bits) & 31)
+ | ME (31 - TARGET_PAGE_BITS)
+ )
+ );
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1));
+#if TARGET_LONG_BITS == 64
+ tcg_out32 (s, LWZ | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, CMP | BF (6) | RA (addr_reg2) | RB (r1));
+ tcg_out32 (s, CRAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
+#endif
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+#if TARGET_LONG_BITS == 32
+ tcg_out_mov (s, 3, addr_reg);
+ tcg_out_movi (s, TCG_TYPE_I32, 4, mem_index);
+#else
+ tcg_out_mov (s, 3, addr_reg2);
+ tcg_out_mov (s, 4, addr_reg);
+ tcg_out_movi (s, TCG_TYPE_I32, 5, mem_index);
+#endif
+
+ tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
+ switch (opc) {
+ case 0|4:
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (3));
+ break;
+ case 1|4:
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (3));
+ break;
+ case 0:
+ case 1:
+ case 2:
+ if (data_reg != 3)
+ tcg_out_mov (s, data_reg, 3);
+ break;
+ case 3:
+ if (data_reg == 3) {
+ if (data_reg2 == 4) {
+ tcg_out_mov (s, 0, 4);
+ tcg_out_mov (s, 4, 3);
+ tcg_out_mov (s, 3, 0);
+ }
+ else {
+ tcg_out_mov (s, data_reg2, 3);
+ tcg_out_mov (s, 3, 4);
+ }
+ }
+ else {
+ if (data_reg != 4) tcg_out_mov (s, data_reg, 4);
+ if (data_reg2 != 3) tcg_out_mov (s, data_reg2, 3);
+ }
+ break;
+ }
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
+ tcg_out32 (s, (LWZ
+ | RT (r0)
+ | RA (r0)
+ | (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_read))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+ r0 = addr_reg;
+ r1 = 3;
+ rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+
+ switch (opc) {
+ default:
+ case 0:
+ tcg_out32 (s, LBZX | TAB (data_reg, rbase, r0));
+ break;
+ case 0|4:
+ tcg_out32 (s, LBZX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (data_reg));
+ break;
+ case 1:
+ if (bswap)
+ tcg_out32 (s, LHBRX | TAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, LHZX | TAB (data_reg, rbase, r0));
+ break;
+ case 1|4:
+ if (bswap) {
+ tcg_out32 (s, LHBRX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (data_reg));
+ }
+ else tcg_out32 (s, LHAX | TAB (data_reg, rbase, r0));
+ break;
+ case 2:
+ if (bswap)
+ tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, LWZX | TAB (data_reg, rbase, r0));
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, LWBRX | TAB (data_reg2, rbase, r1));
+ }
+ else {
+#ifdef CONFIG_USE_GUEST_BASE
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, LWZX | TAB (data_reg2, rbase, r0));
+ tcg_out32 (s, LWZX | TAB (data_reg, rbase, r1));
+#else
+ if (r0 == data_reg2) {
+ tcg_out32 (s, LWZ | RT (0) | RA (r0));
+ tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4);
+ tcg_out_mov (s, data_reg2, 0);
+ }
+ else {
+ tcg_out32 (s, LWZ | RT (data_reg2) | RA (r0));
+ tcg_out32 (s, LWZ | RT (data_reg) | RA (r0) | 4);
+ }
+#endif
+ }
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, r0, r1, data_reg, data_reg2, mem_index, bswap, rbase;
+#ifdef CONFIG_SOFTMMU
+ int r2, ir;
+ void *label1_ptr, *label2_ptr;
+#endif
+#if TARGET_LONG_BITS == 64
+ int addr_reg2;
+#endif
+
+ data_reg = *args++;
+ if (opc == 3)
+ data_reg2 = *args++;
+ else
+ data_reg2 = 0;
+ addr_reg = *args++;
+#if TARGET_LONG_BITS == 64
+ addr_reg2 = *args++;
+#endif
+ mem_index = *args;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+ rbase = 0;
+
+ tcg_out32 (s, (RLWINM
+ | RA (r0)
+ | RS (addr_reg)
+ | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ | MB (32 - (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS))
+ | ME (31 - CPU_TLB_ENTRY_BITS)
+ )
+ );
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0));
+ tcg_out32 (s, (LWZU
+ | RT (r1)
+ | RA (r0)
+ | offsetof (CPUState, tlb_table[mem_index][0].addr_write)
+ )
+ );
+ tcg_out32 (s, (RLWINM
+ | RA (r2)
+ | RS (addr_reg)
+ | SH (0)
+ | MB ((32 - opc) & 31)
+ | ME (31 - TARGET_PAGE_BITS)
+ )
+ );
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1));
+#if TARGET_LONG_BITS == 64
+ tcg_out32 (s, LWZ | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, CMP | BF (6) | RA (addr_reg2) | RB (r1));
+ tcg_out32 (s, CRAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
+#endif
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+#if TARGET_LONG_BITS == 32
+ tcg_out_mov (s, 3, addr_reg);
+ ir = 4;
+#else
+ tcg_out_mov (s, 3, addr_reg2);
+ tcg_out_mov (s, 4, addr_reg);
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ ir = 5;
+#else
+ ir = 4;
+#endif
+#endif
+
+ switch (opc) {
+ case 0:
+ tcg_out32 (s, (RLWINM
+ | RA (ir)
+ | RS (data_reg)
+ | SH (0)
+ | MB (24)
+ | ME (31)));
+ break;
+ case 1:
+ tcg_out32 (s, (RLWINM
+ | RA (ir)
+ | RS (data_reg)
+ | SH (0)
+ | MB (16)
+ | ME (31)));
+ break;
+ case 2:
+ tcg_out_mov (s, ir, data_reg);
+ break;
+ case 3:
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ ir = 5;
+#endif
+ tcg_out_mov (s, ir++, data_reg2);
+ tcg_out_mov (s, ir, data_reg);
+ break;
+ }
+ ir++;
+
+ tcg_out_movi (s, TCG_TYPE_I32, ir, mem_index);
+ tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ tcg_out32 (s, (LWZ
+ | RT (r0)
+ | RA (r0)
+ | (ADDEND_OFFSET + offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_write))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+ r0 = addr_reg;
+ r1 = 3;
+ rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out32 (s, STBX | SAB (data_reg, rbase, r0));
+ break;
+ case 1:
+ if (bswap)
+ tcg_out32 (s, STHBRX | SAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, STHX | SAB (data_reg, rbase, r0));
+ break;
+ case 2:
+ if (bswap)
+ tcg_out32 (s, STWBRX | SAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, STWX | SAB (data_reg, rbase, r0));
+ break;
+ case 3:
+ if (bswap) {
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, STWBRX | SAB (data_reg, rbase, r0));
+ tcg_out32 (s, STWBRX | SAB (data_reg2, rbase, r1));
+ }
+ else {
+#ifdef CONFIG_USE_GUEST_BASE
+ tcg_out32 (s, STWX | SAB (data_reg2, rbase, r0));
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, STWX | SAB (data_reg, rbase, r1));
+#else
+ tcg_out32 (s, STW | RS (data_reg2) | RA (r0));
+ tcg_out32 (s, STW | RS (data_reg) | RA (r0) | 4);
+#endif
+ }
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
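+/* The prologue saves LR and the callee-saved registers, sets up the frame,
+ * and jumps via CTR to the translated-code pointer passed in r3.
+ * tb_ret_addr marks the start of the epilogue, which exit_tb branches back
+ * to with the return value already in r3. */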
+void tcg_target_qemu_prologue (TCGContext *s)
+{
+ int i, frame_size;
+
+ frame_size = 0
+ + LINKAGE_AREA_SIZE
+ + TCG_STATIC_CALL_ARGS_SIZE
+ + ARRAY_SIZE (tcg_target_callee_save_regs) * 4
+ ;
+ frame_size = (frame_size + 15) & ~15;
+
+#ifdef _CALL_AIX
+ {
+ uint32_t addr;
+
+ /* First emit adhoc function descriptor */
+ addr = (uint32_t) s->code_ptr + 12;
+ tcg_out32 (s, addr); /* entry point */
+ s->code_ptr += 8; /* skip TOC and environment pointer */
+ }
+#endif
+ tcg_out32 (s, MFSPR | RT (0) | LR);
+ tcg_out32 (s, STWU | RS (1) | RA (1) | (-frame_size & 0xffff));
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (STW
+ | RS (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 4 + LINKAGE_AREA_SIZE + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, STW | RS (0) | RA (1) | (frame_size + LR_OFFSET));
+
+#ifdef CONFIG_USE_GUEST_BASE
+ tcg_out_movi (s, TCG_TYPE_I32, TCG_GUEST_BASE_REG, GUEST_BASE);
+#endif
+
+ tcg_out32 (s, MTSPR | RS (3) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+ tb_ret_addr = s->code_ptr;
+
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (LWZ
+ | RT (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 4 + LINKAGE_AREA_SIZE + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, LWZ | RT (0) | RA (1) | (frame_size + LR_OFFSET));
+ tcg_out32 (s, MTSPR | RS (0) | LR);
+ tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
+ tcg_out32 (s, BCLR | BO_ALWAYS);
+}
+
+static void tcg_out_ld (TCGContext *s, TCGType type, int ret, int arg1,
+ tcg_target_long arg2)
+{
+ tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
+}
+
+static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1,
+ tcg_target_long arg2)
+{
+ tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
+}
+
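+/* addi sign-extends its 16-bit immediate, so when bit 15 of the constant is
+ * set the high half loaded by addis must be incremented by one to
+ * compensate; that is the ((uint16_t) si >> 15) term below. */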
+static void ppc_addi (TCGContext *s, int rt, int ra, tcg_target_long si)
+{
+ if (!si && rt == ra)
+ return;
+
+ if (si == (int16_t) si)
+ tcg_out32 (s, ADDI | RT (rt) | RA (ra) | (si & 0xffff));
+ else {
+ uint16_t h = ((si >> 16) & 0xffff) + ((uint16_t) si >> 15);
+ tcg_out32 (s, ADDIS | RT (rt) | RA (ra) | h);
+ tcg_out32 (s, ADDI | RT (rt) | RA (rt) | (si & 0xffff));
+ }
+}
+
+static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ ppc_addi (s, reg, reg, val);
+}
+
+static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
+ int const_arg2, int cr)
+{
+ int imm;
+ uint32_t op;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ else if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMP;
+ imm = 0;
+ break;
+
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ if (const_arg2) {
+ if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ default:
+ tcg_abort ();
+ }
+ op |= BF (cr);
+
+ if (imm)
+ tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
+ else {
+ if (const_arg2) {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
+ tcg_out32 (s, op | RA (arg1) | RB (0));
+ }
+ else
+ tcg_out32 (s, op | RA (arg1) | RB (arg2));
+ }
+
+}
+
+static void tcg_out_bc (TCGContext *s, int bc, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value)
+ tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
+ else {
+ uint16_t val = *(uint16_t *) &s->code_ptr[2];
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, bc | (val & 0xfffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
+ }
+}
+
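+/* Reduce a double-word comparison to a single bit in CR7[EQ]: for the
+ * ordered conditions the result is "high words satisfy the condition" OR
+ * ("high words equal" AND "low words satisfy it"); EQ/NE simply AND/NAND
+ * the per-word equality bits. */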
+static void tcg_out_cr7eq_from_cond (TCGContext *s, const TCGArg *args,
+ const int *const_args)
+{
+ int cond = args[4], op;
+ struct { int bit1; int bit2; int cond2; } bits[] = {
+ [TCG_COND_LT ] = { CR_LT, CR_LT, TCG_COND_LT },
+ [TCG_COND_LE ] = { CR_LT, CR_GT, TCG_COND_LT },
+ [TCG_COND_GT ] = { CR_GT, CR_GT, TCG_COND_GT },
+ [TCG_COND_GE ] = { CR_GT, CR_LT, TCG_COND_GT },
+ [TCG_COND_LTU] = { CR_LT, CR_LT, TCG_COND_LTU },
+ [TCG_COND_LEU] = { CR_LT, CR_GT, TCG_COND_LTU },
+ [TCG_COND_GTU] = { CR_GT, CR_GT, TCG_COND_GTU },
+ [TCG_COND_GEU] = { CR_GT, CR_LT, TCG_COND_GTU },
+ }, *b = &bits[cond];
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ op = (cond == TCG_COND_EQ) ? CRAND : CRNAND;
+ tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 6);
+ tcg_out_cmp (s, cond, args[1], args[3], const_args[3], 7);
+ tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ case TCG_COND_GE:
+ case TCG_COND_LTU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ case TCG_COND_GEU:
+ op = (b->bit1 != b->bit2) ? CRANDC : CRAND;
+ tcg_out_cmp (s, b->cond2, args[1], args[3], const_args[3], 5);
+ tcg_out_cmp (s, TCG_COND_EQ, args[1], args[3], const_args[3], 6);
+ tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 7);
+ tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, b->bit2));
+ tcg_out32 (s, CROR | BT (7, CR_EQ) | BA (5, b->bit1) | BB (7, CR_EQ));
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
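+/* Branch-free setcond using classic PowerPC idioms: EQ is cntlzw(a ^ b)
+ * shifted right by 5 (only a zero xor gives a count of 32), NE uses the
+ * addic/subfe carry trick, and the remaining conditions read CR7 through
+ * mfcr and isolate the relevant bit with rlwinm. */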
+static void tcg_out_setcond (TCGContext *s, int cond, TCGArg arg0,
+ TCGArg arg1, TCGArg arg2, int const_arg2)
+{
+ int crop, sh, arg;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ if (const_arg2) {
+ if (!arg2) {
+ arg = arg1;
+ }
+ else {
+ arg = 0;
+ if ((uint16_t) arg2 == arg2) {
+ tcg_out32 (s, XORI | RS (arg1) | RA (0) | arg2);
+ }
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
+ tcg_out32 (s, XOR | SAB (arg1, 0, 0));
+ }
+ }
+ }
+ else {
+ arg = 0;
+ tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
+ }
+ tcg_out32 (s, CNTLZW | RS (arg) | RA (0));
+ tcg_out32 (s, (RLWINM
+ | RA (arg0)
+ | RS (0)
+ | SH (27)
+ | MB (5)
+ | ME (31)
+ )
+ );
+ break;
+
+ case TCG_COND_NE:
+ if (const_arg2) {
+ if (!arg2) {
+ arg = arg1;
+ }
+ else {
+ arg = 0;
+ if ((uint16_t) arg2 == arg2) {
+ tcg_out32 (s, XORI | RS (arg1) | RA (0) | arg2);
+ }
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, arg2);
+ tcg_out32 (s, XOR | SAB (arg1, 0, 0));
+ }
+ }
+ }
+ else {
+ arg = 0;
+ tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
+ }
+
+ if (arg == arg1 && arg1 == arg0) {
+ tcg_out32 (s, ADDIC | RT (0) | RA (arg) | 0xffff);
+ tcg_out32 (s, SUBFE | TAB (arg0, 0, arg));
+ }
+ else {
+ tcg_out32 (s, ADDIC | RT (arg0) | RA (arg) | 0xffff);
+ tcg_out32 (s, SUBFE | TAB (arg0, arg0, arg));
+ }
+ break;
+
+ case TCG_COND_GT:
+ case TCG_COND_GTU:
+ sh = 30;
+ crop = 0;
+ goto crtest;
+
+ case TCG_COND_LT:
+ case TCG_COND_LTU:
+ sh = 29;
+ crop = 0;
+ goto crtest;
+
+ case TCG_COND_GE:
+ case TCG_COND_GEU:
+ sh = 31;
+ crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_LT) | BB (7, CR_LT);
+ goto crtest;
+
+ case TCG_COND_LE:
+ case TCG_COND_LEU:
+ sh = 31;
+ crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_GT) | BB (7, CR_GT);
+ crtest:
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
+ if (crop) tcg_out32 (s, crop);
+ tcg_out32 (s, MFCR | RT (0));
+ tcg_out32 (s, (RLWINM
+ | RA (arg0)
+ | RS (0)
+ | SH (sh)
+ | MB (31)
+ | ME (31)
+ )
+ );
+ break;
+
+ default:
+ tcg_abort ();
+ }
+}
+
+static void tcg_out_setcond2 (TCGContext *s, const TCGArg *args,
+ const int *const_args)
+{
+ tcg_out_cr7eq_from_cond (s, args + 1, const_args + 1);
+ tcg_out32 (s, MFCR | RT (0));
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (0)
+ | SH (31)
+ | MB (31)
+ | ME (31)
+ )
+ );
+}
+
+static void tcg_out_brcond (TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
+ tcg_out_bc (s, tcg_to_bc[cond], label_index);
+}
+
+/* XXX: we implement it at the target level to avoid having to
+ handle temporaries that cross basic blocks */
+static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
+ const int *const_args)
+{
+ tcg_out_cr7eq_from_cond (s, args, const_args);
+ tcg_out_bc (s, (BC | BI (7, CR_EQ) | BO_COND_TRUE), args[5]);
+}
+
+void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr)
+{
+ uint32_t *ptr;
+ long disp = addr - jmp_addr;
+ unsigned long patch_size;
+
+ ptr = (uint32_t *)jmp_addr;
+
+ if ((disp << 6) >> 6 != disp) {
+ ptr[0] = 0x3c000000 | (addr >> 16); /* lis 0,addr@h */
+ ptr[1] = 0x60000000 | (addr & 0xffff); /* ori 0,0,addr@l */
+ ptr[2] = 0x7c0903a6; /* mtctr 0 */
+ ptr[3] = 0x4e800420; /* bctr */
+ patch_size = 16;
+ } else {
+ /* patch the branch destination */
+ if (disp != 16) {
+ *ptr = 0x48000000 | (disp & 0x03fffffc); /* b disp */
+ patch_size = 4;
+ } else {
+ ptr[0] = 0x60000000; /* nop */
+ ptr[1] = 0x60000000;
+ ptr[2] = 0x60000000;
+ ptr[3] = 0x60000000;
+ patch_size = 16;
+ }
+ }
+ /* flush icache */
+ flush_icache_range(jmp_addr, jmp_addr + patch_size);
+}
+
+static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi (s, TCG_TYPE_I32, TCG_REG_R3, args[0]);
+ tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ s->code_ptr += 16;
+ }
+ else {
+ tcg_abort ();
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_br:
+ {
+ TCGLabel *l = &s->labels[args[0]];
+
+ if (l->has_value) {
+ tcg_out_b (s, 0, l->u.value);
+ }
+ else {
+ uint32_t val = *(uint32_t *) s->code_ptr;
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, B | (val & 0x3fffffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
+ }
+ }
+ break;
+ case INDEX_op_call:
+ tcg_out_call (s, args[0], const_args[0]);
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0]) {
+ tcg_out_b (s, 0, args[0]);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (args[0]) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+ }
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
+ break;
+ case INDEX_op_ld8u_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0]));
+ break;
+ case INDEX_op_ld16u_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX);
+ break;
+ case INDEX_op_ld_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX);
+ break;
+ case INDEX_op_st8_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX);
+ break;
+ case INDEX_op_st16_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX);
+ break;
+ case INDEX_op_st_i32:
+ tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX);
+ break;
+
+ case INDEX_op_add_i32:
+ if (const_args[2])
+ ppc_addi (s, args[0], args[1], args[2]);
+ else
+ tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_sub_i32:
+ if (const_args[2])
+ ppc_addi (s, args[0], args[1], -args[2]);
+ else
+ tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
+ break;
+
+ case INDEX_op_and_i32:
+ if (const_args[2]) {
+ uint32_t c;
+
+ c = args[2];
+
+ if (!c) {
+ tcg_out_movi (s, TCG_TYPE_I32, args[0], 0);
+ break;
+ }
+#ifdef __PPU__
+ uint32_t t, n;
+ int mb, me;
+
+ n = c ^ -(c & 1);
+ t = n + (n & -n);
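+            /* n normalizes a mask whose run of 1s wraps through bit 0;
+               t is then a power of two (or zero) exactly when c is a
+               contiguous mask expressible as a single RLWINM */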
+
+ if ((t & (t - 1)) == 0) {
+ int lzc, tzc;
+
+ if ((c & 0x80000001) == 0x80000001) {
+ lzc = clz32 (n);
+ tzc = ctz32 (n);
+
+ mb = 32 - tzc;
+ me = lzc - 1;
+ }
+ else {
+ lzc = clz32 (c);
+ tzc = ctz32 (c);
+
+ mb = lzc;
+ me = 31 - tzc;
+ }
+
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (0)
+ | MB (mb)
+ | ME (me)
+ )
+ );
+ }
+ else
+#endif /* __PPU__ */
+ {
+ if ((c & 0xffff) == c)
+ tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | c);
+ else if ((c & 0xffff0000) == c)
+ tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0])
+ | ((c >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, c);
+ tcg_out32 (s, AND | SAB (args[1], args[0], 0));
+ }
+ }
+ }
+ else
+ tcg_out32 (s, AND | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_or_i32:
+ if (const_args[2]) {
+ if (args[2] & 0xffff) {
+ tcg_out32 (s, ORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ if (args[2] >> 16)
+ tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ else {
+ tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ }
+ else
+ tcg_out32 (s, OR | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_xor_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, XORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, XOR | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, XOR | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_andc_i32:
+ tcg_out32 (s, ANDC | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_orc_i32:
+ tcg_out32 (s, ORC | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_mul_i32:
+ if (const_args[2]) {
+ if (args[2] == (int16_t) args[2])
+ tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1])
+ | (args[2] & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], 0));
+ }
+ }
+ else
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_div_i32:
+ tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_mulu2_i32:
+ if (args[0] == args[2] || args[0] == args[3]) {
+ tcg_out32 (s, MULLW | TAB (0, args[2], args[3]));
+ tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3]));
+ tcg_out_mov (s, args[0], 0);
+ }
+ else {
+ tcg_out32 (s, MULLW | TAB (args[0], args[2], args[3]));
+ tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3]));
+ }
+ break;
+
+ case INDEX_op_shl_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (args[2])
+ | MB (0)
+ | ME (31 - args[2])
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SLW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_shr_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (32 - args[2])
+ | MB (args[2])
+ | ME (31)
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SRW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_sar_i32:
+ if (const_args[2])
+ tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2]));
+ else
+ tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_rotl_i32:
+ {
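+            /* a rotate is just RLWINM/RLWNM with the full 0..31 mask */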
+ int op = 0
+ | RA (args[0])
+ | RS (args[1])
+ | MB (0)
+ | ME (31)
+ | (const_args[2] ? RLWINM | SH (args[2])
+ : RLWNM | RB (args[2]))
+ ;
+ tcg_out32 (s, op);
+ }
+ break;
+ case INDEX_op_rotr_i32:
+ if (const_args[2]) {
+ if (!args[2]) {
+ tcg_out_mov (s, args[0], args[1]);
+ }
+ else {
+ tcg_out32 (s, RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (32 - args[2])
+ | MB (0)
+ | ME (31)
+ );
+ }
+ }
+ else {
+            /* r0 = 32 - n, since rotating right by n is rotating
+               left by 32 - n */
+            tcg_out32 (s, SUBFIC | RT (0) | RA (args[2]) | 32);
+ tcg_out32 (s, RLWNM
+ | RA (args[0])
+ | RS (args[1])
+ | RB (0)
+ | MB (0)
+ | ME (31)
+ );
+ }
+ break;
+
+ case INDEX_op_add2_i32:
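+        /* 64-bit add as an ADDC/ADDE carry chain, staging the low word
+           in r0 when it would overwrite an input of the high half */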
+ if (args[0] == args[3] || args[0] == args[5]) {
+ tcg_out32 (s, ADDC | TAB (0, args[2], args[4]));
+ tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5]));
+ tcg_out_mov (s, args[0], 0);
+ }
+ else {
+ tcg_out32 (s, ADDC | TAB (args[0], args[2], args[4]));
+ tcg_out32 (s, ADDE | TAB (args[1], args[3], args[5]));
+ }
+ break;
+ case INDEX_op_sub2_i32:
+ if (args[0] == args[3] || args[0] == args[5]) {
+ tcg_out32 (s, SUBFC | TAB (0, args[4], args[2]));
+ tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3]));
+ tcg_out_mov (s, args[0], 0);
+ }
+ else {
+ tcg_out32 (s, SUBFC | TAB (args[0], args[4], args[2]));
+ tcg_out32 (s, SUBFE | TAB (args[1], args[5], args[3]));
+ }
+ break;
+
+ case INDEX_op_brcond_i32:
+        /*
+          args[0] = arg1
+          args[1] = arg2 (constant iff const_args[1])
+          args[2] = cond
+          args[3] = label_index
+        */
+ tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3]);
+ break;
+ case INDEX_op_brcond2_i32:
+ tcg_out_brcond2(s, args, const_args);
+ break;
+
+ case INDEX_op_neg_i32:
+ tcg_out32 (s, NEG | RT (args[0]) | RA (args[1]));
+ break;
+
+ case INDEX_op_not_i32:
+ tcg_out32 (s, NOR | SAB (args[1], args[0], args[0]));
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, 3);
+ break;
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, 3);
+ break;
+
+ case INDEX_op_ext8s_i32:
+ tcg_out32 (s, EXTSB | RS (args[1]) | RA (args[0]));
+ break;
+ case INDEX_op_ext8u_i32:
+ tcg_out32 (s, RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (0)
+ | MB (24)
+ | ME (31)
+ );
+ break;
+ case INDEX_op_ext16s_i32:
+ tcg_out32 (s, EXTSH | RS (args[1]) | RA (args[0]));
+ break;
+ case INDEX_op_ext16u_i32:
+ tcg_out32 (s, RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (0)
+ | MB (16)
+ | ME (31)
+ );
+ break;
+
+ case INDEX_op_setcond_i32:
+ tcg_out_setcond (s, args[3], args[0], args[1], args[2], const_args[2]);
+ break;
+ case INDEX_op_setcond2_i32:
+ tcg_out_setcond2 (s, args, const_args);
+ break;
+
+ default:
+ tcg_dump_ops (s, stderr);
+ tcg_abort ();
+ }
+}
+
+static const TCGTargetOpDef ppc_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "ri" } },
+ { INDEX_op_mul_i32, { "r", "r", "ri" } },
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
+ { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "ri" } },
+ { INDEX_op_and_i32, { "r", "r", "ri" } },
+ { INDEX_op_or_i32, { "r", "r", "ri" } },
+ { INDEX_op_xor_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_rotl_i32, { "r", "r", "ri" } },
+ { INDEX_op_rotr_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+
+ { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
+ { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
+
+ { INDEX_op_neg_i32, { "r", "r" } },
+ { INDEX_op_not_i32, { "r", "r" } },
+
+ { INDEX_op_andc_i32, { "r", "r", "r" } },
+ { INDEX_op_orc_i32, { "r", "r", "r" } },
+
+ { INDEX_op_setcond_i32, { "r", "r", "ri" } },
+ { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
+
+#if TARGET_LONG_BITS == 32
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "K", "K" } },
+ { INDEX_op_qemu_st16, { "K", "K" } },
+ { INDEX_op_qemu_st32, { "K", "K" } },
+ { INDEX_op_qemu_st64, { "M", "M", "M" } },
+#else
+ { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "L", "L", "L" } },
+
+ { INDEX_op_qemu_st8, { "K", "K", "K" } },
+ { INDEX_op_qemu_st16, { "K", "K", "K" } },
+ { INDEX_op_qemu_st32, { "K", "K", "K" } },
+ { INDEX_op_qemu_st64, { "M", "M", "M", "M" } },
+#endif
+
+ { INDEX_op_ext8s_i32, { "r", "r" } },
+ { INDEX_op_ext8u_i32, { "r", "r" } },
+ { INDEX_op_ext16s_i32, { "r", "r" } },
+ { INDEX_op_ext16u_i32, { "r", "r" } },
+
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R0) |
+#ifdef _CALL_DARWIN
+ (1 << TCG_REG_R2) |
+#endif
+ (1 << TCG_REG_R3) |
+ (1 << TCG_REG_R4) |
+ (1 << TCG_REG_R5) |
+ (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) |
+ (1 << TCG_REG_R8) |
+ (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) |
+ (1 << TCG_REG_R11) |
+ (1 << TCG_REG_R12)
+ );
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);
+#ifndef _CALL_DARWIN
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);
+#endif
+#ifdef _CALL_SYSV
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
+#endif
+#ifdef CONFIG_USE_GUEST_BASE
+ tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
+#endif
+
+ tcg_add_target_add_op_defs(ppc_op_defs);
+}
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
new file mode 100644
index 0000000..0c71a11
--- /dev/null
+++ b/tcg/ppc/tcg-target.h
@@ -0,0 +1,97 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_PPC 1
+
+#define TCG_TARGET_REG_BITS 32
+#define TCG_TARGET_WORDS_BIGENDIAN
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R1
+#define TCG_TARGET_STACK_ALIGN 16
+#if defined _CALL_DARWIN
+#define TCG_TARGET_CALL_STACK_OFFSET 24
+#elif defined _CALL_AIX
+#define TCG_TARGET_CALL_STACK_OFFSET 52
+#elif defined _CALL_SYSV
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_CALL_STACK_OFFSET 8
+#else
+#error Unsupported system
+#endif
+
+/* optional instructions */
+#define TCG_TARGET_HAS_div_i32
+#define TCG_TARGET_HAS_rot_i32
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+#define TCG_TARGET_HAS_ext8u_i32
+#define TCG_TARGET_HAS_ext16u_i32
+/* #define TCG_TARGET_HAS_bswap16_i32 */
+/* #define TCG_TARGET_HAS_bswap32_i32 */
+#define TCG_TARGET_HAS_not_i32
+#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_andc_i32
+#define TCG_TARGET_HAS_orc_i32
+
+#define TCG_AREG0 TCG_REG_R27
+#define TCG_AREG1 TCG_REG_R24
+#define TCG_AREG2 TCG_REG_R25
+
+#define TCG_TARGET_HAS_GUEST_BASE
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
new file mode 100644
index 0000000..fed179c
--- /dev/null
+++ b/tcg/ppc64/tcg-target.c
@@ -0,0 +1,1703 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#define TCG_CT_CONST_U32 0x100
+
+static uint8_t *tb_ret_addr;
+
+#define FAST_PATH
+
+#if TARGET_PHYS_ADDR_BITS == 32
+#define LD_ADDEND LWZ
+#else
+#define LD_ADDEND LD
+#endif
+
+#if TARGET_LONG_BITS == 32
+#define LD_ADDR LWZU
+#define CMP_L 0
+#else
+#define LD_ADDR LDU
+#define CMP_L (1<<21)
+#endif
+
+#ifndef GUEST_BASE
+#define GUEST_BASE 0
+#endif
+
+#ifdef CONFIG_USE_GUEST_BASE
+#define TCG_GUEST_BASE_REG 30
+#else
+#define TCG_GUEST_BASE_REG 0
+#endif
+
+#ifndef NDEBUG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "r0",
+ "r1",
+ "rp",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+ "r28",
+ "r29",
+ "r30",
+ "r31"
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31,
+#ifdef __APPLE__
+ TCG_REG_R2,
+#endif
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+#ifndef __APPLE__
+ TCG_REG_R11,
+#endif
+ TCG_REG_R12,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_R3
+};
+
+static const int tcg_target_callee_save_regs[] = {
+#ifdef __APPLE__
+ TCG_REG_R11,
+#endif
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ /* TCG_REG_R27, */ /* currently used for the global env, so no
+ need to save */
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
+static uint32_t reloc_pc24_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if ((disp << 38) >> 38 != disp)
+ tcg_abort ();
+
+ return disp & 0x3fffffc;
+}
+
+static void reloc_pc24 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0x3fffffc)
+ | reloc_pc24_val (pc, target);
+}
+
+static uint16_t reloc_pc14_val (void *pc, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) pc;
+ if (disp != (int16_t) disp)
+ tcg_abort ();
+
+ return disp & 0xfffc;
+}
+
+static void reloc_pc14 (void *pc, tcg_target_long target)
+{
+ *(uint32_t *) pc = (*(uint32_t *) pc & ~0xfffc)
+ | reloc_pc14_val (pc, target);
+}
+
+static void patch_reloc (uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch (type) {
+ case R_PPC_REL14:
+ reloc_pc14 (code_ptr, value);
+ break;
+ case R_PPC_REL24:
+ reloc_pc24 (code_ptr, value);
+ break;
+ default:
+ tcg_abort ();
+ }
+}
+
+/* maximum number of register used for input function arguments */
+static int tcg_target_get_call_iarg_regs_count (int flags)
+{
+ return ARRAY_SIZE (tcg_target_call_iarg_regs);
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint (TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'A': case 'B': case 'C': case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg (ct->u.regs, 3 + ct_str[0] - 'A');
+ break;
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'L': /* qemu_ld constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
+#ifdef CONFIG_SOFTMMU
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
+#endif
+ break;
+ case 'S': /* qemu_st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
+#ifdef CONFIG_SOFTMMU
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
+ tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
+#endif
+ break;
+ case 'Z':
+ ct->ct |= TCG_CT_CONST_U32;
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static int tcg_target_const_match (tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else if ((ct & TCG_CT_CONST_U32) && (val == (uint32_t) val))
+ return 1;
+ return 0;
+}
+
+#define OPCD(opc) ((opc)<<26)
+#define XO19(opc) (OPCD(19)|((opc)<<1))
+#define XO30(opc) (OPCD(30)|((opc)<<2))
+#define XO31(opc) (OPCD(31)|((opc)<<1))
+#define XO58(opc) (OPCD(58)|(opc))
+#define XO62(opc) (OPCD(62)|(opc))
+
+#define B OPCD( 18)
+#define BC OPCD( 16)
+#define LBZ OPCD( 34)
+#define LHZ OPCD( 40)
+#define LHA OPCD( 42)
+#define LWZ OPCD( 32)
+#define STB OPCD( 38)
+#define STH OPCD( 44)
+#define STW OPCD( 36)
+
+#define STD XO62( 0)
+#define STDU XO62( 1)
+#define STDX XO31(149)
+
+#define LD XO58( 0)
+#define LDX XO31( 21)
+#define LDU XO58( 1)
+#define LWA XO58( 2)
+#define LWAX XO31(341)
+
+#define ADDIC OPCD( 12)
+#define ADDI OPCD( 14)
+#define ADDIS OPCD( 15)
+#define ORI OPCD( 24)
+#define ORIS OPCD( 25)
+#define XORI OPCD( 26)
+#define XORIS OPCD( 27)
+#define ANDI OPCD( 28)
+#define ANDIS OPCD( 29)
+#define MULLI OPCD( 7)
+#define CMPLI OPCD( 10)
+#define CMPI OPCD( 11)
+
+#define LWZU OPCD( 33)
+#define STWU OPCD( 37)
+
+#define RLWINM OPCD( 21)
+
+#define RLDICL XO30( 0)
+#define RLDICR XO30( 1)
+#define RLDIMI XO30( 3)
+
+#define BCLR XO19( 16)
+#define BCCTR XO19(528)
+#define CRAND XO19(257)
+#define CRANDC XO19(129)
+#define CRNAND XO19(225)
+#define CROR XO19(449)
+#define CRNOR XO19( 33)
+
+#define EXTSB XO31(954)
+#define EXTSH XO31(922)
+#define EXTSW XO31(986)
+#define ADD XO31(266)
+#define ADDE XO31(138)
+#define ADDC XO31( 10)
+#define AND XO31( 28)
+#define SUBF XO31( 40)
+#define SUBFC XO31( 8)
+#define SUBFE XO31(136)
+#define OR XO31(444)
+#define XOR XO31(316)
+#define MULLW XO31(235)
+#define MULHWU XO31( 11)
+#define DIVW XO31(491)
+#define DIVWU XO31(459)
+#define CMP XO31( 0)
+#define CMPL XO31( 32)
+#define LHBRX XO31(790)
+#define LWBRX XO31(534)
+#define STHBRX XO31(918)
+#define STWBRX XO31(662)
+#define MFSPR XO31(339)
+#define MTSPR XO31(467)
+#define SRAWI XO31(824)
+#define NEG XO31(104)
+#define MFCR XO31( 19)
+#define CNTLZW XO31( 26)
+#define CNTLZD XO31( 58)
+
+#define MULLD XO31(233)
+#define MULHD XO31( 73)
+#define MULHDU XO31( 9)
+#define DIVD XO31(489)
+#define DIVDU XO31(457)
+
+#define LBZX XO31( 87)
+#define LHZX XO31(279)
+#define LHAX XO31(343)
+#define LWZX XO31( 23)
+#define STBX XO31(215)
+#define STHX XO31(407)
+#define STWX XO31(151)
+
+#define SPR(a,b) ((((a)<<5)|(b))<<11)
+#define LR SPR(8, 0)
+#define CTR SPR(9, 0)
+
+#define SLW XO31( 24)
+#define SRW XO31(536)
+#define SRAW XO31(792)
+
+#define SLD XO31( 27)
+#define SRD XO31(539)
+#define SRAD XO31(794)
+#define SRADI XO31(413<<1)
+
+#define TW XO31( 4)
+#define TRAP (TW | TO (31))
+
+#define RT(r) ((r)<<21)
+#define RS(r) ((r)<<21)
+#define RA(r) ((r)<<16)
+#define RB(r) ((r)<<11)
+#define TO(t) ((t)<<21)
+#define SH(s) ((s)<<11)
+#define MB(b) ((b)<<6)
+#define ME(e) ((e)<<1)
+#define BO(o) ((o)<<21)
+#define MB64(b) ((b)<<5)
+
+#define LK 1
+
+#define TAB(t,a,b) (RT(t) | RA(a) | RB(b))
+#define SAB(s,a,b) (RS(s) | RA(a) | RB(b))
+
+#define BF(n) ((n)<<23)
+#define BI(n, c) (((c)+((n)*4))<<16)
+#define BT(n, c) (((c)+((n)*4))<<21)
+#define BA(n, c) (((c)+((n)*4))<<16)
+#define BB(n, c) (((c)+((n)*4))<<11)
+
+#define BO_COND_TRUE BO (12)
+#define BO_COND_FALSE BO ( 4)
+#define BO_ALWAYS BO (20)
+
+enum {
+ CR_LT,
+ CR_GT,
+ CR_EQ,
+ CR_SO
+};
+
+static const uint32_t tcg_to_bc[10] = {
+ [TCG_COND_EQ] = BC | BI (7, CR_EQ) | BO_COND_TRUE,
+ [TCG_COND_NE] = BC | BI (7, CR_EQ) | BO_COND_FALSE,
+ [TCG_COND_LT] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GE] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LE] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GT] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+ [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
+};
+
+static void tcg_out_mov (TCGContext *s, int ret, int arg)
+{
+ tcg_out32 (s, OR | SAB (arg, ret, arg));
+}
+
+static void tcg_out_rld (TCGContext *s, int op, int ra, int rs, int sh, int mb)
+{
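+    /* MD-form (rldicl and friends): the 6-bit SH and MB/ME fields are
+       split, the high bit living apart from the low five */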
+ sh = SH (sh & 0x1f) | (((sh >> 5) & 1) << 1);
+ mb = MB64 ((mb >> 5) | ((mb << 1) & 0x3f));
+ tcg_out32 (s, op | RA (ra) | RS (rs) | sh | mb);
+}
+
+static void tcg_out_movi32 (TCGContext *s, int ret, int32_t arg)
+{
+ if (arg == (int16_t) arg)
+ tcg_out32 (s, ADDI | RT (ret) | RA (0) | (arg & 0xffff));
+ else {
+ tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
+ if (arg & 0xffff)
+ tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
+ }
+}
+
+static void tcg_out_movi (TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+ int32_t arg32 = arg;
+ arg = type == TCG_TYPE_I32 ? arg & 0xffffffff : arg;
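+    /* constants that fit in sign-extended 32 bits take the short path;
+       otherwise build the high half, shift it up with RLDICR, and OR in
+       the low halfwords (or just clear the top 32 bits of an unsigned
+       32-bit value) */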
+
+ if (arg == arg32) {
+ tcg_out_movi32 (s, ret, arg32);
+ }
+ else {
+ if ((uint64_t) arg >> 32) {
+ uint16_t h16 = arg >> 16;
+ uint16_t l16 = arg;
+
+ tcg_out_movi32 (s, ret, arg >> 32);
+ tcg_out_rld (s, RLDICR, ret, ret, 32, 31);
+ if (h16) tcg_out32 (s, ORIS | RS (ret) | RA (ret) | h16);
+ if (l16) tcg_out32 (s, ORI | RS (ret) | RA (ret) | l16);
+ }
+ else {
+ tcg_out_movi32 (s, ret, arg32);
+ if (arg32 < 0)
+ tcg_out_rld (s, RLDICL, ret, ret, 0, 32);
+ }
+ }
+}
+
+static void tcg_out_b (TCGContext *s, int mask, tcg_target_long target)
+{
+ tcg_target_long disp;
+
+ disp = target - (tcg_target_long) s->code_ptr;
+ if ((disp << 38) >> 38 == disp)
+ tcg_out32 (s, B | (disp & 0x3fffffc) | mask);
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, (tcg_target_long) target);
+ tcg_out32 (s, MTSPR | RS (0) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | mask);
+ }
+}
+
+static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
+{
+#ifdef __APPLE__
+ if (const_arg) {
+ tcg_out_b (s, LK, arg);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (arg) | LR);
+ tcg_out32 (s, BCLR | BO_ALWAYS | LK);
+ }
+#else
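+    /* ELF ABI: an indirect call goes through a function descriptor;
+       load the entry point into CTR, the callee's TOC into r2 and the
+       environment pointer into r11 before branching */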
+ int reg;
+
+ if (const_arg) {
+ reg = 2;
+ tcg_out_movi (s, TCG_TYPE_I64, reg, arg);
+ }
+ else reg = arg;
+
+ tcg_out32 (s, LD | RT (0) | RA (reg));
+    tcg_out32 (s, MTSPR | RS (0) | CTR);
+ tcg_out32 (s, LD | RT (11) | RA (reg) | 16);
+ tcg_out32 (s, LD | RT (2) | RA (reg) | 8);
+ tcg_out32 (s, BCCTR | BO_ALWAYS | LK);
+#endif
+}
+
+static void tcg_out_ldst (TCGContext *s, int ret, int addr,
+ int offset, int op1, int op2)
+{
+ if (offset == (int16_t) offset)
+ tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, offset);
+ tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
+ }
+}
+
+static void tcg_out_ldsta (TCGContext *s, int ret, int addr,
+ int offset, int op1, int op2)
+{
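+    /* DS-form displacements must fit in 16 bits and be a multiple of 4;
+       otherwise fall back to the X-form indexed variant */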
+ if (offset == (int16_t) (offset & ~3))
+ tcg_out32 (s, op1 | RT (ret) | RA (addr) | (offset & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, offset);
+ tcg_out32 (s, op2 | RT (ret) | RA (addr) | RB (0));
+ }
+}
+
+#if defined (CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static void *qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static void *qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+
+static void tcg_out_tlb_read (TCGContext *s, int r0, int r1, int r2,
+ int addr_reg, int s_bits, int offset)
+{
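+    /* leaves r0 pointing at the TLB entry for addr_reg, r1 holding the
+       tag loaded from it, and r2 the guest address masked to its page
+       number (keeping the low alignment bits, so unaligned accesses
+       take the slow path) */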
+#if TARGET_LONG_BITS == 32
+ tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
+
+ tcg_out32 (s, (RLWINM
+ | RA (r0)
+ | RS (addr_reg)
+ | SH (32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ | MB (32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS))
+ | ME (31 - CPU_TLB_ENTRY_BITS)
+ )
+ );
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (TCG_AREG0));
+ tcg_out32 (s, (LWZU | RT (r1) | RA (r0) | offset));
+ tcg_out32 (s, (RLWINM
+ | RA (r2)
+ | RS (addr_reg)
+ | SH (0)
+ | MB ((32 - s_bits) & 31)
+ | ME (31 - TARGET_PAGE_BITS)
+ )
+ );
+#else
+ tcg_out_rld (s, RLDICL, r0, addr_reg,
+ 64 - TARGET_PAGE_BITS,
+ 64 - CPU_TLB_BITS);
+ tcg_out_rld (s, RLDICR, r0, r0,
+ CPU_TLB_ENTRY_BITS,
+ 63 - CPU_TLB_ENTRY_BITS);
+
+ tcg_out32 (s, ADD | TAB (r0, r0, TCG_AREG0));
+ tcg_out32 (s, LD_ADDR | RT (r1) | RA (r0) | offset);
+
+ if (!s_bits) {
+ tcg_out_rld (s, RLDICR, r2, addr_reg, 0, 63 - TARGET_PAGE_BITS);
+ }
+ else {
+ tcg_out_rld (s, RLDICL, r2, addr_reg,
+ 64 - TARGET_PAGE_BITS,
+ TARGET_PAGE_BITS - s_bits);
+ tcg_out_rld (s, RLDICL, r2, r2, TARGET_PAGE_BITS, 0);
+ }
+#endif
+}
+#endif
+
+static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
+{
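+    /* opc: bits 0-1 give log2 of the access size, bit 2 requests sign
+       extension of the loaded value */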
+ int addr_reg, data_reg, r0, r1, rbase, mem_index, s_bits, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r2;
+ void *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+ s_bits = opc & 3;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+ rbase = 0;
+
+ tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
+ offsetof (CPUState, tlb_table[mem_index][0].addr_read));
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+ tcg_out_mov (s, 3, addr_reg);
+ tcg_out_movi (s, TCG_TYPE_I64, 4, mem_index);
+
+ tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);
+
+ switch (opc) {
+ case 0|4:
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (3));
+ break;
+ case 1|4:
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (3));
+ break;
+ case 2|4:
+ tcg_out32 (s, EXTSW | RA (data_reg) | RS (3));
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ if (data_reg != 3)
+ tcg_out_mov (s, data_reg, 3);
+ break;
+ }
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
+ tcg_out32 (s, (LD_ADDEND
+ | RT (r0)
+ | RA (r0)
+ | (offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_read))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+#if TARGET_LONG_BITS == 32
+ tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
+#endif
+ r0 = addr_reg;
+ r1 = 3;
+ rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ default:
+ case 0:
+ tcg_out32 (s, LBZX | TAB (data_reg, rbase, r0));
+ break;
+ case 0|4:
+ tcg_out32 (s, LBZX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, EXTSB | RA (data_reg) | RS (data_reg));
+ break;
+ case 1:
+ if (bswap)
+ tcg_out32 (s, LHBRX | TAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, LHZX | TAB (data_reg, rbase, r0));
+ break;
+ case 1|4:
+ if (bswap) {
+ tcg_out32 (s, LHBRX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, EXTSH | RA (data_reg) | RS (data_reg));
+ }
+ else tcg_out32 (s, LHAX | TAB (data_reg, rbase, r0));
+ break;
+ case 2:
+ if (bswap)
+ tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, LWZX | TAB (data_reg, rbase, r0));
+ break;
+ case 2|4:
+ if (bswap) {
+ tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, EXTSW | RA (data_reg) | RS (data_reg));
+ }
+ else tcg_out32 (s, LWAX | TAB (data_reg, rbase, r0));
+ break;
+ case 3:
+#ifdef CONFIG_USE_GUEST_BASE
+ if (bswap) {
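+            /* no 64-bit byte-reversed load insn: fetch the two halves
+               with LWBRX and splice them together with RLDIMI */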
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out32 (s, LWBRX | TAB (data_reg, rbase, r0));
+ tcg_out32 (s, LWBRX | TAB ( r1, rbase, r1));
+ tcg_out_rld (s, RLDIMI, data_reg, r1, 32, 0);
+ }
+ else tcg_out32 (s, LDX | TAB (data_reg, rbase, r0));
+#else
+ if (bswap) {
+ tcg_out_movi32 (s, 0, 4);
+ tcg_out32 (s, LWBRX | RT (data_reg) | RB (r0));
+ tcg_out32 (s, LWBRX | RT ( r1) | RA (r0));
+ tcg_out_rld (s, RLDIMI, data_reg, r1, 32, 0);
+ }
+ else tcg_out32 (s, LD | RT (data_reg) | RA (r0));
+#endif
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
+{
+ int addr_reg, r0, r1, rbase, data_reg, mem_index, bswap;
+#ifdef CONFIG_SOFTMMU
+ int r2;
+ void *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+#ifdef CONFIG_SOFTMMU
+ r0 = 3;
+ r1 = 4;
+ r2 = 0;
+ rbase = 0;
+
+ tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
+ offsetof (CPUState, tlb_table[mem_index][0].addr_write));
+
+ tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
+
+ label1_ptr = s->code_ptr;
+#ifdef FAST_PATH
+ tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
+#endif
+
+ /* slow path */
+ tcg_out_mov (s, 3, addr_reg);
+ tcg_out_rld (s, RLDICL, 4, data_reg, 0, 64 - (1 << (3 + opc)));
+ tcg_out_movi (s, TCG_TYPE_I64, 5, mem_index);
+
+ tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);
+
+ label2_ptr = s->code_ptr;
+ tcg_out32 (s, B);
+
+ /* label1: fast path */
+#ifdef FAST_PATH
+ reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
+#endif
+
+ tcg_out32 (s, (LD_ADDEND
+ | RT (r0)
+ | RA (r0)
+ | (offsetof (CPUTLBEntry, addend)
+ - offsetof (CPUTLBEntry, addr_write))
+ ));
+ /* r0 = env->tlb_table[mem_index][index].addend */
+ tcg_out32 (s, ADD | RT (r0) | RA (r0) | RB (addr_reg));
+ /* r0 = env->tlb_table[mem_index][index].addend + addr */
+
+#else /* !CONFIG_SOFTMMU */
+#if TARGET_LONG_BITS == 32
+ tcg_out_rld (s, RLDICL, addr_reg, addr_reg, 0, 32);
+#endif
+ r1 = 3;
+ r0 = addr_reg;
+ rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ bswap = 0;
+#else
+ bswap = 1;
+#endif
+ switch (opc) {
+ case 0:
+ tcg_out32 (s, STBX | SAB (data_reg, rbase, r0));
+ break;
+ case 1:
+ if (bswap)
+ tcg_out32 (s, STHBRX | SAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, STHX | SAB (data_reg, rbase, r0));
+ break;
+ case 2:
+ if (bswap)
+ tcg_out32 (s, STWBRX | SAB (data_reg, rbase, r0));
+ else
+ tcg_out32 (s, STWX | SAB (data_reg, rbase, r0));
+ break;
+ case 3:
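+        /* likewise, no 64-bit byte-reversed store: emit two STWBRX */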
+ if (bswap) {
+ tcg_out32 (s, STWBRX | SAB (data_reg, rbase, r0));
+ tcg_out32 (s, ADDI | RT (r1) | RA (r0) | 4);
+ tcg_out_rld (s, RLDICL, 0, data_reg, 32, 0);
+ tcg_out32 (s, STWBRX | SAB (0, rbase, r1));
+ }
+ else tcg_out32 (s, STDX | SAB (data_reg, rbase, r0));
+ break;
+ }
+
+#ifdef CONFIG_SOFTMMU
+ reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
+#endif
+}
+
+void tcg_target_qemu_prologue (TCGContext *s)
+{
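+    /* emits the ELF function descriptor (when needed), the prologue
+       that spills the callee-saved registers, the indirect jump into
+       the translated code via CTR, and the epilogue at tb_ret_addr */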
+ int i, frame_size;
+#ifndef __APPLE__
+ uint64_t addr;
+#endif
+
+ frame_size = 0
+ + 8 /* back chain */
+ + 8 /* CR */
+ + 8 /* LR */
+ + 8 /* compiler doubleword */
+ + 8 /* link editor doubleword */
+ + 8 /* TOC save area */
+ + TCG_STATIC_CALL_ARGS_SIZE
+ + ARRAY_SIZE (tcg_target_callee_save_regs) * 8
+ ;
+ frame_size = (frame_size + 15) & ~15;
+
+#ifndef __APPLE__
+ /* First emit adhoc function descriptor */
+ addr = (uint64_t) s->code_ptr + 24;
+ tcg_out32 (s, addr >> 32); tcg_out32 (s, addr); /* entry point */
+ s->code_ptr += 16; /* skip TOC and environment pointer */
+#endif
+
+ /* Prologue */
+ tcg_out32 (s, MFSPR | RT (0) | LR);
+ tcg_out32 (s, STDU | RS (1) | RA (1) | (-frame_size & 0xffff));
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (STD
+ | RS (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, STD | RS (0) | RA (1) | (frame_size + 16));
+
+#ifdef CONFIG_USE_GUEST_BASE
+ tcg_out_movi (s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE);
+#endif
+
+ tcg_out32 (s, MTSPR | RS (3) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+
+ /* Epilogue */
+ tb_ret_addr = s->code_ptr;
+
+ for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
+ tcg_out32 (s, (LD
+ | RT (tcg_target_callee_save_regs[i])
+ | RA (1)
+ | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
+ )
+ );
+ tcg_out32 (s, LD | RT (0) | RA (1) | (frame_size + 16));
+ tcg_out32 (s, MTSPR | RS (0) | LR);
+ tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
+ tcg_out32 (s, BCLR | BO_ALWAYS);
+}
+
+static void tcg_out_ld (TCGContext *s, TCGType type, int ret, int arg1,
+ tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
+ else
+ tcg_out_ldsta (s, ret, arg1, arg2, LD, LDX);
+}
+
+static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1,
+ tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
+ else
+ tcg_out_ldsta (s, arg, arg1, arg2, STD, STDX);
+}
+
+static void ppc_addi32 (TCGContext *s, int rt, int ra, tcg_target_long si)
+{
+ if (!si && rt == ra)
+ return;
+
+ if (si == (int16_t) si)
+ tcg_out32 (s, ADDI | RT (rt) | RA (ra) | (si & 0xffff));
+ else {
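+        /* ADDI sign-extends its immediate, so carry one into the high
+           half when bit 15 of si is set */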
+ uint16_t h = ((si >> 16) & 0xffff) + ((uint16_t) si >> 15);
+ tcg_out32 (s, ADDIS | RT (rt) | RA (ra) | h);
+ tcg_out32 (s, ADDI | RT (rt) | RA (rt) | (si & 0xffff));
+ }
+}
+
+static void ppc_addi64 (TCGContext *s, int rt, int ra, tcg_target_long si)
+{
+ /* XXX: suboptimal */
+ if (si == (int16_t) si
+ || ((((uint64_t) si >> 31) == 0) && (si & 0x8000) == 0))
+ ppc_addi32 (s, rt, ra, si);
+ else {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, si);
+        tcg_out32 (s, ADD | RT (rt) | RA (ra) | RB (0));
+ }
+}
+
+static void tcg_out_addi (TCGContext *s, int reg, tcg_target_long val)
+{
+ ppc_addi64 (s, reg, reg, val);
+}
+
+static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
+ int const_arg2, int cr, int arch64)
+{
+ int imm;
+ uint32_t op;
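+    /* use CMPI/CMPLI when the constant fits the signedness of the
+       comparison, otherwise materialize it in r0 first */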
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ else if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ if (const_arg2) {
+ if ((int16_t) arg2 == arg2) {
+ op = CMPI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMP;
+ imm = 0;
+ break;
+
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ if (const_arg2) {
+ if ((uint16_t) arg2 == arg2) {
+ op = CMPLI;
+ imm = 1;
+ break;
+ }
+ }
+ op = CMPL;
+ imm = 0;
+ break;
+
+ default:
+ tcg_abort ();
+ }
+ op |= BF (cr) | (arch64 << 21);
+
+ if (imm)
+ tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
+ else {
+ if (const_arg2) {
+ tcg_out_movi (s, TCG_TYPE_I64, 0, arg2);
+ tcg_out32 (s, op | RA (arg1) | RB (0));
+ }
+ else
+ tcg_out32 (s, op | RA (arg1) | RB (arg2));
+ }
+}
+
+static void tcg_out_setcond (TCGContext *s, TCGType type, int cond, TCGArg arg0,
+ TCGArg arg1, TCGArg arg2, int const_arg2)
+{
+ int crop, sh, arg;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ if (const_arg2) {
+ if (!arg2) {
+ arg = arg1;
+ }
+ else {
+ arg = 0;
+ if ((uint16_t) arg2 == arg2) {
+ tcg_out32 (s, XORI | RS (arg1) | RA (0) | arg2);
+ }
+ else {
+ tcg_out_movi (s, type, 0, arg2);
+ tcg_out32 (s, XOR | SAB (arg1, 0, 0));
+ }
+ }
+ }
+ else {
+ arg = 0;
+ tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
+ }
+
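+        /* arg == 0 exactly when cntlz returns the full word width, so
+           the boolean result is just the count shifted right by 5
+           (32-bit) or 6 (64-bit) */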
+ if (type == TCG_TYPE_I64) {
+ tcg_out32 (s, CNTLZD | RS (arg) | RA (0));
+ tcg_out_rld (s, RLDICL, arg0, 0, 58, 6);
+ }
+ else {
+ tcg_out32 (s, CNTLZW | RS (arg) | RA (0));
+ tcg_out32 (s, (RLWINM
+ | RA (arg0)
+ | RS (0)
+ | SH (27)
+ | MB (5)
+ | ME (31)
+ )
+ );
+ }
+ break;
+
+ case TCG_COND_NE:
+ if (const_arg2) {
+ if (!arg2) {
+ arg = arg1;
+ }
+ else {
+ arg = 0;
+ if ((uint16_t) arg2 == arg2) {
+ tcg_out32 (s, XORI | RS (arg1) | RA (0) | arg2);
+ }
+ else {
+ tcg_out_movi (s, type, 0, arg2);
+ tcg_out32 (s, XOR | SAB (arg1, 0, 0));
+ }
+ }
+ }
+ else {
+ arg = 0;
+ tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
+ }
+
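+        /* arg != 0 computed via the carry: addic sets CA iff arg is
+           nonzero, and subfe turns CA into 0 or 1 */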
+ if (arg == arg1 && arg1 == arg0) {
+ tcg_out32 (s, ADDIC | RT (0) | RA (arg) | 0xffff);
+ tcg_out32 (s, SUBFE | TAB (arg0, 0, arg));
+ }
+ else {
+ tcg_out32 (s, ADDIC | RT (arg0) | RA (arg) | 0xffff);
+ tcg_out32 (s, SUBFE | TAB (arg0, arg0, arg));
+ }
+ break;
+
+ case TCG_COND_GT:
+ case TCG_COND_GTU:
+ sh = 30;
+ crop = 0;
+ goto crtest;
+
+ case TCG_COND_LT:
+ case TCG_COND_LTU:
+ sh = 29;
+ crop = 0;
+ goto crtest;
+
+ case TCG_COND_GE:
+ case TCG_COND_GEU:
+ sh = 31;
+ crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_LT) | BB (7, CR_LT);
+ goto crtest;
+
+ case TCG_COND_LE:
+ case TCG_COND_LEU:
+ sh = 31;
+ crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_GT) | BB (7, CR_GT);
+ crtest:
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7, type == TCG_TYPE_I64);
+ if (crop) tcg_out32 (s, crop);
+ tcg_out32 (s, MFCR | RT (0));
+ tcg_out32 (s, (RLWINM
+ | RA (arg0)
+ | RS (0)
+ | SH (sh)
+ | MB (31)
+ | ME (31)
+ )
+ );
+ break;
+
+ default:
+ tcg_abort ();
+ }
+}
+
+static void tcg_out_bc (TCGContext *s, int bc, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
+
+ if (l->has_value)
+ tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
+ else {
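+        /* forward reference: emit the branch with its old displacement
+           bits intact and let an R_PPC_REL14 reloc patch it once the
+           label is bound */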
+ uint16_t val = *(uint16_t *) &s->code_ptr[2];
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, bc | (val & 0xfffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
+ }
+}
+
+static void tcg_out_brcond (TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index, int arch64)
+{
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7, arch64);
+ tcg_out_bc (s, tcg_to_bc[cond], label_index);
+}
+
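+/* Retranslate the goto_tb branch in place: point a scratch context at
+   the jump slot, re-emit the branch with tcg_out_b, and flush the
+   icache over the bytes written.  */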
+void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr)
+{
+ TCGContext s;
+ unsigned long patch_size;
+
+ s.code_ptr = (uint8_t *) jmp_addr;
+ tcg_out_b (&s, 0, addr);
+ patch_size = s.code_ptr - (uint8_t *) jmp_addr;
+ flush_icache_range (jmp_addr, jmp_addr + patch_size);
+}
+
+static void tcg_out_op (TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ tcg_out_movi (s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
+ tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr);
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
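+            /* reserve the worst case for tcg_out_b via CTR:
+               a 5-insn movi64 plus mtctr and bctr, 28 bytes */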
+ s->code_ptr += 28;
+ }
+ else {
+ tcg_abort ();
+ }
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_br:
+ {
+ TCGLabel *l = &s->labels[args[0]];
+
+ if (l->has_value) {
+ tcg_out_b (s, 0, l->u.value);
+ }
+ else {
+ uint32_t val = *(uint32_t *) s->code_ptr;
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, B | (val & 0x3fffffc));
+ tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
+ }
+ }
+ break;
+ case INDEX_op_call:
+ tcg_out_call (s, args[0], const_args[0]);
+ break;
+ case INDEX_op_jmp:
+ if (const_args[0]) {
+ tcg_out_b (s, 0, args[0]);
+ }
+ else {
+ tcg_out32 (s, MTSPR | RS (args[0]) | CTR);
+ tcg_out32 (s, BCCTR | BO_ALWAYS);
+ }
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi (s, TCG_TYPE_I32, args[0], args[1]);
+ break;
+ case INDEX_op_movi_i64:
+ tcg_out_movi (s, TCG_TYPE_I64, args[0], args[1]);
+ break;
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ break;
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
+ tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0]));
+ break;
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX);
+ break;
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX);
+ break;
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32u_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX);
+ break;
+ case INDEX_op_ld32s_i64:
+ tcg_out_ldsta (s, args[0], args[1], args[2], LWA, LWAX);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldsta (s, args[0], args[1], args[2], LD, LDX);
+ break;
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX);
+ break;
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX);
+ break;
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldsta (s, args[0], args[1], args[2], STD, STDX);
+ break;
+
+ case INDEX_op_add_i32:
+ if (const_args[2])
+ ppc_addi32 (s, args[0], args[1], args[2]);
+ else
+ tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_sub_i32:
+ if (const_args[2])
+ ppc_addi32 (s, args[0], args[1], -args[2]);
+ else
+ tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
+ break;
+
+ case INDEX_op_and_i64:
+ case INDEX_op_and_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | args[2]);
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+ tcg_out_movi (s, (opc == INDEX_op_and_i32
+ ? TCG_TYPE_I32
+ : TCG_TYPE_I64),
+ 0, args[2]);
+ tcg_out32 (s, AND | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, AND | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_or_i64:
+ case INDEX_op_or_i32:
+ if (const_args[2]) {
+ if (args[2] & 0xffff) {
+ tcg_out32 (s, ORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ if (args[2] >> 16)
+ tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ else {
+ tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ }
+ }
+ else
+ tcg_out32 (s, OR | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_xor_i64:
+ case INDEX_op_xor_i32:
+ if (const_args[2]) {
+ if ((args[2] & 0xffff) == args[2])
+ tcg_out32 (s, XORI | RS (args[1]) | RA (args[0])
+ | (args[2] & 0xffff));
+ else if ((args[2] & 0xffff0000) == args[2])
+ tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0])
+ | ((args[2] >> 16) & 0xffff));
+ else {
+                tcg_out_movi (s, (opc == INDEX_op_xor_i32
+ ? TCG_TYPE_I32
+ : TCG_TYPE_I64),
+ 0, args[2]);
+ tcg_out32 (s, XOR | SAB (args[1], args[0], 0));
+ }
+ }
+ else
+ tcg_out32 (s, XOR | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_mul_i32:
+ if (const_args[2]) {
+ if (args[2] == (int16_t) args[2])
+ tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1])
+ | (args[2] & 0xffff));
+ else {
+ tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]);
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], 0));
+ }
+ }
+ else
+ tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_div_i32:
+ tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_shl_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (args[2])
+ | MB (0)
+ | ME (31 - args[2])
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SLW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_shr_i32:
+ if (const_args[2]) {
+ tcg_out32 (s, (RLWINM
+ | RA (args[0])
+ | RS (args[1])
+ | SH (32 - args[2])
+ | MB (args[2])
+ | ME (31)
+ )
+ );
+ }
+ else
+ tcg_out32 (s, SRW | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_sar_i32:
+ if (const_args[2])
+ tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2]));
+ else
+ tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_brcond_i32:
+ tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 0);
+ break;
+
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 1);
+ break;
+
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ tcg_out32 (s, NEG | RT (args[0]) | RA (args[1]));
+ break;
+
+ case INDEX_op_add_i64:
+ if (const_args[2])
+ ppc_addi64 (s, args[0], args[1], args[2]);
+ else
+ tcg_out32 (s, ADD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_sub_i64:
+ if (const_args[2])
+ ppc_addi64 (s, args[0], args[1], -args[2]);
+ else
+ tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1]));
+ break;
+
+ case INDEX_op_shl_i64:
+ if (const_args[2])
+ tcg_out_rld (s, RLDICR, args[0], args[1], args[2], 63 - args[2]);
+ else
+ tcg_out32 (s, SLD | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_shr_i64:
+ if (const_args[2])
+ tcg_out_rld (s, RLDICL, args[0], args[1], 64 - args[2], args[2]);
+ else
+ tcg_out32 (s, SRD | SAB (args[1], args[0], args[2]));
+ break;
+ case INDEX_op_sar_i64:
+ if (const_args[2]) {
+ int sh = SH (args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
+ tcg_out32 (s, SRADI | RA (args[0]) | RS (args[1]) | sh);
+ }
+ else
+ tcg_out32 (s, SRAD | SAB (args[1], args[0], args[2]));
+ break;
+
+ case INDEX_op_mul_i64:
+ tcg_out32 (s, MULLD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_div_i64:
+ tcg_out32 (s, DIVD | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_divu_i64:
+ tcg_out32 (s, DIVDU | TAB (args[0], args[1], args[2]));
+ break;
+ case INDEX_op_rem_i64:
+ tcg_out32 (s, DIVD | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+ case INDEX_op_remu_i64:
+ tcg_out32 (s, DIVDU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld (s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld (s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld (s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld (s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld (s, args, 2);
+ break;
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld (s, args, 2 | 4);
+ break;
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld (s, args, 3);
+ break;
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st (s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st (s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st (s, args, 2);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st (s, args, 3);
+ break;
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ c = EXTSB;
+ goto gen_ext;
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ c = EXTSH;
+ goto gen_ext;
+ case INDEX_op_ext32s_i64:
+ c = EXTSW;
+ goto gen_ext;
+ gen_ext:
+ tcg_out32 (s, c | RS (args[1]) | RA (args[0]));
+ break;
+
+ case INDEX_op_setcond_i32:
+ tcg_out_setcond (s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
+ const_args[2]);
+ break;
+ case INDEX_op_setcond_i64:
+ tcg_out_setcond (s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
+ const_args[2]);
+ break;
+
+ default:
+ tcg_dump_ops (s, stderr);
+ tcg_abort ();
+ }
+}
+
+static const TCGTargetOpDef ppc_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "ri" } },
+ { INDEX_op_mul_i32, { "r", "r", "ri" } },
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
+ { INDEX_op_sub_i32, { "r", "r", "ri" } },
+ { INDEX_op_and_i32, { "r", "r", "ri" } },
+ { INDEX_op_or_i32, { "r", "r", "ri" } },
+ { INDEX_op_xor_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "ri" } },
+ { INDEX_op_shr_i32, { "r", "r", "ri" } },
+ { INDEX_op_sar_i32, { "r", "r", "ri" } },
+
+ { INDEX_op_brcond_i32, { "r", "ri" } },
+ { INDEX_op_brcond_i64, { "r", "ri" } },
+
+ { INDEX_op_neg_i32, { "r", "r" } },
+
+ { INDEX_op_add_i64, { "r", "r", "ri" } },
+ { INDEX_op_sub_i64, { "r", "r", "ri" } },
+ { INDEX_op_and_i64, { "r", "r", "rZ" } },
+ { INDEX_op_or_i64, { "r", "r", "rZ" } },
+ { INDEX_op_xor_i64, { "r", "r", "rZ" } },
+
+ { INDEX_op_shl_i64, { "r", "r", "ri" } },
+ { INDEX_op_shr_i64, { "r", "r", "ri" } },
+ { INDEX_op_sar_i64, { "r", "r", "ri" } },
+
+ { INDEX_op_mul_i64, { "r", "r", "r" } },
+ { INDEX_op_div_i64, { "r", "r", "r" } },
+ { INDEX_op_divu_i64, { "r", "r", "r" } },
+ { INDEX_op_rem_i64, { "r", "r", "r" } },
+ { INDEX_op_remu_i64, { "r", "r", "r" } },
+
+ { INDEX_op_neg_i64, { "r", "r" } },
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "S", "S" } },
+ { INDEX_op_qemu_st16, { "S", "S" } },
+ { INDEX_op_qemu_st32, { "S", "S" } },
+ { INDEX_op_qemu_st64, { "S", "S" } },
+
+ { INDEX_op_ext8s_i32, { "r", "r" } },
+ { INDEX_op_ext16s_i32, { "r", "r" } },
+ { INDEX_op_ext8s_i64, { "r", "r" } },
+ { INDEX_op_ext16s_i64, { "r", "r" } },
+ { INDEX_op_ext32s_i64, { "r", "r" } },
+
+ { INDEX_op_setcond_i32, { "r", "r", "ri" } },
+ { INDEX_op_setcond_i64, { "r", "r", "ri" } },
+
+ { -1 },
+};
+
+void tcg_target_init (TCGContext *s)
+{
+ tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+ tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
+ tcg_regset_set32 (tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_R0) |
+#ifdef __APPLE__
+ (1 << TCG_REG_R2) |
+#endif
+ (1 << TCG_REG_R3) |
+ (1 << TCG_REG_R4) |
+ (1 << TCG_REG_R5) |
+ (1 << TCG_REG_R6) |
+ (1 << TCG_REG_R7) |
+ (1 << TCG_REG_R8) |
+ (1 << TCG_REG_R9) |
+ (1 << TCG_REG_R10) |
+ (1 << TCG_REG_R11) |
+ (1 << TCG_REG_R12)
+ );
+
+ tcg_regset_clear (s->reserved_regs);
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R0);
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R1);
+#ifndef __APPLE__
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R2);
+#endif
+ tcg_regset_set_reg (s->reserved_regs, TCG_REG_R13);
+
+#ifdef CONFIG_USE_GUEST_BASE
+    tcg_regset_set_reg (s->reserved_regs, TCG_GUEST_BASE_REG);
+#endif
+
+ tcg_add_target_add_op_defs (ppc_op_defs);
+}
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
new file mode 100644
index 0000000..f5de642
--- /dev/null
+++ b/tcg/ppc64/tcg-target.h
@@ -0,0 +1,104 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_PPC64 1
+
+#define TCG_TARGET_REG_BITS 64
+#define TCG_TARGET_WORDS_BIGENDIAN
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_R0 = 0,
+ TCG_REG_R1,
+ TCG_REG_R2,
+ TCG_REG_R3,
+ TCG_REG_R4,
+ TCG_REG_R5,
+ TCG_REG_R6,
+ TCG_REG_R7,
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R24,
+ TCG_REG_R25,
+ TCG_REG_R26,
+ TCG_REG_R27,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31
+};
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_R1
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_STACK_OFFSET 48
+
+/* optional instructions */
+#define TCG_TARGET_HAS_div_i32
+/* #define TCG_TARGET_HAS_rot_i32 */
+#define TCG_TARGET_HAS_ext8s_i32
+#define TCG_TARGET_HAS_ext16s_i32
+/* #define TCG_TARGET_HAS_ext8u_i32 */
+/* #define TCG_TARGET_HAS_ext16u_i32 */
+/* #define TCG_TARGET_HAS_bswap16_i32 */
+/* #define TCG_TARGET_HAS_bswap32_i32 */
+/* #define TCG_TARGET_HAS_not_i32 */
+#define TCG_TARGET_HAS_neg_i32
+/* #define TCG_TARGET_HAS_andc_i32 */
+/* #define TCG_TARGET_HAS_orc_i32 */
+
+#define TCG_TARGET_HAS_div_i64
+/* #define TCG_TARGET_HAS_rot_i64 */
+#define TCG_TARGET_HAS_ext8s_i64
+#define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+/* #define TCG_TARGET_HAS_ext8u_i64 */
+/* #define TCG_TARGET_HAS_ext16u_i64 */
+/* #define TCG_TARGET_HAS_ext32u_i64 */
+/* #define TCG_TARGET_HAS_bswap16_i64 */
+/* #define TCG_TARGET_HAS_bswap32_i64 */
+/* #define TCG_TARGET_HAS_bswap64_i64 */
+/* #define TCG_TARGET_HAS_not_i64 */
+#define TCG_TARGET_HAS_neg_i64
+/* #define TCG_TARGET_HAS_andc_i64 */
+/* #define TCG_TARGET_HAS_orc_i64 */
+
+#define TCG_AREG0 TCG_REG_R27
+#define TCG_AREG1 TCG_REG_R24
+#define TCG_AREG2 TCG_REG_R25
+
+#define TCG_TARGET_HAS_GUEST_BASE
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
new file mode 100644
index 0000000..d4ddaa7
--- /dev/null
+++ b/tcg/sparc/tcg-target.c
@@ -0,0 +1,1563 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef NDEBUG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "%g0",
+ "%g1",
+ "%g2",
+ "%g3",
+ "%g4",
+ "%g5",
+ "%g6",
+ "%g7",
+ "%o0",
+ "%o1",
+ "%o2",
+ "%o3",
+ "%o4",
+ "%o5",
+ "%o6",
+ "%o7",
+ "%l0",
+ "%l1",
+ "%l2",
+ "%l3",
+ "%l4",
+ "%l5",
+ "%l6",
+ "%l7",
+ "%i0",
+ "%i1",
+ "%i2",
+ "%i3",
+ "%i4",
+ "%i5",
+ "%i6",
+ "%i7",
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ TCG_REG_L0,
+ TCG_REG_L1,
+ TCG_REG_L2,
+ TCG_REG_L3,
+ TCG_REG_L4,
+ TCG_REG_L5,
+ TCG_REG_L6,
+ TCG_REG_L7,
+ TCG_REG_I0,
+ TCG_REG_I1,
+ TCG_REG_I2,
+ TCG_REG_I3,
+ TCG_REG_I4,
+};
+
+static const int tcg_target_call_iarg_regs[6] = {
+ TCG_REG_O0,
+ TCG_REG_O1,
+ TCG_REG_O2,
+ TCG_REG_O3,
+ TCG_REG_O4,
+ TCG_REG_O5,
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_O0,
+ TCG_REG_O1,
+};
+
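+/* Return non-zero if VAL fits in a signed immediate field of BITS bits:
+   shifting the value up and arithmetically back down is lossless only
+   if the discarded bits were all copies of the sign bit. */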
+static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
+{
+ return (val << ((sizeof(tcg_target_long) * 8 - bits))
+ >> (sizeof(tcg_target_long) * 8 - bits)) == val;
+}
+
+static inline int check_fit_i32(uint32_t val, unsigned int bits)
+{
+ return ((val << (32 - bits)) >> (32 - bits)) == val;
+}
+
+static void patch_reloc(uint8_t *code_ptr, int type,
+ tcg_target_long value, tcg_target_long addend)
+{
+ value += addend;
+ switch (type) {
+ case R_SPARC_32:
+ if (value != (uint32_t)value)
+ tcg_abort();
+ *(uint32_t *)code_ptr = value;
+ break;
+ case R_SPARC_WDISP22:
+ value -= (long)code_ptr;
+ value >>= 2;
+ if (!check_fit_tl(value, 22))
+ tcg_abort();
+ *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
+ break;
+ case R_SPARC_WDISP19:
+ value -= (long)code_ptr;
+ value >>= 2;
+ if (!check_fit_tl(value, 19))
+ tcg_abort();
+ *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/* maximum number of registers used for input function arguments */
+static inline int tcg_target_get_call_iarg_regs_count(int flags)
+{
+ return 6;
+}
+
+/* parse target specific constraints */
+static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+{
+ const char *ct_str;
+
+ ct_str = *pct_str;
+ switch (ct_str[0]) {
+ case 'r':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ break;
+ case 'L': /* qemu_ld/st constraint */
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
+ // Helper args
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
+ break;
+ case 'I':
+ ct->ct |= TCG_CT_CONST_S11;
+ break;
+ case 'J':
+ ct->ct |= TCG_CT_CONST_S13;
+ break;
+ default:
+ return -1;
+ }
+ ct_str++;
+ *pct_str = ct_str;
+ return 0;
+}
+
+/* test if a constant matches the constraint */
+static inline int tcg_target_const_match(tcg_target_long val,
+ const TCGArgConstraint *arg_ct)
+{
+ int ct;
+
+ ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST)
+ return 1;
+ else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
+ return 1;
+ else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
+ return 1;
+ else
+ return 0;
+}
+
+#define INSN_OP(x) ((x) << 30)
+#define INSN_OP2(x) ((x) << 22)
+#define INSN_OP3(x) ((x) << 19)
+#define INSN_OPF(x) ((x) << 5)
+#define INSN_RD(x) ((x) << 25)
+#define INSN_RS1(x) ((x) << 14)
+#define INSN_RS2(x) (x)
+#define INSN_ASI(x) ((x) << 5)
+
+#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
+#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
+#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
+#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)
+
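+/* Branch condition codes, as encoded in the COND field of Bicc/BPcc. */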
+#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
+#define COND_N 0x0
+#define COND_E 0x1
+#define COND_LE 0x2
+#define COND_L 0x3
+#define COND_LEU 0x4
+#define COND_CS 0x5
+#define COND_NEG 0x6
+#define COND_VS 0x7
+#define COND_A 0x8
+#define COND_NE 0x9
+#define COND_G 0xa
+#define COND_GE 0xb
+#define COND_GU 0xc
+#define COND_CC 0xd
+#define COND_POS 0xe
+#define COND_VC 0xf
+#define BA (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))
+
+#define MOVCC_ICC (1 << 18)
+#define MOVCC_XCC (1 << 18 | 1 << 12)
+
+#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
+#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
+#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
+#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
+#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
+#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
+#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
+#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
+#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
+#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
+#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
+#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
+#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
+#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
+#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
+#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
+#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
+#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
+#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
+
+#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
+#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
+#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
+
+#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
+#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
+#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
+
+#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
+#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
+#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
+#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
+#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
+#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
+#define CALL INSN_OP(1)
+#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
+#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
+#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
+#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
+#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
+#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
+#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
+#define STB (INSN_OP(3) | INSN_OP3(0x05))
+#define STH (INSN_OP(3) | INSN_OP3(0x06))
+#define STW (INSN_OP(3) | INSN_OP3(0x04))
+#define STX (INSN_OP(3) | INSN_OP3(0x0e))
+#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
+#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
+#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
+#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
+#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
+#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
+#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
+#define STBA (INSN_OP(3) | INSN_OP3(0x15))
+#define STHA (INSN_OP(3) | INSN_OP3(0x16))
+#define STWA (INSN_OP(3) | INSN_OP3(0x14))
+#define STXA (INSN_OP(3) | INSN_OP3(0x1e))
+
+#ifndef ASI_PRIMARY_LITTLE
+#define ASI_PRIMARY_LITTLE 0x88
+#endif
+
+static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
+ int op)
+{
+ tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
+ INSN_RS2(rs2));
+}
+
+static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
+ uint32_t offset, int op)
+{
+ tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
+ INSN_IMM13(offset));
+}
+
+static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
+ int val2, int val2const, int op)
+{
+ tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
+ | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
+}
+
+static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
+{
+ tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
+}
+
+static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
+{
+ tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
+}
+
+static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
+{
+ tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
+}
+
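+/* Build an arbitrary 32-bit constant: one OR from %g0 when it fits in
+   a signed 13-bit immediate, otherwise SETHI for the top 22 bits plus
+   an OR of the low 10 bits. */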
+static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
+{
+ if (check_fit_tl(arg, 13))
+ tcg_out_movi_imm13(s, ret, arg);
+ else {
+ tcg_out_sethi(s, ret, arg);
+ if (arg & 0x3ff)
+ tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
+ }
+}
+
+static inline void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
+{
+    /* All 32-bit constants, as well as 64-bit constants with no high
+       bits set, go through movi_imm32. */
+ if (TCG_TARGET_REG_BITS == 32
+ || type == TCG_TYPE_I32
+ || (arg & ~(tcg_target_long)0xffffffff) == 0) {
+ tcg_out_movi_imm32(s, ret, arg);
+ } else if (check_fit_tl(arg, 13)) {
+ /* A 13-bit constant sign-extended to 64-bits. */
+ tcg_out_movi_imm13(s, ret, arg);
+ } else if (check_fit_tl(arg, 32)) {
+ /* A 32-bit constant sign-extended to 64-bits. */
+ tcg_out_sethi(s, ret, ~arg);
+ tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
+ } else {
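+        /* Build the constant from two 32-bit halves: the high half in
+           the reserved scratch %i4, shifted into place and OR'd with
+           the low half. */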
+ tcg_out_movi_imm32(s, TCG_REG_I4, arg >> (TCG_TARGET_REG_BITS / 2));
+ tcg_out_arithi(s, TCG_REG_I4, TCG_REG_I4, 32, SHIFT_SLLX);
+ tcg_out_movi_imm32(s, ret, arg);
+ tcg_out_arith(s, ret, ret, TCG_REG_I4, ARITH_OR);
+ }
+}
+
+static inline void tcg_out_ld_raw(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ tcg_out_sethi(s, ret, arg);
+ tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
+ INSN_IMM13(arg & 0x3ff));
+}
+
+static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
+ tcg_target_long arg)
+{
+ if (!check_fit_tl(arg, 10))
+ tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ffULL);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) |
+ INSN_IMM13(arg & 0x3ff));
+ } else {
+ tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
+ INSN_IMM13(arg & 0x3ff));
+ }
+}
+
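+/* Load or store with a signed 13-bit displacement; larger offsets are
+   first materialized in the reserved scratch register %i5. */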
+static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
+                                int offset, int op)
+{
+ if (check_fit_tl(offset, 13))
+ tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
+ INSN_IMM13(offset));
+ else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
+ tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
+ INSN_RS2(addr));
+ }
+}
+
+static inline void tcg_out_ldst_asi(TCGContext *s, int ret, int addr,
+ int offset, int op, int asi)
+{
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
+ tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
+ INSN_ASI(asi) | INSN_RS2(addr));
+}
+
+static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
+ int arg1, tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst(s, ret, arg1, arg2, LDUW);
+ else
+ tcg_out_ldst(s, ret, arg1, arg2, LDX);
+}
+
+static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
+ int arg1, tcg_target_long arg2)
+{
+ if (type == TCG_TYPE_I32)
+ tcg_out_ldst(s, arg, arg1, arg2, STW);
+ else
+ tcg_out_ldst(s, arg, arg1, arg2, STX);
+}
+
+static inline void tcg_out_sety(TCGContext *s, int rs)
+{
+ tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
+}
+
+static inline void tcg_out_rdy(TCGContext *s, int rd)
+{
+ tcg_out32(s, RDY | INSN_RD(rd));
+}
+
+static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val != 0) {
+ if (check_fit_tl(val, 13))
+ tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
+ else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, val);
+ tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_ADD);
+ }
+ }
+}
+
+static inline void tcg_out_andi(TCGContext *s, int reg, tcg_target_long val)
+{
+ if (val != 0) {
+ if (check_fit_tl(val, 13))
+ tcg_out_arithi(s, reg, reg, val, ARITH_AND);
+ else {
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, val);
+ tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_AND);
+ }
+ }
+}
+
+static void tcg_out_div32(TCGContext *s, int rd, int rs1,
+ int val2, int val2const, int uns)
+{
+ /* Load Y with the sign/zero extension of RS1 to 64-bits. */
+ if (uns) {
+ tcg_out_sety(s, TCG_REG_G0);
+ } else {
+ tcg_out_arithi(s, TCG_REG_I5, rs1, 31, SHIFT_SRA);
+ tcg_out_sety(s, TCG_REG_I5);
+ }
+
+ tcg_out_arithc(s, rd, rs1, val2, val2const,
+ uns ? ARITH_UDIV : ARITH_SDIV);
+}
+
+static inline void tcg_out_nop(TCGContext *s)
+{
+ tcg_out_sethi(s, TCG_REG_G0, 0);
+}
+
+/* Note that OPC is a fully formed condition field, including the
+   annul bit (see INSN_COND), so it is OR'd directly into the branch
+   opcode. */
+static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
+{
+ int32_t val;
+ TCGLabel *l = &s->labels[label_index];
+
+    if (l->has_value) {
+        val = l->u.value - (tcg_target_long)s->code_ptr;
+        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x2) | INSN_OFF22(val)));
+ } else {
+ tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
+        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x2) | 0));
+ }
+}
+
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
+{
+ int32_t val;
+ TCGLabel *l = &s->labels[label_index];
+
+    if (l->has_value) {
+        val = l->u.value - (tcg_target_long)s->code_ptr;
+        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x1) |
+                      (0x5 << 19) | INSN_OFF19(val)));
+ } else {
+ tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
+        tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x1) |
+                      (0x5 << 19) | 0));
+ }
+}
+#endif
+
+static const uint8_t tcg_cond_to_bcond[10] = {
+ [TCG_COND_EQ] = COND_E,
+ [TCG_COND_NE] = COND_NE,
+ [TCG_COND_LT] = COND_L,
+ [TCG_COND_GE] = COND_GE,
+ [TCG_COND_LE] = COND_LE,
+ [TCG_COND_GT] = COND_G,
+ [TCG_COND_LTU] = COND_CS,
+ [TCG_COND_GEU] = COND_CC,
+ [TCG_COND_LEU] = COND_LEU,
+ [TCG_COND_GTU] = COND_GU,
+};
+
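+/* Compare by subtracting into %g0: the result is discarded and only
+   the condition codes survive. */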
+static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
+{
+ tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
+}
+
+static void tcg_out_brcond_i32(TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ tcg_out_cmp(s, arg1, arg2, const_arg2);
+    tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 0), label_index);
+ tcg_out_nop(s);
+}
+
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_brcond_i64(TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ tcg_out_cmp(s, arg1, arg2, const_arg2);
+    tcg_out_branch_i64(s, INSN_COND(tcg_cond_to_bcond[cond], 0), label_index);
+ tcg_out_nop(s);
+}
+#else
+static void tcg_out_brcond2_i32(TCGContext *s, int cond,
+ TCGArg al, TCGArg ah,
+ TCGArg bl, int blconst,
+ TCGArg bh, int bhconst, int label_dest)
+{
+ int cc, label_next = gen_new_label();
+
+ tcg_out_cmp(s, ah, bh, bhconst);
+
+ /* Note that we fill one of the delay slots with the second compare. */
+ switch (cond) {
+ case TCG_COND_EQ:
+ cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
+ tcg_out_branch_i32(s, cc, label_next);
+ tcg_out_cmp(s, al, bl, blconst);
+ cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_EQ], 0);
+ tcg_out_branch_i32(s, cc, label_dest);
+ break;
+
+ case TCG_COND_NE:
+ cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
+ tcg_out_branch_i32(s, cc, label_dest);
+ tcg_out_cmp(s, al, bl, blconst);
+ tcg_out_branch_i32(s, cc, label_dest);
+ break;
+
+ default:
+ /* ??? One could fairly easily special-case 64-bit unsigned
+ compares against 32-bit zero-extended constants. For instance,
+ we know that (unsigned)AH < 0 is false and need not emit it.
+ Similarly, (unsigned)AH > 0 being true implies AH != 0, so the
+ second branch will never be taken. */
+ cc = INSN_COND(tcg_cond_to_bcond[cond], 0);
+ tcg_out_branch_i32(s, cc, label_dest);
+ tcg_out_nop(s);
+ cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
+ tcg_out_branch_i32(s, cc, label_next);
+ tcg_out_cmp(s, al, bl, blconst);
+ cc = INSN_COND(tcg_cond_to_bcond[tcg_unsigned_cond(cond)], 0);
+ tcg_out_branch_i32(s, cc, label_dest);
+ break;
+ }
+ tcg_out_nop(s);
+
+ tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
+}
+#endif
+
+static void tcg_out_setcond_i32(TCGContext *s, int cond, TCGArg ret,
+ TCGArg c1, TCGArg c2, int c2const)
+{
+ TCGArg t;
+
+ /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (c2 != 0) {
+ tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
+ }
+ c1 = TCG_REG_G0, c2 = ret, c2const = 0;
+ cond = (cond == TCG_COND_EQ ? TCG_COND_LEU : TCG_COND_LTU);
+ break;
+
+ case TCG_COND_GTU:
+ case TCG_COND_GEU:
+ if (c2const && c2 != 0) {
+ tcg_out_movi_imm13(s, TCG_REG_I5, c2);
+ c2 = TCG_REG_I5;
+ }
+ t = c1, c1 = c2, c2 = t, c2const = 0;
+ cond = tcg_swap_cond(cond);
+ break;
+
+ case TCG_COND_LTU:
+ case TCG_COND_LEU:
+ break;
+
+ default:
+ tcg_out_cmp(s, c1, c2, c2const);
+#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
+ tcg_out_movi_imm13(s, ret, 0);
+ tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
+ | INSN_RS1(tcg_cond_to_bcond[cond])
+ | MOVCC_ICC | INSN_IMM11(1));
+#else
+ t = gen_new_label();
+ tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), t);
+ tcg_out_movi_imm13(s, ret, 1);
+ tcg_out_movi_imm13(s, ret, 0);
+ tcg_out_label(s, t, (tcg_target_long)s->code_ptr);
+#endif
+ return;
+ }
+
+ tcg_out_cmp(s, c1, c2, c2const);
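+    /* Materialize the borrow bit set by the compare: ADDX computes
+       0 + 0 + C, yielding C itself; SUBX computes 0 - (-1) - C,
+       yielding its complement. */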
+ if (cond == TCG_COND_LTU) {
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
+ } else {
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
+ }
+}
+
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_setcond_i64(TCGContext *s, int cond, TCGArg ret,
+ TCGArg c1, TCGArg c2, int c2const)
+{
+ tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_movi_imm13(s, ret, 0);
+ tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
+ | INSN_RS1(tcg_cond_to_bcond[cond])
+ | MOVCC_XCC | INSN_IMM11(1));
+}
+#else
+static void tcg_out_setcond2_i32(TCGContext *s, int cond, TCGArg ret,
+ TCGArg al, TCGArg ah,
+ TCGArg bl, int blconst,
+ TCGArg bh, int bhconst)
+{
+ int lab;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_I5, al, bl, blconst);
+ tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
+ tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_AND);
+ break;
+
+ case TCG_COND_NE:
+        tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_I5, al, bl, blconst);
+ tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
+ tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_OR);
+ break;
+
+ default:
+ lab = gen_new_label();
+
+ tcg_out_cmp(s, ah, bh, bhconst);
+ tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), lab);
+ tcg_out_movi_imm13(s, ret, 1);
+ tcg_out_branch_i32(s, INSN_COND(COND_NE, 1), lab);
+ tcg_out_movi_imm13(s, ret, 0);
+
+ tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), ret, al, bl, blconst);
+
+ tcg_out_label(s, lab, (tcg_target_long)s->code_ptr);
+ break;
+ }
+}
+#endif
+
+/* Generate global QEMU prologue and epilogue code */
+void tcg_target_qemu_prologue(TCGContext *s)
+{
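+    /* SAVE opens a new register window and stack frame; the pointer to
+       the translated code arrived in %o0, which the window shift has
+       renamed to %i0, so jump there. */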
+ tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
+ INSN_IMM13(-TCG_TARGET_STACK_MINFRAME));
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I0) |
+ INSN_RS2(TCG_REG_G0));
+ tcg_out_nop(s);
+}
+
+#if defined(CONFIG_SOFTMMU)
+
+#include "../../softmmu_defs.h"
+
+static const void * const qemu_ld_helpers[4] = {
+ __ldb_mmu,
+ __ldw_mmu,
+ __ldl_mmu,
+ __ldq_mmu,
+};
+
+static const void * const qemu_st_helpers[4] = {
+ __stb_mmu,
+ __stw_mmu,
+ __stl_mmu,
+ __stq_mmu,
+};
+#endif
+
+#if TARGET_LONG_BITS == 32
+#define TARGET_LD_OP LDUW
+#else
+#define TARGET_LD_OP LDX
+#endif
+
+#if TARGET_PHYS_ADDR_BITS == 32
+#define TARGET_ADDEND_LD_OP LDUW
+#else
+#define TARGET_ADDEND_LD_OP LDX
+#endif
+
+#ifdef __arch64__
+#define HOST_LD_OP LDX
+#define HOST_ST_OP STX
+#define HOST_SLL_OP SHIFT_SLLX
+#define HOST_SRA_OP SHIFT_SRAX
+#else
+#define HOST_LD_OP LDUW
+#define HOST_ST_OP STW
+#define HOST_SLL_OP SHIFT_SLL
+#define HOST_SRA_OP SHIFT_SRA
+#endif
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+ s_bits = opc & 3;
+
+ arg0 = TCG_REG_O0;
+ arg1 = TCG_REG_O1;
+ arg2 = TCG_REG_O2;
+
+#if defined(CONFIG_SOFTMMU)
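+    /* Inline TLB lookup: index the TLB with the page bits of the
+       virtual address, compare the tag loaded from addr_read, and fall
+       through to the slow-path helper call on a mismatch. */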
+ /* srl addr_reg, x, arg1 */
+ tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
+ SHIFT_SRL);
+ /* and addr_reg, x, arg0 */
+ tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ /* and arg1, x, arg1 */
+ tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ /* add arg1, x, arg1 */
+ tcg_out_addi(s, arg1, offsetof(CPUState,
+ tlb_table[mem_index][0].addr_read));
+
+ /* add env, arg1, arg1 */
+ tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
+
+ /* ld [arg1], arg2 */
+ tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
+ INSN_RS2(TCG_REG_G0));
+
+ /* subcc arg0, arg2, %g0 */
+ tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
+
+ /* will become:
+ be label1
+ or
+ be,pt %xcc label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* mov (delay slot) */
+ tcg_out_mov(s, arg0, addr_reg);
+
+ /* mov */
+ tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
+
+    /* XXX: move that code to the end of the TB */
+ /* qemu_ld_helper[s_bits](arg0, arg1) */
+ tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
+ - (tcg_target_ulong)s->code_ptr) >> 2)
+ & 0x3fffffff));
+    /* Save AREG0 on the stack across the call (the store sits in the
+       call's delay slot) and reload it afterwards, to avoid ugly glibc
+       bugs that mangle global registers. */
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
+ sizeof(long), HOST_ST_OP);
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
+ sizeof(long), HOST_LD_OP);
+
+ /* data_reg = sign_extend(arg0) */
+ switch(opc) {
+ case 0 | 4:
+ /* sll arg0, 24/56, data_reg */
+ tcg_out_arithi(s, data_reg, arg0, (int)sizeof(tcg_target_long) * 8 - 8,
+ HOST_SLL_OP);
+ /* sra data_reg, 24/56, data_reg */
+ tcg_out_arithi(s, data_reg, data_reg,
+ (int)sizeof(tcg_target_long) * 8 - 8, HOST_SRA_OP);
+ break;
+ case 1 | 4:
+ /* sll arg0, 16/48, data_reg */
+ tcg_out_arithi(s, data_reg, arg0,
+ (int)sizeof(tcg_target_long) * 8 - 16, HOST_SLL_OP);
+ /* sra data_reg, 16/48, data_reg */
+ tcg_out_arithi(s, data_reg, data_reg,
+ (int)sizeof(tcg_target_long) * 8 - 16, HOST_SRA_OP);
+ break;
+ case 2 | 4:
+ /* sll arg0, 32, data_reg */
+ tcg_out_arithi(s, data_reg, arg0, 32, HOST_SLL_OP);
+ /* sra data_reg, 32, data_reg */
+ tcg_out_arithi(s, data_reg, data_reg, 32, HOST_SRA_OP);
+ break;
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* mov */
+ tcg_out_mov(s, data_reg, arg0);
+ break;
+ }
+
+ /* will become:
+ ba label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+    /* nop (delay slot) */
+ tcg_out_nop(s);
+
+ /* label1: */
+#if TARGET_LONG_BITS == 32
+ /* be label1 */
+ *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr));
+#else
+ /* be,pt %xcc label1 */
+ *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1) |
+ (0x5 << 19) | INSN_OFF19((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr));
+#endif
+
+ /* ld [arg1 + x], arg1 */
+ tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_read), TARGET_ADDEND_LD_OP);
+
+#if TARGET_LONG_BITS == 32
+ /* and addr_reg, x, arg0 */
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, 0xffffffff);
+ tcg_out_arith(s, arg0, addr_reg, TCG_REG_I5, ARITH_AND);
+ /* add arg0, arg1, arg0 */
+ tcg_out_arith(s, arg0, arg0, arg1, ARITH_ADD);
+#else
+ /* add addr_reg, arg1, arg0 */
+ tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
+#endif
+
+#else
+ arg0 = addr_reg;
+#endif
+
+ switch(opc) {
+ case 0:
+ /* ldub [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDUB);
+ break;
+ case 0 | 4:
+ /* ldsb [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDSB);
+ break;
+ case 1:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* lduh [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDUH);
+#else
+ /* lduha [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUHA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 1 | 4:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* ldsh [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDSH);
+#else
+ /* ldsha [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSHA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 2:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* lduw [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDUW);
+#else
+ /* lduwa [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUWA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 2 | 4:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* ldsw [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDSW);
+#else
+ /* ldswa [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSWA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 3:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* ldx [arg0], data_reg */
+ tcg_out_ldst(s, data_reg, arg0, 0, LDX);
+#else
+ /* ldxa [arg0] ASI_PRIMARY_LITTLE, data_reg */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, LDXA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label2_ptr));
+#endif
+}
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+ int opc)
+{
+ int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
+#if defined(CONFIG_SOFTMMU)
+ uint32_t *label1_ptr, *label2_ptr;
+#endif
+
+ data_reg = *args++;
+ addr_reg = *args++;
+ mem_index = *args;
+
+ s_bits = opc;
+
+ arg0 = TCG_REG_O0;
+ arg1 = TCG_REG_O1;
+ arg2 = TCG_REG_O2;
+
+#if defined(CONFIG_SOFTMMU)
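+    /* Inline TLB lookup, as in tcg_out_qemu_ld above, but matching
+       against the addr_write tag. */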
+ /* srl addr_reg, x, arg1 */
+ tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
+ SHIFT_SRL);
+
+ /* and addr_reg, x, arg0 */
+ tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
+ ARITH_AND);
+
+ /* and arg1, x, arg1 */
+ tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+
+ /* add arg1, x, arg1 */
+ tcg_out_addi(s, arg1, offsetof(CPUState,
+ tlb_table[mem_index][0].addr_write));
+
+ /* add env, arg1, arg1 */
+ tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
+
+ /* ld [arg1], arg2 */
+ tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
+ INSN_RS2(TCG_REG_G0));
+
+ /* subcc arg0, arg2, %g0 */
+ tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
+
+ /* will become:
+ be label1
+ or
+ be,pt %xcc label1 */
+ label1_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* mov (delay slot) */
+ tcg_out_mov(s, arg0, addr_reg);
+
+ /* mov */
+ tcg_out_mov(s, arg1, data_reg);
+
+ /* mov */
+ tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
+
+    /* XXX: move that code to the end of the TB */
+ /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
+ tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[s_bits]
+ - (tcg_target_ulong)s->code_ptr) >> 2)
+ & 0x3fffffff));
+    /* Save AREG0 on the stack across the call (the store sits in the
+       call's delay slot) and reload it afterwards, to avoid ugly glibc
+       bugs that mangle global registers. */
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
+ sizeof(long), HOST_ST_OP);
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
+ sizeof(long), HOST_LD_OP);
+
+ /* will become:
+ ba label2 */
+ label2_ptr = (uint32_t *)s->code_ptr;
+ tcg_out32(s, 0);
+
+ /* nop (delay slot) */
+ tcg_out_nop(s);
+
+#if TARGET_LONG_BITS == 32
+ /* be label1 */
+ *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr));
+#else
+ /* be,pt %xcc label1 */
+ *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1) |
+ (0x5 << 19) | INSN_OFF19((unsigned long)s->code_ptr -
+ (unsigned long)label1_ptr));
+#endif
+
+ /* ld [arg1 + x], arg1 */
+ tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
+ offsetof(CPUTLBEntry, addr_write), TARGET_ADDEND_LD_OP);
+
+#if TARGET_LONG_BITS == 32
+ /* and addr_reg, x, arg0 */
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, 0xffffffff);
+ tcg_out_arith(s, arg0, addr_reg, TCG_REG_I5, ARITH_AND);
+ /* add arg0, arg1, arg0 */
+ tcg_out_arith(s, arg0, arg0, arg1, ARITH_ADD);
+#else
+ /* add addr_reg, arg1, arg0 */
+ tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
+#endif
+
+#else
+ arg0 = addr_reg;
+#endif
+
+ switch(opc) {
+ case 0:
+ /* stb data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STB);
+ break;
+ case 1:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* sth data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STH);
+#else
+ /* stha data_reg, [arg0] ASI_PRIMARY_LITTLE */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, STHA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 2:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* stw data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STW);
+#else
+ /* stwa data_reg, [arg0] ASI_PRIMARY_LITTLE */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, STWA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ case 3:
+#ifdef TARGET_WORDS_BIGENDIAN
+ /* stx data_reg, [arg0] */
+ tcg_out_ldst(s, data_reg, arg0, 0, STX);
+#else
+ /* stxa data_reg, [arg0] ASI_PRIMARY_LITTLE */
+ tcg_out_ldst_asi(s, data_reg, arg0, 0, STXA, ASI_PRIMARY_LITTLE);
+#endif
+ break;
+ default:
+ tcg_abort();
+ }
+
+#if defined(CONFIG_SOFTMMU)
+ /* label2: */
+ *label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
+ INSN_OFF22((unsigned long)s->code_ptr -
+ (unsigned long)label2_ptr));
+#endif
+}
+
+static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
+ const int *const_args)
+{
+ int c;
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
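+        /* Load the return value into %i0, return with "jmpl %i7+8",
+           and pop the register window with RESTORE in the delay slot
+           (the window shift turns %i0 into the caller's %o0). */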
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
+ INSN_IMM13(8));
+ tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
+ INSN_RS2(TCG_REG_G0));
+ break;
+ case INDEX_op_goto_tb:
+ if (s->tb_jmp_offset) {
+ /* direct jump method */
+ tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000);
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
+ INSN_IMM13((args[0] & 0x1fff)));
+ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+ } else {
+ /* indirect jump method */
+ tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
+ INSN_RS2(TCG_REG_G0));
+ }
+ tcg_out_nop(s);
+ s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
+ break;
+ case INDEX_op_call:
+ if (const_args[0])
+ tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
+ - (tcg_target_ulong)s->code_ptr) >> 2)
+ & 0x3fffffff));
+ else {
+ tcg_out_ld_ptr(s, TCG_REG_I5,
+ (tcg_target_long)(s->tb_next + args[0]));
+ tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
+ INSN_RS2(TCG_REG_G0));
+ }
+    /* Save AREG0 on the stack across the call (the store sits in the
+       call's delay slot) and reload it afterwards, to avoid ugly glibc
+       bugs that mangle global registers. */
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
+ sizeof(long), HOST_ST_OP);
+ tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
+ sizeof(long), HOST_LD_OP);
+ break;
+ case INDEX_op_jmp:
+ case INDEX_op_br:
+        tcg_out_branch_i32(s, INSN_COND(COND_A, 0), args[0]);
+ tcg_out_nop(s);
+ break;
+ case INDEX_op_movi_i32:
+ tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
+ break;
+
+#if TCG_TARGET_REG_BITS == 64
+#define OP_32_64(x) \
+ glue(glue(case INDEX_op_, x), _i32): \
+ glue(glue(case INDEX_op_, x), _i64)
+#else
+#define OP_32_64(x) \
+ glue(glue(case INDEX_op_, x), _i32)
+#endif
+ OP_32_64(ld8u):
+ tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
+ break;
+ OP_32_64(ld8s):
+ tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
+ break;
+ OP_32_64(ld16u):
+ tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
+ break;
+ OP_32_64(ld16s):
+ tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
+ break;
+ case INDEX_op_ld_i32:
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_ld32u_i64:
+#endif
+ tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
+ break;
+ OP_32_64(st8):
+ tcg_out_ldst(s, args[0], args[1], args[2], STB);
+ break;
+ OP_32_64(st16):
+ tcg_out_ldst(s, args[0], args[1], args[2], STH);
+ break;
+ case INDEX_op_st_i32:
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_st32_i64:
+#endif
+ tcg_out_ldst(s, args[0], args[1], args[2], STW);
+ break;
+ OP_32_64(add):
+ c = ARITH_ADD;
+ goto gen_arith;
+ OP_32_64(sub):
+ c = ARITH_SUB;
+ goto gen_arith;
+ OP_32_64(and):
+ c = ARITH_AND;
+ goto gen_arith;
+ OP_32_64(andc):
+ c = ARITH_ANDN;
+ goto gen_arith;
+ OP_32_64(or):
+ c = ARITH_OR;
+ goto gen_arith;
+ OP_32_64(orc):
+ c = ARITH_ORN;
+ goto gen_arith;
+ OP_32_64(xor):
+ c = ARITH_XOR;
+ goto gen_arith;
+ case INDEX_op_shl_i32:
+ c = SHIFT_SLL;
+ goto gen_arith;
+ case INDEX_op_shr_i32:
+ c = SHIFT_SRL;
+ goto gen_arith;
+ case INDEX_op_sar_i32:
+ c = SHIFT_SRA;
+ goto gen_arith;
+ case INDEX_op_mul_i32:
+ c = ARITH_UMUL;
+ goto gen_arith;
+
+ OP_32_64(neg):
+ c = ARITH_SUB;
+ goto gen_arith1;
+ OP_32_64(not):
+ c = ARITH_ORN;
+ goto gen_arith1;
+
+ case INDEX_op_div_i32:
+ tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
+ break;
+ case INDEX_op_divu_i32:
+ tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
+ break;
+
+ case INDEX_op_rem_i32:
+ case INDEX_op_remu_i32:
+ tcg_out_div32(s, TCG_REG_I5, args[1], args[2], const_args[2],
+ opc == INDEX_op_remu_i32);
+ tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
+ ARITH_UMUL);
+ tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
+ break;
+
+ case INDEX_op_brcond_i32:
+ tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
+ args[3]);
+ break;
+ case INDEX_op_setcond_i32:
+ tcg_out_setcond_i32(s, args[3], args[0], args[1],
+ args[2], const_args[2]);
+ break;
+
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_brcond2_i32:
+ tcg_out_brcond2_i32(s, args[4], args[0], args[1],
+ args[2], const_args[2],
+ args[3], const_args[3], args[5]);
+ break;
+ case INDEX_op_setcond2_i32:
+ tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
+ args[3], const_args[3],
+ args[4], const_args[4]);
+ break;
+ case INDEX_op_add2_i32:
+ tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
+ ARITH_ADDCC);
+ tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
+ ARITH_ADDX);
+ break;
+ case INDEX_op_sub2_i32:
+ tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
+ ARITH_SUBCC);
+ tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
+ ARITH_SUBX);
+ break;
+ case INDEX_op_mulu2_i32:
+ tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
+ ARITH_UMUL);
+ tcg_out_rdy(s, args[1]);
+ break;
+#endif
+
+ case INDEX_op_qemu_ld8u:
+ tcg_out_qemu_ld(s, args, 0);
+ break;
+ case INDEX_op_qemu_ld8s:
+ tcg_out_qemu_ld(s, args, 0 | 4);
+ break;
+ case INDEX_op_qemu_ld16u:
+ tcg_out_qemu_ld(s, args, 1);
+ break;
+ case INDEX_op_qemu_ld16s:
+ tcg_out_qemu_ld(s, args, 1 | 4);
+ break;
+ case INDEX_op_qemu_ld32u:
+ tcg_out_qemu_ld(s, args, 2);
+ break;
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld(s, args, 2 | 4);
+ break;
+#endif
+ case INDEX_op_qemu_st8:
+ tcg_out_qemu_st(s, args, 0);
+ break;
+ case INDEX_op_qemu_st16:
+ tcg_out_qemu_st(s, args, 1);
+ break;
+ case INDEX_op_qemu_st32:
+ tcg_out_qemu_st(s, args, 2);
+ break;
+
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_movi_i64:
+ tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
+ break;
+ case INDEX_op_ld32s_i64:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldst(s, args[0], args[1], args[2], LDX);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldst(s, args[0], args[1], args[2], STX);
+ break;
+ case INDEX_op_shl_i64:
+ c = SHIFT_SLLX;
+ goto gen_arith;
+ case INDEX_op_shr_i64:
+ c = SHIFT_SRLX;
+ goto gen_arith;
+ case INDEX_op_sar_i64:
+ c = SHIFT_SRAX;
+ goto gen_arith;
+ case INDEX_op_mul_i64:
+ c = ARITH_MULX;
+ goto gen_arith;
+ case INDEX_op_div_i64:
+ c = ARITH_SDIVX;
+ goto gen_arith;
+ case INDEX_op_divu_i64:
+ c = ARITH_UDIVX;
+ goto gen_arith;
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i64:
+ tcg_out_arithc(s, TCG_REG_I5, args[1], args[2], const_args[2],
+ opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
+ tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
+ ARITH_MULX);
+ tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
+ break;
+ case INDEX_op_ext32s_i64:
+ if (const_args[1]) {
+ tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
+ } else {
+ tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
+ }
+ break;
+ case INDEX_op_ext32u_i64:
+ if (const_args[1]) {
+ tcg_out_movi_imm32(s, args[0], args[1]);
+ } else {
+ tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
+ }
+ break;
+
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
+ args[3]);
+ break;
+ case INDEX_op_setcond_i64:
+ tcg_out_setcond_i64(s, args[3], args[0], args[1],
+ args[2], const_args[2]);
+ break;
+
+ case INDEX_op_qemu_ld64:
+ tcg_out_qemu_ld(s, args, 3);
+ break;
+ case INDEX_op_qemu_st64:
+ tcg_out_qemu_st(s, args, 3);
+ break;
+
+#endif
+ gen_arith:
+ tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
+ break;
+
+ gen_arith1:
+ tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
+ break;
+
+ default:
+ fprintf(stderr, "unknown opcode 0x%x\n", opc);
+ tcg_abort();
+ }
+}
+
+static const TCGTargetOpDef sparc_op_defs[] = {
+ { INDEX_op_exit_tb, { } },
+ { INDEX_op_goto_tb, { } },
+ { INDEX_op_call, { "ri" } },
+ { INDEX_op_jmp, { "ri" } },
+ { INDEX_op_br, { } },
+
+ { INDEX_op_mov_i32, { "r", "r" } },
+ { INDEX_op_movi_i32, { "r" } },
+ { INDEX_op_ld8u_i32, { "r", "r" } },
+ { INDEX_op_ld8s_i32, { "r", "r" } },
+ { INDEX_op_ld16u_i32, { "r", "r" } },
+ { INDEX_op_ld16s_i32, { "r", "r" } },
+ { INDEX_op_ld_i32, { "r", "r" } },
+ { INDEX_op_st8_i32, { "r", "r" } },
+ { INDEX_op_st16_i32, { "r", "r" } },
+ { INDEX_op_st_i32, { "r", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "rJ" } },
+ { INDEX_op_mul_i32, { "r", "r", "rJ" } },
+ { INDEX_op_div_i32, { "r", "r", "rJ" } },
+ { INDEX_op_divu_i32, { "r", "r", "rJ" } },
+ { INDEX_op_rem_i32, { "r", "r", "rJ" } },
+ { INDEX_op_remu_i32, { "r", "r", "rJ" } },
+ { INDEX_op_sub_i32, { "r", "r", "rJ" } },
+ { INDEX_op_and_i32, { "r", "r", "rJ" } },
+ { INDEX_op_andc_i32, { "r", "r", "rJ" } },
+ { INDEX_op_or_i32, { "r", "r", "rJ" } },
+ { INDEX_op_orc_i32, { "r", "r", "rJ" } },
+ { INDEX_op_xor_i32, { "r", "r", "rJ" } },
+
+ { INDEX_op_shl_i32, { "r", "r", "rJ" } },
+ { INDEX_op_shr_i32, { "r", "r", "rJ" } },
+ { INDEX_op_sar_i32, { "r", "r", "rJ" } },
+
+ { INDEX_op_neg_i32, { "r", "rJ" } },
+ { INDEX_op_not_i32, { "r", "rJ" } },
+
+ { INDEX_op_brcond_i32, { "r", "rJ" } },
+ { INDEX_op_setcond_i32, { "r", "r", "rJ" } },
+
+#if TCG_TARGET_REG_BITS == 32
+ { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
+ { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
+ { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
+ { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
+ { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
+#endif
+
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+#if TCG_TARGET_REG_BITS == 64
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+#endif
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+
+#if TCG_TARGET_REG_BITS == 64
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+ { INDEX_op_qemu_ld64, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L" } },
+
+ { INDEX_op_add_i64, { "r", "r", "rJ" } },
+ { INDEX_op_mul_i64, { "r", "r", "rJ" } },
+ { INDEX_op_div_i64, { "r", "r", "rJ" } },
+ { INDEX_op_divu_i64, { "r", "r", "rJ" } },
+ { INDEX_op_rem_i64, { "r", "r", "rJ" } },
+ { INDEX_op_remu_i64, { "r", "r", "rJ" } },
+ { INDEX_op_sub_i64, { "r", "r", "rJ" } },
+ { INDEX_op_and_i64, { "r", "r", "rJ" } },
+ { INDEX_op_andc_i64, { "r", "r", "rJ" } },
+ { INDEX_op_or_i64, { "r", "r", "rJ" } },
+ { INDEX_op_orc_i64, { "r", "r", "rJ" } },
+ { INDEX_op_xor_i64, { "r", "r", "rJ" } },
+
+ { INDEX_op_shl_i64, { "r", "r", "rJ" } },
+ { INDEX_op_shr_i64, { "r", "r", "rJ" } },
+ { INDEX_op_sar_i64, { "r", "r", "rJ" } },
+
+ { INDEX_op_neg_i64, { "r", "rJ" } },
+ { INDEX_op_not_i64, { "r", "rJ" } },
+
+ { INDEX_op_ext32s_i64, { "r", "ri" } },
+ { INDEX_op_ext32u_i64, { "r", "ri" } },
+
+ { INDEX_op_brcond_i64, { "r", "rJ" } },
+ { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
+#endif
+ { -1 },
+};
+
+void tcg_target_init(TCGContext *s)
+{
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
+#if TCG_TARGET_REG_BITS == 64
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
+#endif
+ tcg_regset_set32(tcg_target_call_clobber_regs, 0,
+ (1 << TCG_REG_G1) |
+ (1 << TCG_REG_G2) |
+ (1 << TCG_REG_G3) |
+ (1 << TCG_REG_G4) |
+ (1 << TCG_REG_G5) |
+ (1 << TCG_REG_G6) |
+ (1 << TCG_REG_G7) |
+ (1 << TCG_REG_O0) |
+ (1 << TCG_REG_O1) |
+ (1 << TCG_REG_O2) |
+ (1 << TCG_REG_O3) |
+ (1 << TCG_REG_O4) |
+ (1 << TCG_REG_O5) |
+ (1 << TCG_REG_O7));
+
+ tcg_regset_clear(s->reserved_regs);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
+#if TCG_TARGET_REG_BITS == 64
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use
+#endif
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
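+    // %i6/%o6 are the frame and stack pointers, %i7/%o7 the return
+    // address registers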
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
+ tcg_add_target_add_op_defs(sparc_op_defs);
+}
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
new file mode 100644
index 0000000..dbc574d
--- /dev/null
+++ b/tcg/sparc/tcg-target.h
@@ -0,0 +1,146 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#define TCG_TARGET_SPARC 1
+
+#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
+#define TCG_TARGET_REG_BITS 64
+#else
+#define TCG_TARGET_REG_BITS 32
+#endif
+
+#define TCG_TARGET_WORDS_BIGENDIAN
+
+#define TCG_TARGET_NB_REGS 32
+
+enum {
+ TCG_REG_G0 = 0,
+ TCG_REG_G1,
+ TCG_REG_G2,
+ TCG_REG_G3,
+ TCG_REG_G4,
+ TCG_REG_G5,
+ TCG_REG_G6,
+ TCG_REG_G7,
+ TCG_REG_O0,
+ TCG_REG_O1,
+ TCG_REG_O2,
+ TCG_REG_O3,
+ TCG_REG_O4,
+ TCG_REG_O5,
+ TCG_REG_O6,
+ TCG_REG_O7,
+ TCG_REG_L0,
+ TCG_REG_L1,
+ TCG_REG_L2,
+ TCG_REG_L3,
+ TCG_REG_L4,
+ TCG_REG_L5,
+ TCG_REG_L6,
+ TCG_REG_L7,
+ TCG_REG_I0,
+ TCG_REG_I1,
+ TCG_REG_I2,
+ TCG_REG_I3,
+ TCG_REG_I4,
+ TCG_REG_I5,
+ TCG_REG_I6,
+ TCG_REG_I7,
+};
+
+#define TCG_CT_CONST_S11 0x100
+#define TCG_CT_CONST_S13 0x200
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_I6
+#ifdef __arch64__
+// Reserve space for AREG0
+#define TCG_TARGET_STACK_MINFRAME (176 + 4 * (int)sizeof(long) + \
+ TCG_STATIC_CALL_ARGS_SIZE)
+#define TCG_TARGET_CALL_STACK_OFFSET (2047 - 16)
+#define TCG_TARGET_STACK_ALIGN 16
+#else
+// AREG0 + one word for alignment
+#define TCG_TARGET_STACK_MINFRAME (92 + (2 + 1) * (int)sizeof(long) + \
+ TCG_STATIC_CALL_ARGS_SIZE)
+#define TCG_TARGET_CALL_STACK_OFFSET TCG_TARGET_STACK_MINFRAME
+#define TCG_TARGET_STACK_ALIGN 8
+#endif
+
+/* optional instructions */
+#define TCG_TARGET_HAS_div_i32
+// #define TCG_TARGET_HAS_rot_i32
+// #define TCG_TARGET_HAS_ext8s_i32
+// #define TCG_TARGET_HAS_ext16s_i32
+// #define TCG_TARGET_HAS_ext8u_i32
+// #define TCG_TARGET_HAS_ext16u_i32
+// #define TCG_TARGET_HAS_bswap16_i32
+// #define TCG_TARGET_HAS_bswap32_i32
+#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_not_i32
+#define TCG_TARGET_HAS_andc_i32
+#define TCG_TARGET_HAS_orc_i32
+
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_div_i64
+// #define TCG_TARGET_HAS_rot_i64
+// #define TCG_TARGET_HAS_ext8s_i64
+// #define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+// #define TCG_TARGET_HAS_ext8u_i64
+// #define TCG_TARGET_HAS_ext16u_i64
+#define TCG_TARGET_HAS_ext32u_i64
+// #define TCG_TARGET_HAS_bswap16_i64
+// #define TCG_TARGET_HAS_bswap32_i64
+// #define TCG_TARGET_HAS_bswap64_i64
+#define TCG_TARGET_HAS_neg_i64
+#define TCG_TARGET_HAS_not_i64
+#define TCG_TARGET_HAS_andc_i64
+#define TCG_TARGET_HAS_orc_i64
+#endif
+
+/* Note: must be synced with dyngen-exec.h and Makefile.target */
+#ifdef CONFIG_SOLARIS
+#define TCG_AREG0 TCG_REG_G2
+#define TCG_AREG1 TCG_REG_G3
+#define TCG_AREG2 TCG_REG_G4
+#elif defined(__sparc_v9__)
+#define TCG_AREG0 TCG_REG_G5
+#define TCG_AREG1 TCG_REG_G6
+#define TCG_AREG2 TCG_REG_G7
+#else
+#define TCG_AREG0 TCG_REG_G6
+#define TCG_AREG1 TCG_REG_G1
+#define TCG_AREG2 TCG_REG_G2
+#endif
+
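+/* FLUSH operates on doubleword-aligned addresses; round the range out
+   to 8-byte boundaries and flush one doubleword per iteration. */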
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long p;
+
+ p = start & ~(8UL - 1UL);
+ stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);
+
+ for (; p < stop; p += 8)
+ __asm__ __volatile__("flush\t%0" : : "r" (p));
+}