#define A_COND_AL 0xe
#define A_COND_EQ 0x0
#define A_COND_NE 0x1
+#define A_COND_HS 0x2
+#define A_COND_LO 0x3
#define A_COND_MI 0x4
#define A_COND_PL 0x5
+#define A_COND_VS 0x6
+#define A_COND_VC 0x7
+#define A_COND_HI 0x8
#define A_COND_LS 0x9
+#define A_COND_GE 0xa
+#define A_COND_LT 0xb
+#define A_COND_GT 0xc
#define A_COND_LE 0xd
/* unified conditions */
#define DCOND_NE A_COND_NE
#define DCOND_MI A_COND_MI
#define DCOND_PL A_COND_PL
+#define DCOND_HI A_COND_HI
+#define DCOND_HS A_COND_HS
+#define DCOND_LO A_COND_LO
+#define DCOND_GE A_COND_GE
+#define DCOND_GT A_COND_GT
+#define DCOND_LT A_COND_LT
+#define DCOND_LS A_COND_LS
+#define DCOND_LE A_COND_LE
+#define DCOND_VS A_COND_VS
+#define DCOND_VC A_COND_VC
+#define DCOND_CS A_COND_HS
+#define DCOND_CC A_COND_LO
/* addressing mode 1 */
#define A_AM1_LSL 0
#define A_OP_SUB 0x2
#define A_OP_RSB 0x3
#define A_OP_ADD 0x4
+#define A_OP_ADC 0x5
+#define A_OP_SBC 0x6
#define A_OP_TST 0x8
#define A_OP_TEQ 0x9
#define A_OP_CMP 0xa
#define A_OP_ORR 0xc
#define A_OP_MOV 0xd
#define A_OP_BIC 0xe
+#define A_OP_MVN 0xf
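+/* ARM data processing: cond | op | S | Rn | Rd | shifter_operand */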
#define EOP_C_DOP_X(cond,op,s,rn,rd,shifter_op) \
EMIT(((cond)<<28) | ((op)<< 21) | ((s)<<20) | ((rn)<<16) | ((rd)<<12) | (shifter_op))
#define EOP_MOV_IMM(rd, ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,0, 0,rd,ror2,imm8)
#define EOP_ORR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ORR,0,rn,rd,ror2,imm8)
+#define EOP_EOR_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_EOR,0,rn,rd,ror2,imm8)
#define EOP_ADD_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_ADD,0,rn,rd,ror2,imm8)
#define EOP_BIC_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_BIC,0,rn,rd,ror2,imm8)
#define EOP_AND_IMM(rd,rn,ror2,imm8) EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,0,rn,rd,ror2,imm8)
#define EOP_MOV_REG(cond,s,rd, rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_MOV,s, 0,rd,shift_imm,shift_op,rm)
#define EOP_ORR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ORR,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_ADD_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADD,s,rn,rd,shift_imm,shift_op,rm)
+#define EOP_ADC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_ADC,s,rn,rd,shift_imm,shift_op,rm)
#define EOP_SUB_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SUB,s,rn,rd,shift_imm,shift_op,rm)
+#define EOP_SBC_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_SBC,s,rn,rd,shift_imm,shift_op,rm)
+#define EOP_AND_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_AND,s,rn,rd,shift_imm,shift_op,rm)
+#define EOP_EOR_REG(cond,s,rd,rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_EOR,s,rn,rd,shift_imm,shift_op,rm)
+#define EOP_CMP_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_CMP,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TST_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TST,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_TEQ_REG(cond, rn,rm,shift_op,shift_imm) EOP_C_DOP_REG_XIMM(cond,A_OP_TEQ,1,rn, 0,shift_imm,shift_op,rm)
#define EOP_C_MUL(cond,s,rd,rs,rm) \
EMIT(((cond)<<28) | ((s)<<20) | ((rd)<<16) | ((rs)<<8) | 0x90 | (rm))
+#define EOP_C_UMULL(cond,s,rdhi,rdlo,rs,rm) \
+ EMIT(((cond)<<28) | 0x00800000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))
+
+#define EOP_C_SMULL(cond,s,rdhi,rdlo,rs,rm) \
+ EMIT(((cond)<<28) | 0x00c00000 | ((s)<<20) | ((rdhi)<<16) | ((rdlo)<<12) | ((rs)<<8) | 0x90 | (rm))
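+// (UMULL/SMULL: bit 23 marks a long multiply, bit 22 signedness; rdhi:rdlo = rm * rs)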
+
#define EOP_MUL(rd,rm,rs) EOP_C_MUL(A_COND_AL,0,rd,rs,rm) // note: rd != rm
#define EOP_C_MRS(cond,rd) \
#define EMITH_SJMP_END(cond) \
(void)(cond)
-#define EMITH_CONDITIONAL(code, is_nonzero) { \
- u32 val, cond, *ptr; \
- cond = (is_nonzero) ? A_COND_NE : A_COND_EQ; \
- ptr = (void *)tcache_ptr; \
- tcache_ptr = (void *)(ptr + 1); \
- code; \
- val = (u32 *)tcache_ptr - (ptr + 2); \
- EMIT_PTR(ptr, ((cond)<<28) | 0x0a000000 | (val & 0xffffff)); \
-}
-
#define emith_move_r_r(d, s) \
EOP_MOV_REG_SIMPLE(d, s)
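+// d = s1 OP (s2 << lslimm) - a single instruction on ARM thanks to the barrel shifter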
+#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) \
+ EOP_ORR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
+
+#define emith_eor_r_r_r_lsl(d, s1, s2, lslimm) \
+ EOP_EOR_REG(A_COND_AL,0,d,s1,s2,A_AM1_LSL,lslimm)
+
+#define emith_or_r_r_r(d, s1, s2) \
+ emith_or_r_r_r_lsl(d, s1, s2, 0)
+
+#define emith_eor_r_r_r(d, s1, s2) \
+ emith_eor_r_r_r_lsl(d, s1, s2, 0)
+
#define emith_add_r_r(d, s) \
EOP_ADD_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
#define emith_sub_r_r(d, s) \
EOP_SUB_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
+#define emith_and_r_r(d, s) \
+ EOP_AND_REG(A_COND_AL,0,d,d,s,A_AM1_LSL,0)
+
+#define emith_or_r_r(d, s) \
+ emith_or_r_r_r(d, d, s)
+
+#define emith_eor_r_r(d, s) \
+ emith_eor_r_r_r(d, d, s)
+
+#define emith_tst_r_r(d, s) \
+ EOP_TST_REG(A_COND_AL,d,s,A_AM1_LSL,0)
+
#define emith_teq_r_r(d, s) \
EOP_TEQ_REG(A_COND_AL,d,s,A_AM1_LSL,0)
+#define emith_cmp_r_r(d, s) \
+ EOP_CMP_REG(A_COND_AL,d,s,A_AM1_LSL,0)
+
+#define emith_addf_r_r(d, s) \
+ EOP_ADD_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
+
#define emith_subf_r_r(d, s) \
EOP_SUB_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
+#define emith_adcf_r_r(d, s) \
+ EOP_ADC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
+
+#define emith_sbcf_r_r(d, s) \
+ EOP_SBC_REG(A_COND_AL,1,d,d,s,A_AM1_LSL,0)
+
#define emith_move_r_imm(r, imm) \
emith_op_imm(A_COND_AL, 0, A_OP_MOV, r, imm)
#define emith_or_r_imm_c(cond, r, imm) \
emith_op_imm(cond, 0, A_OP_ORR, r, imm)
+#define emith_bic_r_imm_c(cond, r, imm) \
+ emith_op_imm(cond, 0, A_OP_BIC, r, imm)
+
#define emith_lsl(d, s, cnt) \
EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSL,cnt)
#define emith_lsr(d, s, cnt) \
EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
+#define emith_lslf(d, s, cnt) \
+ EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
+
#define emith_asrf(d, s, cnt) \
EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)
EOP_MUL(d, s2, s1); \
}
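+// 32x32 -> 64 bit multiply: dhi:dlo = s1 * s2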
+#define emith_mul_u64(dlo, dhi, s1, s2) \
+ EOP_C_UMULL(A_COND_AL,0,dhi,dlo,s1,s2)
+
+#define emith_mul_s64(dlo, dhi, s1, s2) \
+ EOP_C_SMULL(A_COND_AL,0,dhi,dlo,s1,s2)
+
+// misc
#define emith_ctx_read(r, offs) \
EOP_LDR_IMM(r, CONTEXT_REG, offs)
EOP_MOV_REG_ASR(d,d,32 - (bits)); \
}
+// put bit0 of r0 to carry
+#define emith_set_carry(r0) \
+ EOP_TST_REG(A_COND_AL,r0,r0,A_AM1_LSR,1) /* shift out to carry */
+
+// put bit0 of r0 to carry (for subtraction, inverted on ARM)
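+// (ARM SBC computes rn - op2 - !C, so the borrow must be inverted before use)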
+#define emith_set_carry_sub(r0) { \
+ int t = rcache_get_tmp(); \
+ EOP_EOR_IMM(t,r0,0,1); /* invert */ \
+ EOP_MOV_REG(A_COND_AL,1,t,t,A_AM1_LSR,1); /* shift out to carry */ \
+ rcache_free_tmp(t); \
+}
+
#define host_arg2reg(rd, arg) \
rd = arg
+/*
+ * note about silly things like emith_eor_r_r_r_lsl:
+ * these are here because the compiler was designed
+ * for ARM as its primary target.
+ */
#include <stdarg.h>
enum { xAX = 0, xCX, xDX, xBX, xSP, xBP, xSI, xDI };
#define CONTEXT_REG xBP
+#define IOP_JO 0x70
+#define IOP_JNO 0x71
+#define IOP_JB 0x72
+#define IOP_JAE 0x73
#define IOP_JE 0x74
#define IOP_JNE 0x75
#define IOP_JBE 0x76
#define IOP_JA 0x77
#define IOP_JS 0x78
#define IOP_JNS 0x79
+#define IOP_JL 0x7c
+#define IOP_JGE 0x7d
#define IOP_JLE 0x7e
+#define IOP_JG 0x7f
// unified conditions (we just use rel8 jump instructions for x86)
#define DCOND_EQ IOP_JE
#define DCOND_NE IOP_JNE
#define DCOND_MI IOP_JS // MInus
#define DCOND_PL IOP_JNS // PLus or zero
+#define DCOND_HI IOP_JA // higher (unsigned)
+#define DCOND_HS IOP_JAE // higher || same (unsigned)
+#define DCOND_LO IOP_JB // lower (unsigned)
+#define DCOND_LS IOP_JBE // lower || same (unsigned)
+#define DCOND_GE IOP_JGE // greater || equal (signed)
+#define DCOND_GT IOP_JG // greater (signed)
+#define DCOND_LE IOP_JLE // less || equal (signed)
+#define DCOND_LT IOP_JL // less (signed)
+#define DCOND_VS IOP_JO // oVerflow Set
+#define DCOND_VC IOP_JNO // oVerflow Clear
+#define DCOND_CS IOP_JB // Carry Set
+#define DCOND_CC IOP_JAE // Carry Clear
#define EMIT_PTR(ptr, val, type) \
*(type *)(ptr) = val
EMIT_PTR(ptr, op, u8); \
EMIT_PTR(ptr + 1, (tcache_ptr - (ptr+2)), u8)
+// _r_r
#define emith_move_r_r(dst, src) \
EMIT_OP_MODRM(0x8b, 3, dst, src)
#define emith_sub_r_r(d, s) \
EMIT_OP_MODRM(0x29, 3, s, d)
+#define emith_adc_r_r(d, s) \
+ EMIT_OP_MODRM(0x11, 3, s, d)
+
+#define emith_sbc_r_r(d, s) \
+ EMIT_OP_MODRM(0x19, 3, s, d) /* SBB */
+
#define emith_or_r_r(d, s) \
EMIT_OP_MODRM(0x09, 3, s, d)
+#define emith_and_r_r(d, s) \
+ EMIT_OP_MODRM(0x21, 3, s, d)
+
#define emith_eor_r_r(d, s) \
- EMIT_OP_MODRM(0x31, 3, s, d)
+ EMIT_OP_MODRM(0x31, 3, s, d) /* XOR */
+
+#define emith_tst_r_r(d, s) \
+ EMIT_OP_MODRM(0x85, 3, s, d) /* TEST */
+
+#define emith_cmp_r_r(d, s) \
+ EMIT_OP_MODRM(0x39, 3, s, d)
// fake teq - test equivalence - get_flags(d ^ s)
#define emith_teq_r_r(d, s) { \
emith_pop(d); \
}
+// _r_r_r
+#define emith_eor_r_r_r(d, s1, s2) { \
+ if (d == s2) /* xor is commutative, so d aliasing s2 is fine */ \
+ emith_eor_r_r(d, s1); \
+ else { \
+ if (d != s1) \
+ emith_move_r_r(d, s1); \
+ emith_eor_r_r(d, s2); \
+ } \
+}
+
+#define emith_or_r_r_r_lsl(d, s1, s2, lslimm) { \
+ if (d != s1) { /* also covers d == s2: lsl moves s2 into d first */ \
+ emith_lsl(d, s2, lslimm); \
+ emith_or_r_r(d, s1); \
+ } else { /* d == s1: preserve s2 (d == s1 == s2 not supported) */ \
+ emith_push(s2); \
+ emith_lsl(s2, s2, lslimm); \
+ emith_or_r_r(d, s2); \
+ emith_pop(s2); \
+ } \
+}
+
// _r_imm
#define emith_move_r_imm(r, imm) { \
EMIT_OP(0xb8 + (r)); \
// fake conditionals (using SJMP instead)
#define emith_add_r_imm_c(cond, r, imm) { \
(void)(cond); \
- emith_arith_r_imm(0, r, imm); \
+ emith_add_r_imm(r, imm); \
}
#define emith_or_r_imm_c(cond, r, imm) { \
(void)(cond); \
- emith_arith_r_imm(1, r, imm); \
+ emith_or_r_imm(r, imm); \
}
#define emith_sub_r_imm_c(cond, r, imm) { \
(void)(cond); \
- emith_arith_r_imm(5, r, imm); \
+ emith_sub_r_imm(r, imm); \
+}
+
+#define emith_bic_r_imm_c(cond, r, imm) { \
+ (void)(cond); \
+ emith_bic_r_imm(r, imm); \
}
// shift
EMIT(cnt, u8); \
}
-#define emith_asr(d, s, cnt) \
- emith_shift(7, d, s, cnt)
-
#define emith_lsl(d, s, cnt) \
emith_shift(4, d, s, cnt)
+#define emith_lsr(d, s, cnt) \
+ emith_shift(5, d, s, cnt)
+
+#define emith_asr(d, s, cnt) \
+ emith_shift(7, d, s, cnt)
+
// misc
#define emith_push(r) \
EMIT_OP(0x50 + (r))
emith_asr(d, d, 32 - (bits)); \
}
+// put bit0 of r0 to carry
+#define emith_set_carry(r0) { \
+ emith_tst_r_imm(r0, 1); /* clears C */ \
+ EMITH_SJMP_START(DCOND_EQ); \
+ EMIT_OP(0xf9); /* STC */ \
+ EMITH_SJMP_END(DCOND_EQ); \
+}
+
+// put bit0 of r0 to carry (for subtraction)
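+// (x86 SBB takes CF as a borrow, the same sense as SH2's T, so no inversion is needed)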
+#define emith_set_carry_sub emith_set_carry
+
// XXX: stupid mess
-#define emith_mul(d, s1, s2) { \
+#define emith_mul_(op, dlo, dhi, s1, s2) { \
int rmr; \
- if (d != xAX) \
+ if (dlo != xAX && dhi != xAX) \
emith_push(xAX); \
+ if (dlo != xDX && dhi != xDX) \
+ emith_push(xDX); \
if ((s1) == xAX) \
rmr = s2; \
else if ((s2) == xAX) \
rmr = s1; \
else { \
emith_move_r_r(xAX, s1); \
rmr = s2; \
} \
- emith_push(xDX); \
- EMIT_OP_MODRM(0xf7, 3, 4, rmr); /* MUL rmr */ \
- emith_pop(xDX); \
- if (d != xAX) { \
- emith_move_r_r(d, xAX); \
+ EMIT_OP_MODRM(0xf7, 3, op, rmr); /* xMUL rmr */ \
+ /* XXX: using push/pop for the case of edx->eax; eax->edx */ \
+ if (dhi != xDX && dhi != -1) \
+ emith_push(xDX); \
+ if (dlo != xAX) \
+ emith_move_r_r(dlo, xAX); \
+ if (dhi != xDX && dhi != -1) \
+ emith_pop(dhi); \
+ if (dlo != xDX && dhi != xDX) \
+ emith_pop(xDX); \
+ if (dlo != xAX && dhi != xAX) \
emith_pop(xAX); \
- } \
}
+#define emith_mul_u64(dlo, dhi, s1, s2) \
+ emith_mul_(4, dlo, dhi, s1, s2) /* MUL */
+
+#define emith_mul_s64(dlo, dhi, s1, s2) \
+ emith_mul_(5, dlo, dhi, s1, s2) /* IMUL */
+
+#define emith_mul(d, s1, s2) \
+ emith_mul_(4, d, -1, s1, s2)
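+// (dhi == -1 means the high half of the result is not needed)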
+
// "flag" instructions are the same
#define emith_subf_r_imm emith_sub_r_imm
+#define emith_addf_r_r emith_add_r_r
#define emith_subf_r_r emith_sub_r_r
+#define emith_adcf_r_r emith_adc_r_r
+#define emith_sbcf_r_r emith_sbc_r_r
+
+#define emith_lslf emith_lsl
+#define emith_lsrf emith_lsr
+#define emith_asrf emith_asr
// XXX: offs is 8bit only
#define emith_ctx_read(r, offs) { \
} temp_reg_t;
// note: reg_temp[] must have at least the amount of
-// registers used by handlers in worst case (currently 3?)
+// registers used by handlers in worst case (currently 4)
#ifdef ARM
#include "../drc/emit_arm.c"
-1, -1, -1, -1,
};
-// ax, cx, dx are usually temporaries
+// ax, cx, dx are usually temporaries by convention
static temp_reg_t reg_temp[] = {
{ xAX, },
+ { xBX, },
{ xCX, },
{ xDX, },
};
MOVA @(disp,PC),R0 11000111dddddddd
SWAP.B Rm,Rn 0110nnnnmmmm1000
SWAP.W Rm,Rn 0110nnnnmmmm1001
-XTRCT Rm,Rn 0010nnnnmmmm1101
-ADD Rm,Rn 0011nnnnmmmm1100
ADD #imm,Rn 0111nnnniiiiiiii
-ADDC Rm,Rn 0011nnnnmmmm1110
-ADDV Rm,Rn 0011nnnnmmmm1111
CMP/EQ #imm,R0 10001000iiiiiiii
-CMP/EQ Rm,Rn 0011nnnnmmmm0000
-CMP/HS Rm,Rn 0011nnnnmmmm0010
-CMP/GE Rm,Rn 0011nnnnmmmm0011
-CMP/HI Rm,Rn 0011nnnnmmmm0110
-CMP/GT Rm,Rn 0011nnnnmmmm0111
CMP/PZ Rn 0100nnnn00010001
CMP/PL Rn 0100nnnn00010101
-CMP/STR Rm,Rn 0010nnnnmmmm1100
-DIV1 Rm,Rn 0011nnnnmmmm0100
-DMULS.L Rm,Rn 0011nnnnmmmm1101
-DMULU.L Rm,Rn 0011nnnnmmmm0101
EXTS.B Rm,Rn 0110nnnnmmmm1110
EXTS.W Rm,Rn 0110nnnnmmmm1111
EXTU.B Rm,Rn 0110nnnnmmmm1100
EXTU.W Rm,Rn 0110nnnnmmmm1101
MAC @Rm+,@Rn+ 0100nnnnmmmm1111
-MULS.W Rm,Rn 0010nnnnmmmm1111
-MULU.W Rm,Rn 0010nnnnmmmm1110
NEG Rm,Rn 0110nnnnmmmm1011
NEGC Rm,Rn 0110nnnnmmmm1010
-SUB Rm,Rn 0011nnnnmmmm1000
-SUBC Rm,Rn 0011nnnnmmmm1010
-SUBV Rm,Rn 0011nnnnmmmm1011
-AND Rm,Rn 0010nnnnmmmm1001
AND #imm,R0 11001001iiiiiiii
AND.B #imm,@(R0,GBR) 11001101iiiiiiii
NOT Rm,Rn 0110nnnnmmmm0111
-OR Rm,Rn 0010nnnnmmmm1011
OR #imm,R0 11001011iiiiiiii
OR.B #imm,@(R0,GBR) 11001111iiiiiiii
TAS.B @Rn 0100nnnn00011011
-TST Rm,Rn 0010nnnnmmmm1000
TST #imm,R0 11001000iiiiiiii
TST.B #imm,@(R0,GBR) 11001100iiiiiiii
-XOR Rm,Rn 0010nnnnmmmm1010
XOR #imm,R0 11001010iiiiiiii
XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
ROTL Rn 0100nnnn00000100
ROTR Rn 0100nnnn00000101
ROTCL Rn 0100nnnn00100100
ROTCR Rn 0100nnnn00100101
-SHAL Rn 0100nnnn00100000
SHAR Rn 0100nnnn00100001
-SHLL Rn 0100nnnn00000000
SHLR Rn 0100nnnn00000001
SHLL2 Rn 0100nnnn00001000
SHLR2 Rn 0100nnnn00001001
int op, delayed_op = 0, test_irq = 0;
int tcache_id = 0, blkid = 0;
int cycles = 0;
- u32 tmp, tmp2, tmp3;
+ u32 tmp, tmp2, tmp3, tmp4;
// validate PC
tmp = sh2->pc >> 29;
switch ((op >> 12) & 0x0f)
{
+ /////////////////////////////////////////////
case 0x00:
switch (op & 0x0f)
{
}
goto default_;
+ /////////////////////////////////////////////
case 0x01:
// MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
rcache_clean();
emith_or_r_imm_c(DCOND_MI, tmp, T);
EMITH_SJMP_END(DCOND_PL);
goto end_op;
+ case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
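+ // T = ((Rn & Rm) == 0)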
+ tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_bic_r_imm(tmp, T);
+ emith_tst_r_r(tmp2, tmp3);
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp, T);
+ EMITH_SJMP_END(DCOND_NE);
+ goto end_op;
+ case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_and_r_r(tmp, tmp2);
+ goto end_op;
+ case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_eor_r_r(tmp, tmp2);
+ goto end_op;
+ case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_or_r_r(tmp, tmp2);
+ goto end_op;
+ case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
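+ // T = 1 if any byte of Rn matches the corresponding byte of Rm (tested via Rn ^ Rm)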
+ tmp = rcache_get_tmp();
+ tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_eor_r_r_r(tmp, tmp2, tmp3);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_bic_r_imm(tmp2, T);
+ emith_tst_r_imm(tmp, 0x000000ff);
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp2, T);
+ EMITH_SJMP_END(DCOND_NE);
+ emith_tst_r_imm(tmp, 0x0000ff00);
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp2, T);
+ EMITH_SJMP_END(DCOND_NE);
+ emith_tst_r_imm(tmp, 0x00ff0000);
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp2, T);
+ EMITH_SJMP_END(DCOND_NE);
+ emith_tst_r_imm(tmp, 0xff000000);
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp2, T);
+ EMITH_SJMP_END(DCOND_NE);
+ rcache_free_tmp(tmp);
+ goto end_op;
+ case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
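+ // Rn = (Rm << 16) | (Rn >> 16) - the middle 32 bits of the 64 bit Rm:Rn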
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_lsr(tmp, tmp, 16);
+ emith_or_r_r_r_lsl(tmp, tmp, tmp2, 16);
+ goto end_op;
+ case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
+ case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
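+ // MACL = Rn * Rm as 16 bit values, unsigned (MULU.W) or signed (MULS.W)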
+ tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
+ if (op & 1) {
+ emith_sext(tmp, tmp2, 16);
+ } else
+ emith_clear_msb(tmp, tmp2, 16);
+ tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ tmp2 = rcache_get_tmp();
+ if (op & 1) {
+ emith_sext(tmp2, tmp3, 16);
+ } else
+ emith_clear_msb(tmp2, tmp3, 16);
+ emith_mul(tmp, tmp, tmp2);
+ rcache_free_tmp(tmp2);
+// FIXME: causes timing issues in Doom?
+// cycles++;
+ goto end_op;
}
goto default_;
+ /////////////////////////////////////////////
+ case 0x03:
+ switch (op & 0x0f)
+ {
+ case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
+ case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
+ case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
+ case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
+ case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
+ tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ emith_bic_r_imm(tmp, T);
+ emith_cmp_r_r(tmp2, tmp3);
+ switch (op & 0x07)
+ {
+ case 0x00: // CMP/EQ
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp, T);
+ EMITH_SJMP_END(DCOND_NE);
+ break;
+ case 0x02: // CMP/HS
+ EMITH_SJMP_START(DCOND_LO);
+ emith_or_r_imm_c(DCOND_HS, tmp, T);
+ EMITH_SJMP_END(DCOND_LO);
+ break;
+ case 0x03: // CMP/GE
+ EMITH_SJMP_START(DCOND_LT);
+ emith_or_r_imm_c(DCOND_GE, tmp, T);
+ EMITH_SJMP_END(DCOND_LT);
+ break;
+ case 0x06: // CMP/HI
+ EMITH_SJMP_START(DCOND_LS);
+ emith_or_r_imm_c(DCOND_HI, tmp, T);
+ EMITH_SJMP_END(DCOND_LS);
+ break;
+ case 0x07: // CMP/GT
+ EMITH_SJMP_START(DCOND_LE);
+ emith_or_r_imm_c(DCOND_GT, tmp, T);
+ EMITH_SJMP_END(DCOND_LE);
+ break;
+ }
+ goto end_op;
+ case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
+ // TODO
+ break;
+ case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
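+ // MACH:MACL = (u64)Rn * (u64)Rm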
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
+ tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
+ emith_mul_u64(tmp3, tmp4, tmp, tmp2);
+ goto end_op;
+ case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
+ case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ if (op & 4) {
+ emith_add_r_r(tmp, tmp2);
+ } else
+ emith_sub_r_r(tmp, tmp2);
+ goto end_op;
+ case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
+ case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
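+ // load T into the host carry flag, adc/sbc with flag update, then write the carry back to T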
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ tmp3 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ if (op & 4) { // adc
+ emith_set_carry(tmp3);
+ emith_adcf_r_r(tmp, tmp2);
+ tmp = DCOND_CS; // set condition
+ tmp2 = DCOND_CC; // clear condition
+ } else {
+ emith_set_carry_sub(tmp3);
+ emith_sbcf_r_r(tmp, tmp2);
+ tmp = DCOND_LO; // using LO/HS instead of CS/CC
+ tmp2 = DCOND_HS; // due to ARM target..
+ }
+ EMITH_SJMP_START(tmp);
+ emith_bic_r_imm_c(tmp2, tmp3, T);
+ EMITH_SJMP_END(tmp);
+ EMITH_SJMP_START(tmp2);
+ emith_or_r_imm_c(tmp, tmp3, T);
+ EMITH_SJMP_END(tmp2);
+ goto end_op;
+ case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
+ case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
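+ // T = signed overflow of Rn +/- Rm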
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ tmp3 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_bic_r_imm(tmp3, T);
+ if (op & 4) {
+ emith_addf_r_r(tmp, tmp2);
+ } else
+ emith_subf_r_r(tmp, tmp2);
+ EMITH_SJMP_START(DCOND_VC);
+ emith_or_r_imm_c(DCOND_VS, tmp3, T);
+ EMITH_SJMP_END(DCOND_VC);
+ goto end_op;
+ case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
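+ // MACH:MACL = (s64)Rn * (s64)Rm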
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
+ tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
+ tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
+ emith_mul_s64(tmp3, tmp4, tmp, tmp2);
+ goto end_op;
+ }
+ goto default_;
+
+ /////////////////////////////////////////////
case 0x04:
- switch (op & 0x0f) {
+ switch (op & 0x0f)
+ {
case 0x00:
- if ((op & 0xf0) != 0x10)
- goto default_;
- // DT Rn 0100nnnn00010000
- if (p32x_sh2_read16(pc, sh2) == 0x8bfd) { // BF #-2
- emith_sh2_dtbf_loop();
+ switch (GET_Fx())
+ {
+ case 0: // SHLL Rn 0100nnnn00000000
+ case 2: // SHAL Rn 0100nnnn00100000
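+ // SHLL and SHAL are the same operation: Rn <<= 1, T = old bit 31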
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_bic_r_imm(tmp2, T);
+ emith_lslf(tmp, tmp, 1);
+ EMITH_SJMP_START(DCOND_CC);
+ emith_or_r_imm_c(DCOND_CS, tmp2, T);
+ EMITH_SJMP_END(DCOND_CC);
+ goto end_op;
+ case 1: // DT Rn 0100nnnn00010000
+ if (p32x_sh2_read16(pc, sh2) == 0x8bfd) { // BF #-2
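+ // DT with a BF straight back to it is a countdown delay loop - emit one fused handler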
+ emith_sh2_dtbf_loop();
+ goto end_op;
+ }
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_bic_r_imm(tmp2, T);
+ emith_subf_r_imm(tmp, 1);
+ EMITH_SJMP_START(DCOND_NE);
+ emith_or_r_imm_c(DCOND_EQ, tmp2, T);
+ EMITH_SJMP_END(DCOND_NE);
goto end_op;
}
- tmp = rcache_get_reg((op >> 8) & 0x0f, RC_GR_RMW);
- tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
- emith_bic_r_imm(tmp2, T);
- emith_subf_r_imm(tmp, 1);
- EMITH_SJMP_START(DCOND_NE);
- emith_or_r_imm_c(DCOND_EQ, tmp2, T);
- EMITH_SJMP_END(DCOND_NE);
- goto end_op;
+ goto default_;
case 0x07:
if ((op & 0xf0) != 0)
goto default_;
}
goto default_;
+ /////////////////////////////////////////////
case 0x08:
switch (op & 0x0f00) {
// BT/S label 10001101dddddddd
}}
goto default_;
+ /////////////////////////////////////////////
case 0x0a:
// BRA label 1010dddddddddddd
DELAYED_OP;
cycles++;
break;
+ /////////////////////////////////////////////
case 0x0b:
// BSR label 1011dddddddddddd
DELAYED_OP;
global sh2_drc_entry ; SH2 *sh2, void *block
sh2_drc_entry:
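+    ; ebx is callee-saved and now used as a temporary by generated code, so preserve it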
+ push ebx
push ebp
- mov ebp, [esp+4+4] ; context
- mov eax, [esp+4+8]
+ mov ebp, [esp+8+4] ; context
+ mov eax, [esp+8+8]
jmp eax
global sh2_drc_exit
sh2_drc_exit:
pop ebp
+ pop ebx
ret
pico/carthw/svp/compiler.o : ../../cpu/drc/emit_arm.c
cpu/sh2/compiler.o : ../../cpu/drc/emit_x86.c
+cpu/sh2/mame/sh2pico.o : ../../cpu/sh2/mame/sh2.c
pico/pico.o pico/cd/pico.o : ../../pico/pico_cmn.c ../../pico/pico_int.h
pico/memory.o pico/cd/memory.o : ../../pico/pico_int.h ../../pico/memory.h