#define A_COND_LT 0xb
#define A_COND_GT 0xc
#define A_COND_LE 0xd
+#define A_COND_CS A_COND_HS
+#define A_COND_CC A_COND_LO
/* unified conditions */
#define DCOND_EQ A_COND_EQ
#define DCOND_LE A_COND_LE
#define DCOND_VS A_COND_VS
#define DCOND_VC A_COND_VC
-#define DCOND_CS A_COND_HS
-#define DCOND_CC A_COND_LO
/* addressing mode 1 */
#define A_AM1_LSL 0
if (op == A_OP_MOV)
rn = 0;
- else if (op == A_OP_TST || op == A_OP_TEQ)
- rd = 0;
else if (imm == 0)
return;
}
}
+// test/compare op with immediate (TST/TEQ/CMP/CMN): S bit always set,
+// result register is 0 (discarded). imm must fit ARM's AM1 encoding
+// (8-bit value rotated right by an even amount) - see note at call sites
+#define emith_top_imm(cond, op, r, imm) { \
+	u32 ror2, v; \
+	/* strip trailing zero bit-pairs from imm; ror2 counts the */ \
+	/* rotation steps (negated) for the AM1 rotate field */ \
+	for (ror2 = 0, v = imm; v && !(v & 3); v >>= 2) \
+		ror2--; \
+	EOP_C_DOP_IMM(cond, op, 1, r, 0, ror2 & 0x0f, v & 0xff); \
+}
+
#define is_offset_24(val) \
((val) >= (int)0xff000000 && (val) <= 0x00ffffff)
#define emith_or_r_imm(r, imm) \
emith_op_imm(A_COND_AL, 0, A_OP_ORR, r, imm)
-// note: use 8bit imm only
+// note: only use 8bit imm for these
#define emith_tst_r_imm(r, imm) \
- emith_op_imm(A_COND_AL, 1, A_OP_TST, r, imm)
+ emith_top_imm(A_COND_AL, A_OP_TST, r, imm)
+
+#define emith_cmp_r_imm(r, imm) \
+ emith_top_imm(A_COND_AL, A_OP_CMP, r, imm)
#define emith_subf_r_imm(r, imm) \
emith_op_imm(A_COND_AL, 1, A_OP_SUB, r, imm)
#define emith_lsr(d, s, cnt) \
EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_LSR,cnt)
+// d = s rotated right by cnt; flags untouched
+#define emith_ror(d, s, cnt) \
+	EOP_MOV_REG(A_COND_AL,0,d,s,A_AM1_ROR,cnt)
+
#define emith_lslf(d, s, cnt) \
EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSL,cnt)
+// logical shift right with flag update (C = last bit shifted out)
+#define emith_lsrf(d, s, cnt) \
+	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_LSR,cnt)
+
#define emith_asrf(d, s, cnt) \
EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ASR,cnt)
+// rotate left implemented as ROR by (32-cnt)
+// note: only C flag updated correctly - the trailing TST recomputes C
+// from bit 0 of the result (= the bit just rotated in), but it also
+// clobbers N/Z with the value of d & (d >> 1)
+#define emith_rolf(d, s, cnt) { \
+	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,32-(cnt)); \
+	/* we don't have ROL so we shift to get the right carry */ \
+	EOP_TST_REG(A_COND_AL,d,d,A_AM1_LSR,1); \
+}
+
+// rotate right with flag update (C = last bit rotated out)
+#define emith_rorf(d, s, cnt) \
+	EOP_MOV_REG(A_COND_AL,1,d,s,A_AM1_ROR,cnt)
+
+// rotate left through carry: d = (d << 1) | C, new C = old bit 31
+// (ADC d,d,d with flags is exactly that)
+#define emith_rolcf(d) \
+	emith_adcf_r_r(d, d)
+
+// rotate right through carry: d = (C << 31) | (d >> 1), new C = old bit 0
+#define emith_rorcf(d) \
+	EOP_MOV_REG(A_COND_AL,1,d,d,A_AM1_ROR,0) /* ROR #0 -> RRX */
+
#define emith_mul(d, s1, s2) { \
if ((d) != (s1)) /* rd != rm limitation */ \
EOP_MUL(d, s1, s2); \
rcache_free_tmp(tmp); \
}
+// replace the low 12 bits of SR with the low 12 bits of srcr, keeping
+// the upper 20 bits of SR intact (same contract as the x86 backend):
+// SR >>= 12; SR |= srcr << 20; SR = ror(SR, 20)
+#define emith_write_sr(srcr) { \
+	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
+	emith_lsr(srr, srr, 12); \
+	emith_or_r_r_r_lsl(srr, srr, srcr, 20); \
+	emith_ror(srr, srr, 20); \
+}
+
+// copy the host carry flag into bit 0 (T) of srr;
+// ARM subtraction yields an inverted carry (C=1 means "no borrow"),
+// so the set/clear conditions are swapped for the is_sub case
+#define emith_carry_to_t(srr, is_sub) { \
+	if (is_sub) { /* has inverted C on ARM */ \
+		emith_or_r_imm_c(A_COND_CC, srr, 1); \
+		emith_bic_r_imm_c(A_COND_CS, srr, 1); \
+	} else { \
+		emith_or_r_imm_c(A_COND_CS, srr, 1); \
+		emith_bic_r_imm_c(A_COND_CC, srr, 1); \
+	} \
+}
#define DCOND_LT IOP_JL // less (signed)
#define DCOND_VS IOP_JO // oVerflow Set
#define DCOND_VC IOP_JNO // oVerflow Clear
-#define DCOND_CS IOP_JB // Carry Set
-#define DCOND_CC IOP_JAE // Carry Clear
#define EMIT_PTR(ptr, val, type) \
*(type *)(ptr) = val
EMIT(imm, u32); \
}
-// 2 - adc, 3 - sbb, 6 - xor, 7 - cmp
+// 2 - adc, 3 - sbb, 6 - xor
#define emith_add_r_imm(r, imm) \
emith_arith_r_imm(0, r, imm)
#define emith_sub_r_imm(r, imm) \
emith_arith_r_imm(5, r, imm)
+// CMP r, imm32 (arith group 1, subcode 7 - cmp)
+#define emith_cmp_r_imm(r, imm) \
+	emith_arith_r_imm(7, r, imm)
+
#define emith_tst_r_imm(r, imm) { \
EMIT_OP_MODRM(0xf7, 3, 0, r); \
EMIT(imm, u32); \
#define emith_asr(d, s, cnt) \
emith_shift(7, d, s, cnt)
+// rotates via the shift group: subcode 0 = ROL, 1 = ROR
+#define emith_rol(d, s, cnt) \
+	emith_shift(0, d, s, cnt)
+
+#define emith_ror(d, s, cnt) \
+	emith_shift(1, d, s, cnt)
+
+// rotate through carry by 1: opcode 0xd1, /2 = RCL, /3 = RCR
+#define emith_rolc(r) \
+	EMIT_OP_MODRM(0xd1, 3, 2, r)
+
+#define emith_rorc(r) \
+	EMIT_OP_MODRM(0xd1, 3, 3, r)
+
+
// misc
#define emith_push(r) \
EMIT_OP(0x50 + (r))
#define emith_adcf_r_r emith_adc_r_r
#define emith_sbcf_r_r emith_sbc_r_r
-#define emith_lslf emith_lsl
-#define emith_lsrf emith_lsr
-#define emith_asrf emith_asr
+#define emith_lslf emith_lsl
+#define emith_lsrf emith_lsr
+#define emith_asrf emith_asr
+#define emith_rolf emith_rol
+#define emith_rorf emith_ror
+#define emith_rolcf emith_rolc
+#define emith_rorcf emith_rorc
// XXX: offs is 8bit only
#define emith_ctx_read(r, offs) { \
rcache_free_tmp(tmp); \
}
+// replace the low 12 bits of SR with the low 12 bits of srcr
+// (clear_msb keeps srcr's low 12 bits, bic clears SR's low 12)
+#define emith_write_sr(srcr) { \
+	int tmp = rcache_get_tmp(); \
+	int srr = rcache_get_reg(SHR_SR, RC_GR_RMW); \
+	emith_clear_msb(tmp, srcr, 20); \
+	emith_bic_r_imm(srr, 0xfff); \
+	emith_or_r_r(srr, tmp); \
+	rcache_free_tmp(tmp); \
+}
+
+// copy host carry into bit 0 (T) of srr via SETC into a tmp register;
+// is_sub is ignored: x86 carry after SUB/SBB already equals the borrow.
+// NOTE(review): SETC needs a byte-addressable register (eax..ebx in
+// 32-bit mode) - presumably rcache_get_tmp() only hands those out;
+// TODO confirm
+#define emith_carry_to_t(srr, is_sub) { \
+	int tmp = rcache_get_tmp(); \
+	EMIT_OP(0x0f); \
+	EMIT(0x92, u8); \
+	EMIT_MODRM(3, 0, tmp); /* SETC */ \
+	emith_bic_r_imm(srr, 1); \
+	EMIT_OP_MODRM(0x08, 3, tmp, srr); /* OR srrl, tmpl */ \
+	rcache_free_tmp(tmp); \
+}
+
SWAP.W Rm,Rn 0110nnnnmmmm1001
ADD #imm,Rn 0111nnnniiiiiiii
CMP/EQ #imm,R0 10001000iiiiiiii
-CMP/PZ Rn 0100nnnn00010001
-CMP/PL Rn 0100nnnn00010101
EXTS.B Rm,Rn 0110nnnnmmmm1110
EXTS.W Rm,Rn 0110nnnnmmmm1111
EXTU.B Rm,Rn 0110nnnnmmmm1100
TST.B #imm,@(R0,GBR) 11001100iiiiiiii
XOR #imm,R0 11001010iiiiiiii
XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
-ROTL Rn 0100nnnn00000100
-ROTR Rn 0100nnnn00000101
-ROTCL Rn 0100nnnn00100100
-ROTCR Rn 0100nnnn00100101
-SHAR Rn 0100nnnn00100001
-SHLR Rn 0100nnnn00000001
SHLL2 Rn 0100nnnn00001000
SHLR2 Rn 0100nnnn00001001
SHLL8 Rn 0100nnnn00011000
SHLR16 Rn 0100nnnn00101001
LDC Rm,GBR 0100mmmm00011110
LDC Rm,VBR 0100mmmm00101110
-LDC.L @Rm+,GBR 0100mmmm00010111
-LDC.L @Rm+,VBR 0100mmmm00100111
LDS Rm,MACH 0100mmmm00001010
LDS Rm,MACL 0100mmmm00011010
LDS Rm,PR 0100mmmm00101010
-LDS.L @Rm+,MACH 0100mmmm00000110
-LDS.L @Rm+,MACL 0100mmmm00010110
-LDS.L @Rm+,PR 0100mmmm00100110
-STC.L SR,@–Rn 0100nnnn00000011
-STC.L GBR,@–Rn 0100nnnn00010011
-STC.L VBR,@–Rn 0100nnnn00100011
-STS.L MACH,@–Rn 0100nnnn00000010
-STS.L MACL,@–Rn 0100nnnn00010010
-STS.L PR,@–Rn 0100nnnn00100010
TRAPA #imm 11000011iiiiiiii
*/
#define GET_Rn() \
((op >> 8) & 0x0f)
-#define CHECK_FX_GT_3() \
- if (GET_Fx() > 3) \
+#define CHECK_FX_LT(n) \
+ if (GET_Fx() < n) \
goto default_
static void *sh2_translate(SH2 *sh2, block_desc *other_block)
default:
goto default_;
}
- tmp2 = rcache_get_reg(tmp2, RC_GR_READ);
- emith_move_r_r(tmp, tmp2);
+ tmp3 = rcache_get_reg(tmp2, RC_GR_READ);
+ emith_move_r_r(tmp, tmp3);
+ if (tmp2 == SHR_SR)
+ emith_clear_msb(tmp, tmp, 20); // reserved bits defined by ISA as 0
goto end_op;
case 0x03:
CHECK_UNHANDLED_BITS(0xd0);
switch (GET_Fx())
{
case 0: // STS MACH,Rn 0000nnnn00001010
- tmp2 = rcache_get_reg(SHR_MACH, RC_GR_READ);
+ tmp2 = SHR_MACH;
break;
case 1: // STS MACL,Rn 0000nnnn00011010
- tmp2 = rcache_get_reg(SHR_MACL, RC_GR_READ);
+ tmp2 = SHR_MACL;
break;
case 2: // STS PR,Rn 0000nnnn00101010
- tmp2 = rcache_get_reg(SHR_PR, RC_GR_READ);
+ tmp2 = SHR_PR;
break;
default:
goto default_;
}
+ tmp2 = rcache_get_reg(tmp2, RC_GR_READ);
emith_move_r_r(tmp, tmp2);
goto end_op;
case 0x0b:
if (op & 4) { // adc
emith_set_carry(tmp3);
emith_adcf_r_r(tmp, tmp2);
- tmp = DCOND_CS; // set condition
- tmp2 = DCOND_CC; // clear condition
+ emith_carry_to_t(tmp3, 0);
} else {
emith_set_carry_sub(tmp3);
emith_sbcf_r_r(tmp, tmp2);
- tmp = DCOND_LO; // using LO/HS instead of CS/CC
- tmp2 = DCOND_HS; // due to ARM target..
+ emith_carry_to_t(tmp3, 1);
}
- EMITH_SJMP_START(tmp);
- emith_bic_r_imm_c(tmp2, tmp3, T);
- EMITH_SJMP_END(tmp);
- EMITH_SJMP_START(tmp2);
- emith_or_r_imm_c(tmp, tmp3, T);
- EMITH_SJMP_END(tmp2);
goto end_op;
case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
case 2: // SHAL Rn 0100nnnn00100000
tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
- emith_bic_r_imm(tmp2, T);
emith_lslf(tmp, tmp, 1);
- EMITH_SJMP_START(DCOND_CC);
- emith_or_r_imm_c(DCOND_CS, tmp2, T);
- EMITH_SJMP_END(DCOND_CC);
+ emith_carry_to_t(tmp2, 0);
goto end_op;
case 1: // DT Rn 0100nnnn00010000
if (p32x_sh2_read16(pc, sh2) == 0x8bfd) { // BF #-2
goto end_op;
}
goto default_;
- case 0x07:
- if ((op & 0xf0) != 0)
+ case 0x01:
+ switch (GET_Fx())
+ {
+ case 0: // SHLR Rn 0100nnnn00000001
+ case 2: // SHAR Rn 0100nnnn00100001
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ if (op & 0x20) {
+ emith_asrf(tmp, tmp, 1);
+ } else
+ emith_lsrf(tmp, tmp, 1);
+ emith_carry_to_t(tmp2, 0);
+ goto end_op;
+ case 1: // CMP/PZ Rn 0100nnnn00010001
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_bic_r_imm(tmp2, T);
+ emith_cmp_r_imm(tmp, 0);
+ EMITH_SJMP_START(DCOND_LT);
+ emith_or_r_imm_c(DCOND_GE, tmp2, T);
+ EMITH_SJMP_END(DCOND_LT);
+ goto end_op;
+ }
+ goto default_;
+ case 0x02:
+ case 0x03:
+ switch (op & 0x3f)
+ {
+      case 0x02: // STS.L MACH,@-Rn  0100nnnn00000010
+ tmp = SHR_MACH;
+ break;
+      case 0x12: // STS.L MACL,@-Rn  0100nnnn00010010
+ tmp = SHR_MACL;
+ break;
+      case 0x22: // STS.L PR,@-Rn    0100nnnn00100010
+ tmp = SHR_PR;
+ break;
+      case 0x03: // STC.L SR,@-Rn    0100nnnn00000011
+ tmp = SHR_SR;
+ break;
+      case 0x13: // STC.L GBR,@-Rn   0100nnnn00010011
+ tmp = SHR_GBR;
+ break;
+      case 0x23: // STC.L VBR,@-Rn   0100nnnn00100011
+ tmp = SHR_VBR;
+ break;
+ default:
goto default_;
- // LDC.L @Rm+,SR 0100mmmm00000111
- test_irq = 1;
+ }
+ tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ emith_sub_r_imm(tmp2, 4);
+ rcache_clean();
+ rcache_get_reg_arg(0, GET_Rn());
+ tmp3 = rcache_get_reg_arg(1, tmp);
+ if (tmp == SHR_SR)
+ emith_clear_msb(tmp3, tmp3, 20); // reserved bits defined by ISA as 0
+ emit_memhandler_write(2);
+ goto end_op;
+ case 0x04:
+ case 0x05:
+ switch (op & 0x3f)
+ {
+ case 0x04: // ROTL Rn 0100nnnn00000100
+ case 0x05: // ROTR Rn 0100nnnn00000101
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ if (op & 1) {
+ emith_rorf(tmp, tmp, 1);
+ } else
+ emith_rolf(tmp, tmp, 1);
+ emith_carry_to_t(tmp2, 0);
+ goto end_op;
+ case 0x24: // ROTCL Rn 0100nnnn00100100
+ case 0x25: // ROTCR Rn 0100nnnn00100101
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_set_carry(tmp2);
+ if (op & 1) {
+ emith_rorcf(tmp);
+ } else
+ emith_rolcf(tmp);
+ emith_carry_to_t(tmp2, 0);
+ goto end_op;
+ case 0x15: // CMP/PL Rn 0100nnnn00010101
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ tmp2 = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ emith_bic_r_imm(tmp2, T);
+ emith_cmp_r_imm(tmp, 0);
+ EMITH_SJMP_START(DCOND_LE);
+ emith_or_r_imm_c(DCOND_GT, tmp2, T);
+ EMITH_SJMP_END(DCOND_LE);
+ goto end_op;
+ }
goto default_;
+ case 0x06:
+ case 0x07:
+ switch (op & 0x3f)
+ {
+ case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
+ tmp = SHR_MACH;
+ break;
+ case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
+ tmp = SHR_MACL;
+ break;
+ case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
+ tmp = SHR_PR;
+ break;
+ case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
+ tmp = SHR_SR;
+ break;
+ case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
+ tmp = SHR_GBR;
+ break;
+ case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
+ tmp = SHR_VBR;
+ break;
+ default:
+ goto default_;
+ }
+ rcache_clean();
+ rcache_get_reg_arg(0, GET_Rn());
+ tmp2 = emit_memhandler_read(2);
+ if (tmp == SHR_SR) {
+ emith_write_sr(tmp2);
+ test_irq = 1;
+ } else {
+ tmp = rcache_get_reg(tmp, RC_GR_WRITE);
+ emith_move_r_r(tmp, tmp2);
+ }
+ rcache_free_tmp(tmp2);
+ tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
+ emith_add_r_imm(tmp, 4);
+ goto end_op;
case 0x0b:
if ((op & 0xd0) != 0)
goto default_;