+/* handle writes r0 to (rX). Trashes r1.
+ * fortunately we can ignore modulo increment modes for writes. */
+static void tr_rX_write(int op)
+{
+ if ((op&3) == 3)
+ {
+ int mod = (op>>2) & 3; // direct addressing
+ tr_bank_write((op & 0x100) + mod);
+ }
+ else
+ {
+ int r = (op&3) | ((op>>6)&4); // SSP pointer reg index 0-7
+ if (known_regb & (1 << (r + 8))) {
+ // rX value known at translation time: write to a constant bank slot
+ tr_bank_write((op&0x100) | known_regs.r[r]);
+ } else {
+ // rX unknown: compute the bank address at run time.
+ // r0-r3 live as packed bytes in ARM r8, r4-r7 in r9
+ int reg = (r < 4) ? 8 : 9;
+ int ror = ((4 - (r&3))*8) & 0x1f;
+ EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{8,9}, <mask> @ isolate the rX byte
+ if (r >= 4)
+ EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift @ bank bit for r4-r7 (presumably bank select — confirm)
+ if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr @ r7 = SSP context base
+ else EOP_ADD_REG_LSL(1,7,1,1);
+ EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
+ hostreg_r[1] = -1; // r1 no longer caches any SSP value
+ }
+ tr_ptrr_mod(r, (op>>2) & 3, 0, 1); // apply post-increment/modulo mode
+ }
+}
+
+/* read (rX) to r0. Trashes r1-r3. */
+static void tr_rX_read(int r, int mod)
+{
+ if ((r&3) == 3)
+ {
+ tr_bank_read(((r << 6) & 0x100) + mod); // direct addressing
+ }
+ else
+ {
+ if (known_regb & (1 << (r + 8))) {
+ // rX value known at translation time
+ tr_bank_read(((r << 6) & 0x100) | known_regs.r[r]);
+ } else {
+ // run-time address: r0-r3 live as packed bytes in ARM r8, r4-r7 in r9
+ int reg = (r < 4) ? 8 : 9;
+ int ror = ((4 - (r&3))*8) & 0x1f;
+ EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{8,9}, <mask> @ isolate the rX byte
+ if (r >= 4)
+ EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift @ bank bit for r4-r7
+ if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
+ else EOP_ADD_REG_LSL(1,7,1,1);
+ EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
+ hostreg_r[0] = hostreg_r[1] = -1; // both host caches clobbered
+ }
+ tr_ptrr_mod(r, mod, 1, 1); // apply post-increment/modulo mode
+ }
+}
+
+/* read ((rX)) to r0. Trashes r1,r2. */
+static void tr_rX_read2(int op)
+{
+ int r = (op&3) | ((op>>6)&4); // src
+
+ // step 1: load the bank word pointed to by rX into r0 (and keep the
+ // bank address in r1 when it had to be computed at run time)
+ if ((r&3) == 3) {
+ tr_bank_read((op&0x100) | ((op>>2)&3));
+ } else if (known_regb & (1 << (r+8))) {
+ tr_bank_read((op&0x100) | known_regs.r[r]);
+ } else {
+ int reg = (r < 4) ? 8 : 9;
+ int ror = ((4 - (r&3))*8) & 0x1f;
+ EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{8,9}, <mask>
+ if (r >= 4)
+ EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
+ if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
+ else EOP_ADD_REG_LSL(1,7,1,1);
+ EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
+ }
+ // step 2: use that word as a halfword index into iram/rom, then
+ // write the incremented value back to the same bank slot
+ EOP_LDR_IMM(2,7,0x48c); // ptr_iram_rom
+ EOP_ADD_REG_LSL(2,2,0,1); // add r2, r2, r0, lsl #1
+ EOP_ADD_IMM(0,0,0,1); // add r0, r0, #1
+ if ((r&3) == 3) {
+ tr_bank_write((op&0x100) | ((op>>2)&3));
+ } else if (known_regb & (1 << (r+8))) {
+ tr_bank_write((op&0x100) | known_regs.r[r]);
+ } else {
+ EOP_STRH_SIMPLE(0,1); // strh r0, [r1] @ r1 still holds the bank address
+ hostreg_r[1] = -1;
+ }
+ EOP_LDRH_SIMPLE(0,2); // ldrh r0, [r2] @ the actual ((rX)) value
+ hostreg_r[0] = hostreg_r[2] = -1;
+}
+
+/* get ARM cond which would mean that SSP cond is satisfied. No trash. */
+static int tr_cond_check(int op)
+{
+ int f = (op & 0x100) >> 8; // 'f' bit: which flag value satisfies the cond
+ switch (op&0xf0) {
+ case 0x00: return A_COND_AL; /* always true */
+ case 0x50: /* Z matches f(?) bit */
+ // ST dirty appears to mean the ARM flags still hold the last ALU
+ // result (NOTE(review): confirm), so use them directly...
+ if (dirty_regb & KRREG_ST) return f ? A_COND_EQ : A_COND_NE;
+ // ...otherwise test the flag copy kept in r6 (tst r6, #4)
+ EOP_TST_IMM(6, 0, 4);
+ return f ? A_COND_NE : A_COND_EQ;
+ case 0x70: /* N matches f(?) bit */
+ if (dirty_regb & KRREG_ST) return f ? A_COND_MI : A_COND_PL;
+ EOP_TST_IMM(6, 0, 8); // tst r6, #8
+ return f ? A_COND_NE : A_COND_EQ;
+ default:
+ printf("unimplemented cond?\n");
+ tr_unhandled();
+ return 0;
+ }
+}
+
+/* return the ARM condition code opposite to 'cond'.
+ * AL cannot be negated here; any unknown cond is fatal. */
+static int tr_neg_cond(int cond)
+{
+ if (cond == A_COND_EQ) return A_COND_NE;
+ if (cond == A_COND_NE) return A_COND_EQ;
+ if (cond == A_COND_MI) return A_COND_PL;
+ if (cond == A_COND_PL) return A_COND_MI;
+ if (cond == A_COND_AL) { printf("neg for AL?\n"); exit(1); }
+ printf("bad cond for neg\n"); exit(1);
+ return 0;
+}
+
+// SSP_GR0, SSP_X, SSP_Y, SSP_A,
+// SSP_ST, SSP_STACK, SSP_PC, SSP_P,
+//@ r4: XXYY
+//@ r5: A
+//@ r6: STACK and emu flags
+//@ r7: SSP context
+//@ r10: P
+
+// read general reg to r0. Trashes r1
+static void tr_GR0_to_r0(void)
+{
+ tr_mov16(0, 0xffff); // GR0 always reads as 0xffff
+}
+
+// read X (high half of r4) to r0; skipped if r0 already caches X
+static void tr_X_to_r0(void)
+{
+ if (hostreg_r[0] != (SSP_X<<16)) {
+ EOP_MOV_REG_LSR(0, 4, 16); // mov r0, r4, lsr #16
+ hostreg_r[0] = SSP_X<<16;
+ }
+}
+
+// read Y to r0; skipped if r0 already caches Y.
+// Note: copies the whole of r4, so r0's high half still holds X
+// (presumably why the TODO below is here).
+static void tr_Y_to_r0(void)
+{
+ // TODO..
+ if (hostreg_r[0] != (SSP_Y<<16)) {
+ EOP_MOV_REG_SIMPLE(0, 4); // mov r0, r4
+ hostreg_r[0] = SSP_Y<<16;
+ }
+}
+
+// read A (AH, high half of r5) to r0; skipped if r0 already caches A
+static void tr_A_to_r0(void)
+{
+ if (hostreg_r[0] != (SSP_A<<16)) {
+ EOP_MOV_REG_LSR(0, 5, 16); // mov r0, r5, lsr #16 @ AH
+ hostreg_r[0] = SSP_A<<16;
+ }
+}
+
+// read ST to r0. ST is kept shifted left by 4 in r6 (see tr_r0_to_ST);
+// only the 0x67 bits are reconstructed.
+static void tr_ST_to_r0(void)
+{
+ // VR doesn't need much accuracy here..
+ EOP_MOV_REG_LSR(0, 6, 4); // mov r0, r6, lsr #4
+ EOP_AND_IMM(0, 0, 0, 0x67); // and r0, r0, #0x67
+ hostreg_r[0] = -1;
+}
+
+// pop STACK to r0: pre-decrement the stack counter (kept in the top
+// bits of r6) and read the top entry from the stack at context+0x448
+static void tr_STACK_to_r0(void)
+{
+ // 448
+ EOP_SUB_IMM(6, 6, 8/2, 0x20); // sub r6, r6, #1<<29 @ STACK--
+ EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
+ EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
+ EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28 @ + 2*counter (halfword slots)
+ EOP_LDRH_SIMPLE(0, 1); // ldrh r0, [r1]
+ hostreg_r[0] = hostreg_r[1] = -1;
+}
+
+// read PC to r0; PC is always known at translation time, so emit a constant
+static void tr_PC_to_r0(void)
+{
+ tr_mov16(0, known_regs.gr[SSP_PC].h);
+}
+
+// read P (high half of r10) to r0, making sure r10 is up to date first
+static void tr_P_to_r0(void)
+{
+ tr_flush_dirty_P();
+ EOP_MOV_REG_LSR(0, 10, 16); // mov r0, r10, lsr #16
+ hostreg_r[0] = -1;
+}
+
+typedef void (tr_read_func)(void);
+
+// dispatch table: read SSP general register (index 0-7) into r0
+static tr_read_func *tr_read_funcs[8] =
+{
+ tr_GR0_to_r0,
+ tr_X_to_r0,
+ tr_Y_to_r0,
+ tr_A_to_r0,
+ tr_ST_to_r0,
+ tr_STACK_to_r0,
+ tr_PC_to_r0,
+ tr_P_to_r0
+};
+
+
+// write r0 to general reg handlers. Trashes r1
+// Common bookkeeping after storing r0 into SSP register 'reg': record
+// the r0<->reg association and track whether the register's value is
+// known at translation time (const_val != -1).
+// NOTE: expects a 'const_val' variable in the expansion scope.
+#define TR_WRITE_R0_TO_REG(reg) \
+{ \
+ hostreg_sspreg_changed(reg); \
+ hostreg_r[0] = (reg)<<16; \
+ if (const_val != -1) { \
+ known_regs.gr[reg].h = const_val; \
+ known_regb |= 1 << (reg); \
+ } else { \
+ known_regb &= ~(1 << (reg)); \
+ } \
+}
+
+// write r0 to GR0
+static void tr_r0_to_GR0(int const_val)
+{
+ // do nothing - writes to GR0 are ignored (it always reads 0xffff)
+}
+
+// write r0 to X (high half of r4), preserving Y in the low half
+static void tr_r0_to_X(int const_val)
+{
+ EOP_MOV_REG_LSL(4, 4, 16); // mov r4, r4, lsl #16
+ EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16 @ clear old X
+ EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
+ dirty_regb |= KRREG_P; // touching X or Y makes P dirty.
+ TR_WRITE_R0_TO_REG(SSP_X);
+}
+
+// write r0 to Y (low half of r4), preserving X: rotate so the halves
+// swap, replace, then rotate back
+static void tr_r0_to_Y(int const_val)
+{
+ EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16 @ drop old Y, X now low
+ EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
+ EOP_MOV_REG_ROR(4, 4, 16); // mov r4, r4, ror #16 @ restore X high / Y low
+ dirty_regb |= KRREG_P; // touching X or Y makes P dirty
+ TR_WRITE_R0_TO_REG(SSP_Y);
+}
+
+// write r0 to AH (high half of r5), preserving AL in the low half
+static void tr_r0_to_A(int const_val)
+{
+ EOP_MOV_REG_LSL(5, 5, 16); // mov r5, r5, lsl #16
+ EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16 @ AL
+ EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
+ TR_WRITE_R0_TO_REG(SSP_A);
+}
+
+// write r0 to ST: only the 0x67 bits are kept, stored shifted left by 4
+// in r6; the stack counter in the top three bits of r6 is preserved
+static void tr_r0_to_ST(int const_val)
+{
+ // VR doesn't need much accuracy here..
+ EOP_AND_IMM(1, 0, 0, 0x67); // and r1, r0, #0x67
+ EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
+ EOP_ORR_REG_LSL(6, 6, 1, 4); // orr r6, r6, r1, lsl #4
+ TR_WRITE_R0_TO_REG(SSP_ST);
+ hostreg_r[1] = -1;
+ dirty_regb &= ~KRREG_ST; // r6 now holds ST exactly
+}
+
+// push r0 to STACK: store at context+0x448 + 2*counter, then
+// post-increment the counter kept in the top bits of r6
+static void tr_r0_to_STACK(int const_val)
+{
+ // 448
+ EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
+ EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
+ EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28 @ + 2*counter
+ EOP_STRH_SIMPLE(0, 1); // strh r0, [r1]
+ EOP_ADD_IMM(6, 6, 8/2, 0x20); // add r6, r6, #1<<29 @ STACK++
+ hostreg_r[1] = -1;
+}
+
+// write r0 to PC in the SSP context (gr[SSP_PC], stored in the high
+// half of the 32-bit slot). Trashes r1.
+static void tr_r0_to_PC(int const_val)
+{
+ EOP_MOV_REG_LSL(1, 0, 16); // mov r1, r0, lsl #16
+ EOP_STR_IMM(1,7,0x400+6*4); // str r1, [r7, #(0x400+6*4)]
+ hostreg_r[1] = -1;
+}
+
+typedef void (tr_write_func)(int const_val);
+
+// dispatch table: write r0 to SSP general register (index 0-7);
+// writing P (index 7) is not supported
+static tr_write_func *tr_write_funcs[8] =
+{
+ tr_r0_to_GR0,
+ tr_r0_to_X,
+ tr_r0_to_Y,
+ tr_r0_to_A,
+ tr_r0_to_ST,
+ tr_r0_to_STACK,
+ tr_r0_to_PC,
+ (tr_write_func *)tr_unhandled
+};
+
+// load X from (ri) and Y from (rj) for a multiply op: X goes to the
+// high half of r4, Y to the low half. Marks P dirty and invalidates
+// any known X/Y values. Trashes r0-r3.
+static void tr_mac_load_XY(int op)
+{
+ tr_rX_read(op&3, (op>>2)&3); // X
+ EOP_MOV_REG_LSL(4, 0, 16); // mov r4, r0, lsl #16 @ X to high half
+ tr_rX_read(((op>>4)&3)|4, (op>>6)&3); // Y
+ EOP_ORR_REG_SIMPLE(4, 0); // orr r4, r4, r0 @ Y to low half
+ dirty_regb |= KRREG_P;
+ hostreg_sspreg_changed(SSP_X);
+ hostreg_sspreg_changed(SSP_Y);
+ known_regb &= ~KRREG_X;
+ known_regb &= ~KRREG_Y;
+}
+
+/* map an SSP ALU opcode (insn bits 15-13) to the matching ARM
+ * data-processing opcode; unknown values abort translation. */
+static int tr_aop_ssp2arm(int op)
+{
+ if (op == 1) return A_OP_SUB;
+ if (op == 3) return A_OP_CMP;
+ if (op == 4) return A_OP_ADD;
+ if (op == 5) return A_OP_AND;
+ if (op == 6) return A_OP_ORR;
+ if (op == 7) return A_OP_EOR;
+
+ tr_unhandled();
+ return 0;
+}
+
+static int translate_op(unsigned int op, int *pc, int imm)
+{
+ u32 tmpv, tmpv2, tmpv3;
+ int ret = 0;
+ known_regs.gr[SSP_PC].h = *pc;
+
+ switch (op >> 9)
+ {
+ // ld d, s
+ case 0x00:
+ if (op == 0) { ret++; break; } // nop
+ tmpv = op & 0xf; // src
+ tmpv2 = (op >> 4) & 0xf; // dst
+ if (tmpv >= 8 || tmpv2 >= 8) return -1; // TODO
+ if (tmpv2 == SSP_A && tmpv == SSP_P) { // ld A, P
+ tr_flush_dirty_P();
+ EOP_MOV_REG_SIMPLE(5, 10);
+ hostreg_sspreg_changed(SSP_A); \
+ known_regb &= ~(KRREG_A|KRREG_AL);
+ ret++; break;
+ }
+ tr_read_funcs[tmpv]();
+ tr_write_funcs[tmpv2]((known_regb & (1 << tmpv)) ? known_regs.gr[tmpv].h : -1);
+ ret++; break;
+
+ // ld d, (ri)
+ case 0x01: {
+ // tmpv = ptr1_read(op); REG_WRITE((op & 0xf0) >> 4, tmpv); break;
+ int r = (op&3) | ((op>>6)&4);
+ int mod = (op>>2)&3;
+ tmpv = (op >> 4) & 0xf; // dst
+ if (tmpv >= 8) return -1; // TODO
+ if (tmpv != 0)
+ tr_rX_read(r, mod);
+ else tr_ptrr_mod(r, mod, 1, 1);
+ tr_write_funcs[tmpv](-1);
+ ret++; break;
+ }
+
+ // ld (ri), s
+ case 0x02:
+ tmpv = (op >> 4) & 0xf; // src
+ if (tmpv >= 8) return -1; // TODO
+ tr_read_funcs[tmpv]();
+ tr_rX_write(op);
+ ret++; break;
+
+ // ld a, adr
+ case 0x03:
+ tr_bank_read(op&0x1ff);
+ tr_r0_to_A(-1);
+ ret++; break;
+
+ // ldi d, imm
+ case 0x04:
+ tmpv = (op & 0xf0) >> 4;
+ if (tmpv < 8)
+ {
+ tr_mov16(0, imm);
+ tr_write_funcs[tmpv](imm);
+ ret += 2; break;
+ }
+ else if (tmpv == 0xe && (PROGRAM(*pc) >> 9) == 4)
+ {
+ // programming PMC..
+ (*pc)++;
+ tmpv = imm | (PROGRAM((*pc)++) << 16);
+ ret += 2;
+ emit_mov_const(A_COND_AL, 0, tmpv);
+ EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
+ EOP_STR_IMM(0,7,0x400+14*4); // PMC
+ // reads on fe06, fe08; next op is ld -,
+ if ((tmpv == 0x187f03 || tmpv == 0x187f04) && (PROGRAM(*pc) & 0xfff0) == 0)
+ {
+ int flag = (tmpv == 0x187f03) ? SSP_WAIT_30FE06 : SSP_WAIT_30FE08;
+ tr_flush_dirty_ST();
+ EOP_LDR_IMM(0,7,0x490); // dram_ptr
+ EOP_ADD_IMM(0,0,24/2,0xfe); // add r0, r0, #0xfe00
+ EOP_LDRH_IMM(0,0,(tmpv == 0x187f03) ? 6 : 8); // ldrh r0, [r0, #8]
+ EOP_TST_REG_SIMPLE(0,0);
+ EOP_C_DOP_IMM(A_COND_EQ,A_OP_ADD,0,11,11,22/2,1); // add r11, r11, #1024
+ EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1,24/2,flag>>8); // orr r1, r1, #SSP_WAIT_30FE08
+ }
+ EOP_ORR_IMM(1,1,0,SSP_PMC_SET); // orr r1, r1, #SSP_PMC_SET
+ EOP_STR_IMM(1,7,0x484); // str r1, [r7, #0x484] // emu_status
+ hostreg_r[0] = hostreg_r[1] = -1;
+ ret += 2; break;
+ }
+ else
+ return -1; /* TODO.. */
+
+ // ld d, ((ri))
+ case 0x05:
+ tmpv2 = (op >> 4) & 0xf; // dst
+ if (tmpv2 >= 8) return -1; // TODO
+ tr_rX_read2(op);
+ tr_write_funcs[tmpv2](-1);
+ ret += 3; break;
+
+ // ldi (ri), imm
+ case 0x06:
+ tr_mov16(0, imm);
+ tr_rX_write(op);
+ ret += 2; break;
+
+ // ld adr, a
+ case 0x07:
+ tr_A_to_r0();
+ tr_bank_write(op&0x1ff);
+ ret++; break;
+
+ // ld d, ri
+ case 0x09: {
+ int r;
+ r = (op&3) | ((op>>6)&4); // src
+ tmpv2 = (op >> 4) & 0xf; // dst
+ if (tmpv2 >= 8) tr_unhandled();
+ if ((r&3) == 3) tr_unhandled();
+
+ if (known_regb & (1 << (r+8))) {
+ tr_mov16(0, known_regs.r[r]);
+ tr_write_funcs[tmpv2](known_regs.r[r]);
+ } else {
+ int reg = (r < 4) ? 8 : 9;
+ if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
+ EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
+ hostreg_r[0] = -1;
+ tr_write_funcs[tmpv2](-1);
+ }
+ ret++; break;
+ }
+
+ // ld ri, s
+ case 0x0a: {
+ int r;
+ r = (op&3) | ((op>>6)&4); // dst
+ tmpv = (op >> 4) & 0xf; // src
+ if (tmpv >= 8) tr_unhandled();
+ if ((r&3) == 3) tr_unhandled();
+
+ if (known_regb & (1 << tmpv)) {
+ known_regs.r[r] = known_regs.gr[tmpv].h;
+ known_regb |= 1 << (r + 8);
+ dirty_regb |= 1 << (r + 8);
+ } else {
+ int reg = (r < 4) ? 8 : 9;
+ int ror = ((4 - (r&3))*8) & 0x1f;
+ tr_read_funcs[tmpv]();
+ EOP_BIC_IMM(reg, reg, ror/2, 0xff); // bic r{7,8}, r{7,8}, <mask>
+ EOP_AND_IMM(0, 0, 0, 0xff); // and r0, r0, 0xff
+ EOP_ORR_REG_LSL(reg, reg, 0, (r&3)*8); // orr r{7,8}, r{7,8}, r0, lsl #lsl
+ hostreg_r[0] = -1;
+ known_regb &= ~(1 << (r+8));
+ dirty_regb &= ~(1 << (r+8));
+ }
+ ret++; break;
+ }
+
+ // ldi ri, simm
+ case 0x0c ... 0x0f:
+ tmpv = (op>>8)&7;
+ known_regs.r[tmpv] = op;
+ known_regb |= 1 << (tmpv + 8);
+ dirty_regb |= 1 << (tmpv + 8);
+ ret++; break;
+
+ // call cond, addr
+ case 0x24: {
+ u32 *jump_op = NULL;
+ tmpv = tr_cond_check(op);
+ if (tmpv != A_COND_AL) {
+ jump_op = tcache_ptr;
+ EOP_MOV_IMM(0, 0, 0); // placeholder for branch
+ }
+ tr_mov16(0, *pc);
+ tr_r0_to_STACK(*pc);
+ if (tmpv != A_COND_AL) {
+ u32 *real_ptr = tcache_ptr;
+ tcache_ptr = jump_op;
+ EOP_C_B(tr_neg_cond(tmpv),0,real_ptr - jump_op - 2);
+ tcache_ptr = real_ptr;
+ }
+ tr_mov16_cond(tmpv, 0, imm);
+ if (tmpv != A_COND_AL) {
+ tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
+ }
+ tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
+ ret += 2; break;
+ }
+
+ // ld d, (a)
+ case 0x25:
+ tmpv2 = (op >> 4) & 0xf; // dst
+ if (tmpv2 >= 8) return -1; // TODO
+
+ tr_A_to_r0();
+ EOP_LDR_IMM(1,7,0x48c); // ptr_iram_rom
+ EOP_ADD_REG_LSL(0,1,0,1); // add r0, r1, r0, lsl #1
+ EOP_LDRH_SIMPLE(0,0); // ldrh r0, [r0]
+ hostreg_r[0] = hostreg_r[1] = -1;
+ tr_write_funcs[tmpv2](-1);
+ ret += 3; break;
+
+ // bra cond, addr
+ case 0x26:
+ tmpv = tr_cond_check(op);
+ tr_mov16_cond(tmpv, 0, imm);
+ if (tmpv != A_COND_AL) {
+ tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
+ }
+ tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
+ ret += 2; break;
+
+ // mod cond, op
+ case 0x48: {
+ // check for repeats of this op
+ tmpv = 1; // count
+ while (PROGRAM(*pc) == op && (op & 7) != 6) {
+ (*pc)++; tmpv++;
+ }
+ if ((op&0xf0) != 0) // !always
+ tr_make_dirty_ST();
+
+ tmpv2 = tr_cond_check(op);
+ switch (op & 7) {
+ case 2: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_ASR,5); break; // shr (arithmetic)
+ case 3: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_LSL,5); break; // shl
+ case 6: EOP_C_DOP_IMM(tmpv2,A_OP_RSB,1,5,5,0,0); break; // neg
+ case 7: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_EOR,0,5,1,31,A_AM1_ASR,5); // eor r1, r5, r5, asr #31
+ EOP_C_DOP_REG_XIMM(tmpv2,A_OP_ADD,1,1,5,31,A_AM1_LSR,5); // adds r5, r1, r5, lsr #31
+ hostreg_r[1] = -1; break; // abs
+ default: tr_unhandled();
+ }
+
+ hostreg_sspreg_changed(SSP_A);
+ dirty_regb |= KRREG_ST;
+ known_regb &= ~KRREG_ST;
+ known_regb &= ~(KRREG_A|KRREG_AL);
+ ret += tmpv; break;
+ }
+
+ // mpys?
+ case 0x1b:
+ tr_flush_dirty_P();
+ tr_mac_load_XY(op);
+ tr_make_dirty_ST();
+ EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_SUB,1,5,5,0,A_AM1_LSL,10); // subs r5, r5, r10
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+
+ // mpya (rj), (ri), b
+ case 0x4b:
+ tr_flush_dirty_P();
+ tr_mac_load_XY(op);
+ tr_make_dirty_ST();
+ EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_ADD,1,5,5,0,A_AM1_LSL,10); // adds r5, r5, r10
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+
+ // mld (rj), (ri), b
+ case 0x5b:
+ EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,1,0,5,0,0); // movs r5, #0
+ hostreg_sspreg_changed(SSP_A);
+ known_regs.gr[SSP_A].v = 0;
+ known_regb |= (KRREG_A|KRREG_AL);
+ dirty_regb |= KRREG_ST;
+ tr_mac_load_XY(op);
+ ret++; break;
+
+ // OP a, s
+ case 0x10:
+ case 0x30:
+ case 0x40:
+ case 0x50:
+ case 0x60:
+ case 0x70:
+ tmpv = op & 0xf; // src
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ if (tmpv >= 8) return -1; // TODO
+ if (tmpv == SSP_P) {
+ tr_flush_dirty_P();
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL,10); // OPs r5, r5, r10
+ } else if (tmpv == SSP_A) {
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL, 5); // OPs r5, r5, r5
+ } else {
+ tr_read_funcs[tmpv]();
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL, 0); // OPs r5, r5, r0, lsl #16
+ }
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+
+ // OP a, (ri)
+ case 0x11:
+ case 0x31:
+ case 0x41:
+ case 0x51:
+ case 0x61:
+ case 0x71:
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ tr_rX_read((op&3)|((op>>6)&4), (op>>2)&3);
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+
+ // OP a, adr
+ case 0x13:
+ case 0x33:
+ case 0x43:
+ case 0x53:
+ case 0x63:
+ case 0x73:
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ tr_bank_read(op&0x1ff);
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+
+ // OP a, imm
+ case 0x14:
+ case 0x34:
+ case 0x44:
+ case 0x54:
+ case 0x64:
+ case 0x74:
+ tmpv = (op & 0xf0) >> 4;
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ tr_mov16(0, imm);
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret += 2; break;
+
+ // OP a, ((ri))
+ case 0x15:
+ case 0x35:
+ case 0x45:
+ case 0x55:
+ case 0x65:
+ case 0x75:
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ tr_rX_read2(op);
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret += 3; break;
+
+ // OP a, ri
+ case 0x19:
+ case 0x39:
+ case 0x49:
+ case 0x59:
+ case 0x69:
+ case 0x79: {
+ int r;
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ r = (op&3) | ((op>>6)&4); // src
+ if ((r&3) == 3) tr_unhandled();
+
+ if (known_regb & (1 << (r+8))) {
+ EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,known_regs.r[r]); // OPs r5, r5, #val<<16
+ } else {
+ int reg = (r < 4) ? 8 : 9;
+ if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
+ EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
+ EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
+ hostreg_r[0] = -1;
+ }
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+ }
+
+ // OP simm
+ case 0x1c:
+ case 0x3c:
+ case 0x4c:
+ case 0x5c:
+ case 0x6c:
+ case 0x7c:
+ tmpv2 = tr_aop_ssp2arm(op>>13); // op
+ tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
+ EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,op & 0xff); // OPs r5, r5, #val<<16
+ hostreg_sspreg_changed(SSP_A);
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
+ dirty_regb |= KRREG_ST;
+ ret++; break;
+ }
+
+ return ret;