1 // SSP1601 to ARM recompiler
3 // (c) Copyright 2008, Grazvydas "notaz" Ignotas
4 // Free for non-commercial use.
6 #include "../../pico_int.h"
7 #include "../../../cpu/drc/cmn.h"
10 // FIXME: asm has these hardcoded
// NOTE(review): this listing is line-sampled and each line carries a stray
// original-line-number prefix fused into the text; restore from upstream
// before compiling.
// block-table sizing (the FIXME above notes the asm mirrors these values)
11 #define SSP_BLOCKTAB_ENTS (0x5090/2)
12 #define SSP_BLOCKTAB_IRAM_ONE (0x800/2) // table entries
13 #define SSP_BLOCKTAB_IRAM_ENTS (15*SSP_BLOCKTAB_IRAM_ONE)
// per-PC pointers into the translation cache (allocation not visible here)
15 static u32 **ssp_block_table; // [0x5090/2];
16 static u32 **ssp_block_table_iram; // [15][0x800/2];
// current emit position in the translation cache
18 static u32 *tcache_ptr = NULL;
// counters — presumably translated-block / translated-op totals; confirm
20 static int nblocks = 0;
21 static int n_in_ops = 0;
23 extern ssp1601_t *ssp;
// shorthands: SSP program counter (high half of gr[SSP_PC]) and PMC pair
25 #define rPC ssp->gr[SSP_PC].h
26 #define rPMC ssp->gr[SSP_PMC]
// SSP status-register flag bits (zero / negative)
28 #define SSP_FLAG_Z (1<<0xd)
29 #define SSP_FLAG_N (1<<0xf)
32 //#define DUMP_BLOCK 0x0c9a
/* Empty placeholder bodies for the DRC dispatcher entry points.
 * NOTE(review): presumably these symbols are provided/patched by the ARM
 * assembly dispatcher in other builds — confirm against the asm side. */
void ssp_drc_next(void){}
void ssp_drc_next_patch(void){}
void ssp_drc_end(void){}
39 #include "../../../cpu/drc/emit_arm.c"
41 // -----------------------------------------------------
/* Decode the auto-increment field of a PMx mode word.
 * Bits 13..11 select the step size (0,1,2,4,8,16,32,128); bit 15 makes it
 * a decrement. Returns the signed per-access address step. */
static int get_inc(int mode)
{
	int inc = (mode >> 11) & 7;
	if (inc != 0) {
		if (inc != 7) inc--;
		inc = 1 << inc; // 0 1 2 4 8 16 32 128
		if (mode & 0x8000) inc = -inc; // decrement mode
	}
	return inc;
}
// Read a word through "programmable memory" register PMx[reg]; C fallback
// used when the translator cannot inline the access.
// NOTE(review): line-sampled listing — braces, local declarations and the
// return statement were dropped, and lines carry stray number prefixes;
// restore from upstream before compiling.
54 u32 ssp_pm_read(int reg)
// pending "ld PMx, PMC" programming: latch PMC into the read-side pmac
58 if (ssp->emu_status & SSP_PMC_SET)
60 ssp->pmac_read[reg] = rPMC.v;
61 ssp->emu_status &= ~SSP_PMC_SET;
// normal access cancels any half-programmed PMC address state
66 ssp->emu_status &= ~SSP_PMC_HAVE_ADDR;
68 mode = ssp->pmac_read[reg]>>16;
69 if ((mode & 0xfff0) == 0x0800) // ROM
71 d = ((unsigned short *)Pico.rom)[ssp->pmac_read[reg]&0xfffff];
72 ssp->pmac_read[reg] += 1;
74 else if ((mode & 0x47ff) == 0x0018) // DRAM
76 unsigned short *dram = (unsigned short *)svp->dram;
77 int inc = get_inc(mode);
78 d = dram[ssp->pmac_read[reg]&0xffff];
79 ssp->pmac_read[reg] += inc;
82 // PMC value corresponds to last PMR accessed
83 rPMC.v = ssp->pmac_read[reg];
/* SVP DRAM "overwrite" write mode: only the nonzero hex nibbles of d are
 * stored into dst; zero nibbles leave the destination nibble untouched.
 * Multi-statement macro; evaluates d several times — pass simple values. */
#define overwrite_write(dst, d) \
{ \
	if (d & 0xf000) { dst &= ~0xf000; dst |= d & 0xf000; } \
	if (d & 0x0f00) { dst &= ~0x0f00; dst |= d & 0x0f00; } \
	if (d & 0x00f0) { dst &= ~0x00f0; dst |= d & 0x00f0; } \
	if (d & 0x000f) { dst &= ~0x000f; dst |= d & 0x000f; } \
}
// Write d through PMx[reg]; C fallback used when the translator cannot
// inline the access. Mirrors ssp_pm_read above for the write direction.
// NOTE(review): line-sampled listing — braces/locals missing, stray
// number prefixes present; restore from upstream before compiling.
96 void ssp_pm_write(u32 d, int reg)
// pending "ld PMx, PMC": latch PMC into the write-side pmac
101 if (ssp->emu_status & SSP_PMC_SET)
103 ssp->pmac_write[reg] = rPMC.v;
104 ssp->emu_status &= ~SSP_PMC_SET;
109 ssp->emu_status &= ~SSP_PMC_HAVE_ADDR;
111 dram = (unsigned short *)svp->dram;
112 mode = ssp->pmac_write[reg]>>16;
113 addr = ssp->pmac_write[reg]&0xffff;
114 if ((mode & 0x43ff) == 0x0018) // DRAM
116 int inc = get_inc(mode);
// "overwrite" variant stores only nonzero nibbles of d
118 overwrite_write(dram[addr], d);
119 } else dram[addr] = d;
120 ssp->pmac_write[reg] += inc;
122 else if ((mode & 0xfbff) == 0x4018) // DRAM, cell inc
125 overwrite_write(dram[addr], d);
126 } else dram[addr] = d;
// cell-increment mode: odd addresses step by 0x1f, even ones by 1
127 ssp->pmac_write[reg] += (addr&1) ? 0x1f : 1;
129 else if ((mode & 0x47ff) == 0x001c) // IRAM
131 int inc = get_inc(mode);
132 ((unsigned short *)svp->iram_rom)[addr&0x3ff] = d;
133 ssp->pmac_write[reg] += inc;
// program memory changed -> translated IRAM blocks must be invalidated
134 ssp->drc.iram_dirty = 1;
137 rPMC.v = ssp->pmac_write[reg];
141 // -----------------------------------------------------
// Maps a checksum of four IRAM probe bytes to a small "IRAM context" id,
// used to keep a separate translation table per uploaded IRAM program.
144 static unsigned char iram_context_map[] =
146 0, 0, 0, 0, 1, 0, 0, 0, // 04
147 0, 0, 0, 0, 0, 0, 2, 0, // 0e
148 0, 0, 0, 0, 0, 3, 0, 4, // 15 17
149 5, 0, 0, 6, 0, 7, 0, 0, // 18 1b 1d
150 8, 9, 0, 0, 0,10, 0, 0, // 20 21 25
151 0, 0, 0, 0, 0, 0, 0, 0,
152 0, 0,11, 0, 0,12, 0, 0, // 32 35
153 13,14, 0, 0, 0, 0, 0, 0 // 38 39
// Identify the currently loaded IRAM program; a zero (unmapped) value is
// reported as an anomaly. NOTE(review): line-sampled — the zero check and
// the `return val1` were dropped from this listing; restore from upstream.
156 int ssp_get_iram_context(void)
158 unsigned char *ir = (unsigned char *)svp->iram_rom;
// ^1 on the indices byte-swaps within 16-bit words
159 int val1, val = ir[0x083^1] + ir[0x4FA^1] + ir[0x5F7^1] + ir[0x47B^1];
160 val1 = iram_context_map[(val>>1)&0x3f];
163 elprintf(EL_ANOMALY, "svp: iram ctx val: %02x PC=%04x\n", (val>>1)&0x3f, rPC);
164 //debug_dump2file(name, svp->iram_rom, 0x800);
170 // -----------------------------------------------------
// ---- translation-time constant/value tracking state ----
// NOTE(review): the `static struct { ... } known_regs` header was lost in
// this line-sampled listing; the members below belong to it.
172 /* regs with known values */
177 unsigned int pmac_read[5];
178 unsigned int pmac_write[5];
180 unsigned int emu_status;
// bit positions in known_regb/dirty_regb for each tracked SSP register
183 #define KRREG_X (1 << SSP_X)
184 #define KRREG_Y (1 << SSP_Y)
185 #define KRREG_A (1 << SSP_A) /* AH only */
186 #define KRREG_ST (1 << SSP_ST)
187 #define KRREG_STACK (1 << SSP_STACK)
188 #define KRREG_PC (1 << SSP_PC)
189 #define KRREG_P (1 << SSP_P)
190 #define KRREG_PR0 (1 << 8)
191 #define KRREG_PR4 (1 << 12)
192 #define KRREG_AL (1 << 16)
193 #define KRREG_PMCM (1 << 18) /* only mode word of PMC */
194 #define KRREG_PMC (1 << 19)
195 #define KRREG_PM0R (1 << 20)
196 #define KRREG_PM1R (1 << 21)
197 #define KRREG_PM2R (1 << 22)
198 #define KRREG_PM3R (1 << 23)
199 #define KRREG_PM4R (1 << 24)
200 #define KRREG_PM0W (1 << 25)
201 #define KRREG_PM1W (1 << 26)
202 #define KRREG_PM2W (1 << 27)
203 #define KRREG_PM3W (1 << 28)
204 #define KRREG_PM4W (1 << 29)
206 /* bitfield of known register values */
207 static u32 known_regb = 0;
209 /* known vals, which need to be flushed
210 * (only ST, P, r0-r7, PMCx, PMxR, PMxW)
211 * ST means flags are being held in ARM PSR
212 * P means that it needs to be recalculated
214 static u32 dirty_regb = 0;
216 /* known values of host regs.
218 * 000000-00ffff - 16bit value
219 * 100000-10ffff - base reg (r7) + 16bit val
220 * 0r0000 - means reg (low) eq gr[r].h, r != AL
// tracked contents of host regs r0-r3; -1 = unknown
222 static int hostreg_r[4];
224 static void hostreg_clear(void)
227 for (i = 0; i < 4; i++)
231 static void hostreg_sspreg_changed(int sspreg)
234 for (i = 0; i < 4; i++)
235 if (hostreg_r[i] == (sspreg<<16)) hostreg_r[i] = -1;
// Access the SSP program memory (IRAM/ROM image) as 16-bit words,
// indexed by word address.
239 #define PROGRAM(x) ((unsigned short *)svp->iram_rom)[x]
240 #define PROGRAM_P(x) ((unsigned short *)svp->iram_rom + (x))
// Fatal bail-out for instruction patterns the translator cannot handle;
// logs the SSP PC (byte address). NOTE(review): line-sampled — the braces
// and the terminating exit()/abort call were dropped from this listing.
242 void tr_unhandled(void)
244 //FILE *f = fopen("tcache.bin", "wb");
245 //fwrite(tcache, 1, (tcache_ptr - tcache)*4, f);
247 elprintf(EL_ANOMALY, "unhandled @ %04x\n", known_regs.gr[SSP_PC].h<<1);
// NOTE(review): line-sampled listing — braces/locals missing and lines
// carry stray number prefixes; restore from upstream before compiling.
251 /* update P, if needed. Trashes r0 */
252 static void tr_flush_dirty_P(void)
// recompute P = (signed)X * (signed)Y; host r4 packs X:Y, result in r10
255 if (!(dirty_regb & KRREG_P)) return;
256 EOP_MOV_REG_ASR(10, 4, 16); // mov r10, r4, asr #16
257 EOP_MOV_REG_LSL( 0, 4, 16); // mov r0, r4, lsl #16
258 EOP_MOV_REG_ASR( 0, 0, 15); // mov r0, r0, asr #15
259 EOP_MUL(10, 0, 10); // mul r10, r0, r10
260 dirty_regb &= ~KRREG_P;
264 /* write dirty pr to host reg. Nothing is trashed */
265 static void tr_flush_dirty_pr(int r)
// pointer regs r0-r3 live packed 8-bits-each in host r8, r4-r7 in host r9
269 if (!(dirty_regb & (1 << (r+8)))) return;
272 case 0: ror = 0; break;
273 case 1: ror = 24/2; break;
274 case 2: ror = 16/2; break;
276 reg = (r < 4) ? 8 : 9;
277 EOP_BIC_IMM(reg,reg,ror,0xff);
278 if (known_regs.r[r] != 0)
279 EOP_ORR_IMM(reg,reg,ror,known_regs.r[r]);
280 dirty_regb &= ~(1 << (r+8));
283 /* write all dirty pr0-pr7 to host regs. Nothing is trashed */
284 static void tr_flush_dirty_prs(void)
287 int dirty = dirty_regb >> 8;
// fast path: whole bank (r0-r2 / r4-r6) known -> single immediate load
288 if ((dirty&7) == 7) {
289 emith_move_r_imm(8, known_regs.r[0]|(known_regs.r[1]<<8)|(known_regs.r[2]<<16));
292 if ((dirty&0x70) == 0x70) {
293 emith_move_r_imm(9, known_regs.r[4]|(known_regs.r[5]<<8)|(known_regs.r[6]<<16));
// slow path: patch each remaining dirty 8-bit lane individually
297 for (i = 0; dirty && i < 8; i++, dirty >>= 1)
299 if (!(dirty&1)) continue;
301 case 0: ror = 0; break;
302 case 1: ror = 24/2; break;
303 case 2: ror = 16/2; break;
305 reg = (i < 4) ? 8 : 9;
306 EOP_BIC_IMM(reg,reg,ror,0xff);
307 if (known_regs.r[i] != 0)
308 EOP_ORR_IMM(reg,reg,ror,known_regs.r[i]);
310 dirty_regb &= ~0xff00;
313 /* write dirty pr and "forget" it. Nothing is trashed. */
314 static void tr_release_pr(int r)
316 tr_flush_dirty_pr(r);
317 known_regb &= ~(1 << (r+8));
// NOTE(review): line-sampled listing — the instruction that reads the ARM
// PSR into r1 (between lines 324 and 326) was dropped; restore upstream.
320 /* fush ARM PSR to r6. Trashes r1 */
321 static void tr_flush_dirty_ST(void)
323 if (!(dirty_regb & KRREG_ST)) return;
// clear the flag nibble of r6, then merge PSR N/Z/C/V (r1 >> 28) into it
324 EOP_BIC_IMM(6,6,0,0x0f);
326 EOP_ORR_REG_LSR(6,6,1,28);
327 dirty_regb &= ~KRREG_ST;
331 /* inverse of above. Trashes r1 */
332 static void tr_make_dirty_ST(void)
334 if (dirty_regb & KRREG_ST) return;
// ST value known at translation time: set ARM N/Z directly with MSR imm
335 if (known_regb & KRREG_ST) {
337 if (known_regs.gr[SSP_ST].h & SSP_FLAG_N) flags |= 8;
338 if (known_regs.gr[SSP_ST].h & SSP_FLAG_Z) flags |= 4;
339 EOP_MSR_IMM(4/2, flags);
// otherwise shift r6's flag nibble up into the PSR flag bits via r1
341 EOP_MOV_REG_LSL(1, 6, 28);
345 dirty_regb |= KRREG_ST;
348 /* load 16bit val into host reg r0-r3. Nothing is trashed */
349 static void tr_mov16(int r, int val)
351 if (hostreg_r[r] != val) {
352 emith_move_r_imm(r, val);
357 static void tr_mov16_cond(int cond, int r, int val)
359 emith_op_imm(cond, A_OP_MOV, r, val);
// Store any known-but-unflushed PMC / pmac_read / pmac_write values back
// into the ssp context (host r7 = context base; r1 used as scratch).
// NOTE(review): line-sampled listing — braces missing, stray prefixes.
364 static void tr_flush_dirty_pmcrs(void)
366 u32 i, val = (u32)-1;
367 if (!(dirty_regb & 0x3ff80000)) return;
369 if (dirty_regb & KRREG_PMC) {
370 val = known_regs.pmc.v;
371 emith_move_r_imm(1, val);
372 EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
// a half-programmed PMC at flush time indicates a translator bug
374 if (known_regs.emu_status & (SSP_PMC_SET|SSP_PMC_HAVE_ADDR)) {
375 elprintf(EL_ANOMALY, "!! SSP_PMC_SET|SSP_PMC_HAVE_ADDR set on flush\n");
379 for (i = 0; i < 5; i++)
381 if (dirty_regb & (1 << (20+i))) {
// skip reloading r1 when it already holds the value to store
382 if (val != known_regs.pmac_read[i]) {
383 val = known_regs.pmac_read[i];
384 emith_move_r_imm(1, val);
386 EOP_STR_IMM(1,7,0x454+i*4); // pmac_read
388 if (dirty_regb & (1 << (25+i))) {
389 if (val != known_regs.pmac_write[i]) {
390 val = known_regs.pmac_write[i];
391 emith_move_r_imm(1, val);
393 EOP_STR_IMM(1,7,0x46c+i*4); // pmac_write
396 dirty_regb &= ~0x3ff80000;
400 /* read bank word to r0 (upper bits zero). Thrashes r1. */
401 static void tr_bank_read(int addr) /* word addr 0-0x1ff */
// r1 caches "r7 + bank page"; reuse it when the tracker says it matches
405 if (hostreg_r[1] != (0x100000|((addr&0x180)<<1))) {
406 EOP_ADD_IMM(1,7,30/2,(addr&0x180)>>1); // add r1, r7, ((op&0x180)<<1)
407 hostreg_r[1] = 0x100000|((addr&0x180)<<1);
411 EOP_LDRH_IMM(0,breg,(addr&0x7f)<<1); // ldrh r0, [r1, (op&0x7f)<<1]
415 /* write r0 to bank. Trashes r1. */
416 static void tr_bank_write(int addr)
420 if (hostreg_r[1] != (0x100000|((addr&0x180)<<1))) {
421 EOP_ADD_IMM(1,7,30/2,(addr&0x180)>>1); // add r1, r7, ((op&0x180)<<1)
422 hostreg_r[1] = 0x100000|((addr&0x180)<<1);
426 EOP_STRH_IMM(0,breg,(addr&0x7f)<<1); // strh r0, [r1, (op&0x7f)<<1]
// NOTE(review): line-sampled listing — braces and some lines (e.g. the
// modulo_shift=8 default and switch frames) were dropped; stray prefixes.
429 /* handle RAM bank pointer modifiers. if need_modulo, trash r1-r3, else nothing */
430 static void tr_ptrr_mod(int r, int mod, int need_modulo, int count)
432 int modulo_shift = -1; /* unknown */
434 if (mod == 0) return;
436 if (!need_modulo || mod == 1) // +!
// modulo size comes from ST bits 0-2 when ST is known at translation time
438 else if (need_modulo && (known_regb & KRREG_ST)) {
439 modulo_shift = known_regs.gr[SSP_ST].h & 7;
440 if (modulo_shift == 0) modulo_shift = 8;
// runtime-modulo path: compute the wrap mask from ST at execution time
443 if (modulo_shift == -1)
445 int reg = (r < 4) ? 8 : 9;
447 if (dirty_regb & KRREG_ST) {
448 // avoid flushing ARM flags
449 EOP_AND_IMM(1, 6, 0, 0x70);
450 EOP_SUB_IMM(1, 1, 0, 0x10);
451 EOP_AND_IMM(1, 1, 0, 0x70);
452 EOP_ADD_IMM(1, 1, 0, 0x10);
454 EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,1,6,1,0,0x70); // ands r1, r6, #0x70
455 EOP_C_DOP_IMM(A_COND_EQ,A_OP_MOV,0,0,1,0,0x80); // moveq r1, #0x80
457 EOP_MOV_REG_LSR(1, 1, 4); // mov r1, r1, lsr #4
458 EOP_RSB_IMM(2, 1, 0, 8); // rsb r1, r1, #8
459 EOP_MOV_IMM(3, 8/2, count); // mov r3, #0x01000000
// rotate the target 8-bit lane to the top, step it, rotate back
461 EOP_ADD_IMM(1, 1, 0, (r&3)*8); // add r1, r1, #(r&3)*8
462 EOP_MOV_REG2_ROR(reg,reg,1); // mov reg, reg, ror r1
464 EOP_SUB_REG2_LSL(reg,reg,3,2); // sub reg, reg, #0x01000000 << r2
465 else EOP_ADD_REG2_LSL(reg,reg,3,2);
466 EOP_RSB_IMM(1, 1, 0, 32); // rsb r1, r1, #32
467 EOP_MOV_REG2_ROR(reg,reg,1); // mov reg, reg, ror r1
468 hostreg_r[1] = hostreg_r[2] = hostreg_r[3] = -1;
// pointer reg value known: fold the modulo arithmetic at translation time
470 else if (known_regb & (1 << (r + 8)))
472 int modulo = (1 << modulo_shift) - 1;
474 known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] - count) & modulo);
475 else known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] + count) & modulo);
// value unknown but modulo known: do the wrap with a rotate/add/rotate
479 int reg = (r < 4) ? 8 : 9;
480 int ror = ((r&3) + 1)*8 - (8 - modulo_shift);
481 EOP_MOV_REG_ROR(reg,reg,ror);
482 // {add|sub} reg, reg, #1<<shift
483 EOP_C_DOP_IMM(A_COND_AL,(mod==2)?A_OP_SUB:A_OP_ADD,0,reg,reg, 8/2, count << (8 - modulo_shift));
484 EOP_MOV_REG_ROR(reg,reg,32-ror);
// NOTE(review): line-sampled listing — braces and r==3 special-case lines
// were dropped; stray number prefixes present. Restore from upstream.
488 /* handle writes r0 to (rX). Trashes r1.
489 * fortunately we can ignore modulo increment modes for writes. */
490 static void tr_rX_write(int op)
// (rX) with rX==3 encodes direct addressing of bank slots 0-3
494 int mod = (op>>2) & 3; // direct addressing
495 tr_bank_write((op & 0x100) + mod);
499 int r = (op&3) | ((op>>6)&4);
500 if (known_regb & (1 << (r + 8))) {
501 tr_bank_write((op&0x100) | known_regs.r[r]);
// pointer value unknown: extract the 8-bit lane from r8/r9 at runtime
503 int reg = (r < 4) ? 8 : 9;
504 int ror = ((4 - (r&3))*8) & 0x1f;
505 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
507 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
508 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
509 else EOP_ADD_REG_LSL(1,7,1,1);
510 EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
513 tr_ptrr_mod(r, (op>>2) & 3, 0, 1);
517 /* read (rX) to r0. Trashes r1-r3. */
518 static void tr_rX_read(int r, int mod)
522 tr_bank_read(((r << 6) & 0x100) + mod); // direct addressing
526 if (known_regb & (1 << (r + 8))) {
527 tr_bank_read(((r << 6) & 0x100) | known_regs.r[r]);
529 int reg = (r < 4) ? 8 : 9;
530 int ror = ((4 - (r&3))*8) & 0x1f;
531 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
533 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
534 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
535 else EOP_ADD_REG_LSL(1,7,1,1);
536 EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
537 hostreg_r[0] = hostreg_r[1] = -1;
// reads honour the modulo increment modes (need_modulo = 1)
539 tr_ptrr_mod(r, mod, 1, 1);
543 /* read ((rX)) to r0. Trashes r1,r2. */
544 static void tr_rX_read2(int op)
546 int r = (op&3) | ((op>>6)&4); // src
549 tr_bank_read((op&0x100) | ((op>>2)&3));
550 } else if (known_regb & (1 << (r+8))) {
551 tr_bank_read((op&0x100) | known_regs.r[r]);
553 int reg = (r < 4) ? 8 : 9;
554 int ror = ((4 - (r&3))*8) & 0x1f;
555 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
557 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
558 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
559 else EOP_ADD_REG_LSL(1,7,1,1);
560 EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
// indirect value read from program memory, pointed-to word post-incremented
562 EOP_LDR_IMM(2,7,0x48c); // ptr_iram_rom
563 EOP_ADD_REG_LSL(2,2,0,1); // add r2, r2, r0, lsl #1
564 EOP_ADD_IMM(0,0,0,1); // add r0, r0, #1
566 tr_bank_write((op&0x100) | ((op>>2)&3));
567 } else if (known_regb & (1 << (r+8))) {
568 tr_bank_write((op&0x100) | known_regs.r[r]);
570 EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
573 EOP_LDRH_SIMPLE(0,2); // ldrh r0, [r2]
574 hostreg_r[0] = hostreg_r[2] = -1;
// Scan forward from the current SSP PC deciding whether AL (accumulator
// low) will still be read before being overwritten — lets tr_r0_to_A skip
// preserving AL. NOTE(review): line-sampled — the enclosing loop/switch
// frames and most case bodies were dropped from this listing.
577 // check if AL is going to be used later in block
578 static int tr_predict_al_need(void)
580 int tmpv, tmpv2, op, pc = known_regs.gr[SSP_PC].h;
589 tmpv2 = (op >> 4) & 0xf; // dst
590 tmpv = op & 0xf; // src
591 if ((tmpv2 == SSP_A && tmpv == SSP_P) || tmpv2 == SSP_AL) // ld A, P; ld AL, *
600 case 0x10: case 0x30: case 0x40: case 0x60: case 0x70:
601 tmpv = op & 0xf; // src
602 if (tmpv == SSP_AL) // OP *, AL
// two-word opcode: skip its immediate operand
612 case 0x74: pc++; break;
622 // mpya (rj), (ri), b
626 case 0x5b: return 0; // cleared anyway
630 tmpv = op & 0xf; // src
631 if (tmpv == SSP_AL) return 1;
632 case 0x51: case 0x53: case 0x54: case 0x55: case 0x59: case 0x5c:
// NOTE(review): line-sampled — the switch frame and default exit were
// dropped; stray number prefixes present. Restore from upstream.
640 /* get ARM cond which would mean that SSP cond is satisfied. No trash. */
641 static int tr_cond_check(int op)
643 int f = (op & 0x100) >> 8;
645 case 0x00: return A_COND_AL; /* always true */
646 case 0x50: /* Z matches f(?) bit */
// if flags live in the ARM PSR (ST dirty), test them directly;
// otherwise test the flag bit cached in host r6
647 if (dirty_regb & KRREG_ST) return f ? A_COND_EQ : A_COND_NE;
648 EOP_TST_IMM(6, 0, 4);
649 return f ? A_COND_NE : A_COND_EQ;
650 case 0x70: /* N matches f(?) bit */
651 if (dirty_regb & KRREG_ST) return f ? A_COND_MI : A_COND_PL;
652 EOP_TST_IMM(6, 0, 8);
653 return f ? A_COND_NE : A_COND_EQ;
655 elprintf(EL_ANOMALY, "unimplemented cond?\n");
661 static int tr_neg_cond(int cond)
664 case A_COND_AL: elprintf(EL_ANOMALY, "neg for AL?\n"); exit(1);
665 case A_COND_EQ: return A_COND_NE;
666 case A_COND_NE: return A_COND_EQ;
667 case A_COND_MI: return A_COND_PL;
668 case A_COND_PL: return A_COND_MI;
669 default: elprintf(EL_ANOMALY, "bad cond for neg\n"); exit(1);
674 static int tr_aop_ssp2arm(int op)
677 case 1: return A_OP_SUB;
678 case 3: return A_OP_CMP;
679 case 4: return A_OP_ADD;
680 case 5: return A_OP_AND;
681 case 6: return A_OP_ORR;
682 case 7: return A_OP_EOR;
689 // -----------------------------------------------------
693 //@ r6: STACK and emu flags
// ---- per-register "read SSP reg into host r0" emitters ----
// Host reg convention (from comments above): r4 = X:Y, r5 = A (AH:AL),
// r6 = ST/STACK/flags, r7 = context base, r10 = P.
// NOTE(review): line-sampled listing — braces and some bodies dropped.
697 // read general reg to r0. Trashes r1
698 static void tr_GR0_to_r0(int op)
703 static void tr_X_to_r0(int op)
// skip the emit when the tracker says r0 already holds X
705 if (hostreg_r[0] != (SSP_X<<16)) {
706 EOP_MOV_REG_LSR(0, 4, 16); // mov r0, r4, lsr #16
707 hostreg_r[0] = SSP_X<<16;
711 static void tr_Y_to_r0(int op)
713 if (hostreg_r[0] != (SSP_Y<<16)) {
714 EOP_MOV_REG_SIMPLE(0, 4); // mov r0, r4
715 hostreg_r[0] = SSP_Y<<16;
719 static void tr_A_to_r0(int op)
721 if (hostreg_r[0] != (SSP_A<<16)) {
722 EOP_MOV_REG_LSR(0, 5, 16); // mov r0, r5, lsr #16 @ AH
723 hostreg_r[0] = SSP_A<<16;
727 static void tr_ST_to_r0(int op)
729 // VR doesn't need much accuracy here..
730 EOP_MOV_REG_LSR(0, 6, 4); // mov r0, r6, lsr #4
731 EOP_AND_IMM(0, 0, 0, 0x67); // and r0, r0, #0x67
735 static void tr_STACK_to_r0(int op)
// pop: pre-decrement the stack pointer held in r6 bits 29-31
738 EOP_SUB_IMM(6, 6, 8/2, 0x20); // sub r6, r6, #1<<29
739 EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
740 EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
741 EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28
742 EOP_LDRH_SIMPLE(0, 1); // ldrh r0, [r1]
743 hostreg_r[0] = hostreg_r[1] = -1;
746 static void tr_PC_to_r0(int op)
// PC is always known at translation time
748 tr_mov16(0, known_regs.gr[SSP_PC].h);
751 static void tr_P_to_r0(int op)
754 EOP_MOV_REG_LSR(0, 10, 16); // mov r0, r10, lsr #16
758 static void tr_AL_to_r0(int op)
// reading AL as an operand also clears pending PMC programming state
761 if (known_regb & KRREG_PMC) {
762 known_regs.emu_status &= ~(SSP_PMC_SET|SSP_PMC_HAVE_ADDR);
764 EOP_LDR_IMM(0,7,0x484); // ldr r1, [r7, #0x484] // emu_status
765 EOP_BIC_IMM(0,0,0,SSP_PMC_SET|SSP_PMC_HAVE_ADDR);
766 EOP_STR_IMM(0,7,0x484);
770 if (hostreg_r[0] != (SSP_AL<<16)) {
771 EOP_MOV_REG_SIMPLE(0, 5); // mov r0, r5
772 hostreg_r[0] = SSP_AL<<16;
// Emit a read of PMx[reg] into r0. Inlines the ROM/DRAM access when the
// PMC programming is known at translation time; otherwise falls back to
// calling ssp_pm_read at runtime. NOTE(review): line-sampled listing —
// braces and fallback-path lines were dropped; stray prefixes present.
776 static void tr_PMX_to_r0(int reg)
// pending "ld PMx, PMC" with known PMC: latch it at translation time
778 if ((known_regb & KRREG_PMC) && (known_regs.emu_status & SSP_PMC_SET))
780 known_regs.pmac_read[reg] = known_regs.pmc.v;
781 known_regs.emu_status &= ~SSP_PMC_SET;
782 known_regb |= 1 << (20+reg);
783 dirty_regb |= 1 << (20+reg);
787 if ((known_regb & KRREG_PMC) && (known_regb & (1 << (20+reg))))
789 u32 pmcv = known_regs.pmac_read[reg];
791 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
793 if ((mode & 0xfff0) == 0x0800)
795 EOP_LDR_IMM(1,7,0x488); // rom_ptr
796 emith_move_r_imm(0, (pmcv&0xfffff)<<1);
797 EOP_LDRH_REG(0,1,0); // ldrh r0, [r1, r0]
798 known_regs.pmac_read[reg] += 1;
800 else if ((mode & 0x47ff) == 0x0018) // DRAM
802 int inc = get_inc(mode);
803 EOP_LDR_IMM(1,7,0x490); // dram_ptr
804 emith_move_r_imm(0, (pmcv&0xffff)<<1);
805 EOP_LDRH_REG(0,1,0); // ldrh r0, [r1, r0]
// busy-wait on known DRAM status addresses: burn cycles + set wait flag
806 if (reg == 4 && (pmcv == 0x187f03 || pmcv == 0x187f04)) // wait loop detection
808 int flag = (pmcv == 0x187f03) ? SSP_WAIT_30FE06 : SSP_WAIT_30FE08;
810 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
811 EOP_TST_REG_SIMPLE(0,0);
812 EOP_C_DOP_IMM(A_COND_EQ,A_OP_SUB,0,11,11,22/2,1); // subeq r11, r11, #1024
813 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1,24/2,flag>>8); // orreq r1, r1, #SSP_WAIT_30FE08
814 EOP_STR_IMM(1,7,0x484); // str r1, [r7, #0x484] // emu_status
816 known_regs.pmac_read[reg] += inc;
822 known_regs.pmc.v = known_regs.pmac_read[reg];
823 //known_regb |= KRREG_PMC;
824 dirty_regb |= KRREG_PMC;
825 dirty_regb |= 1 << (20+reg);
826 hostreg_r[0] = hostreg_r[1] = -1;
// unknown programming: drop all tracked PMC/PMx state and call C helper
830 known_regb &= ~KRREG_PMC;
831 dirty_regb &= ~KRREG_PMC;
832 known_regb &= ~(1 << (20+reg));
833 dirty_regb &= ~(1 << (20+reg));
835 // call the C code to handle this
837 //tr_flush_dirty_pmcrs();
839 emith_call(ssp_pm_read);
// Thin wrappers binding each PMx register to tr_PMX_to_r0, plus XST/PMC
// readers and the register-read dispatch table.
// NOTE(review): line-sampled — wrapper bodies and most table entries were
// dropped from this listing; restore from upstream.
843 static void tr_PM0_to_r0(int op)
848 static void tr_PM1_to_r0(int op)
853 static void tr_PM2_to_r0(int op)
858 static void tr_XST_to_r0(int op)
860 EOP_ADD_IMM(0, 7, 24/2, 4); // add r0, r7, #0x400
861 EOP_LDRH_IMM(0, 0, SSP_XST*4+2);
864 static void tr_PM4_to_r0(int op)
869 static void tr_PMC_to_r0(int op)
// reading PMC toggles the two-phase (address word / mode word) sequence
871 if (known_regb & KRREG_PMC)
873 if (known_regs.emu_status & SSP_PMC_HAVE_ADDR) {
874 known_regs.emu_status |= SSP_PMC_SET;
875 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
876 // do nothing - this is handled elsewhere
878 tr_mov16(0, known_regs.pmc.l);
879 known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
// PMC unknown at translation time: update emu_status flags at runtime
884 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
887 EOP_LDR_IMM(0, 7, 0x400+SSP_PMC*4);
888 EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
889 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // orreq r1, r1, #..
890 EOP_C_DOP_IMM(A_COND_NE,A_OP_BIC,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // bicne r1, r1, #..
891 EOP_C_DOP_IMM(A_COND_NE,A_OP_ORR,0, 1, 1, 0, SSP_PMC_SET); // orrne r1, r1, #..
892 EOP_STR_IMM(1,7,0x484);
893 hostreg_r[0] = hostreg_r[1] = -1;
// dispatch table: SSP source-register index -> reader emitter
898 typedef void (tr_read_func)(int op);
900 static tr_read_func *tr_read_funcs[16] =
915 (tr_read_func *)tr_unhandled,
// Common tail for all "write r0 into SSP reg" emitters: invalidates the
// host-reg tracker for `reg`, records that r0 now mirrors it, and updates
// known_regb from const_val (-1 = value not known at translation time).
// NOTE(review): line-sampled — the macro's brace lines were dropped.
921 // write r0 to general reg handlers. Trashes r1
922 #define TR_WRITE_R0_TO_REG(reg) \
924 hostreg_sspreg_changed(reg); \
925 hostreg_r[0] = (reg)<<16; \
926 if (const_val != -1) { \
927 known_regs.gr[reg].h = const_val; \
928 known_regb |= 1 << (reg); \
930 known_regb &= ~(1 << (reg)); \
// ---- per-register "write host r0 into SSP reg" emitters ----
// NOTE(review): line-sampled listing — braces and some bodies dropped;
// stray number prefixes present. Restore from upstream.
934 static void tr_r0_to_GR0(int const_val)
939 static void tr_r0_to_X(int const_val)
// X lives in the high half of host r4
941 EOP_MOV_REG_LSL(4, 4, 16); // mov r4, r4, lsl #16
942 EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16
943 EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
944 dirty_regb |= KRREG_P; // touching X or Y makes P dirty.
945 TR_WRITE_R0_TO_REG(SSP_X);
948 static void tr_r0_to_Y(int const_val)
950 EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16
951 EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
952 EOP_MOV_REG_ROR(4, 4, 16); // mov r4, r4, ror #16
953 dirty_regb |= KRREG_P;
954 TR_WRITE_R0_TO_REG(SSP_Y);
957 static void tr_r0_to_A(int const_val)
// only bother preserving AL if the look-ahead says it will be read again
959 if (tr_predict_al_need()) {
960 EOP_MOV_REG_LSL(5, 5, 16); // mov r5, r5, lsl #16
961 EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16 @ AL
962 EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
965 EOP_MOV_REG_LSL(5, 0, 16);
966 TR_WRITE_R0_TO_REG(SSP_A);
969 static void tr_r0_to_ST(int const_val)
971 // VR doesn't need much accuracy here..
972 EOP_AND_IMM(1, 0, 0, 0x67); // and r1, r0, #0x67
973 EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
974 EOP_ORR_REG_LSL(6, 6, 1, 4); // orr r6, r6, r1, lsl #4
975 TR_WRITE_R0_TO_REG(SSP_ST);
977 dirty_regb &= ~KRREG_ST;
980 static void tr_r0_to_STACK(int const_val)
// push: store at current slot, then post-increment pointer in r6 bits 29+
983 EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
984 EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
985 EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28
986 EOP_STRH_SIMPLE(0, 1); // strh r0, [r1]
987 EOP_ADD_IMM(6, 6, 8/2, 0x20); // add r6, r6, #1<<29
991 static void tr_r0_to_PC(int const_val)
994 * do nothing - dispatcher will take care of this
995 EOP_MOV_REG_LSL(1, 0, 16); // mov r1, r0, lsl #16
996 EOP_STR_IMM(1,7,0x400+6*4); // str r1, [r7, #(0x400+6*8)]
1001 static void tr_r0_to_AL(int const_val)
// AL lives in the low half of host r5; track known value separately
1003 EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16
1004 EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
1005 EOP_MOV_REG_ROR(5, 5, 16); // mov r5, r5, ror #16
1006 hostreg_sspreg_changed(SSP_AL);
1007 if (const_val != -1) {
1008 known_regs.gr[SSP_A].l = const_val;
1009 known_regb |= 1 << SSP_AL;
1011 known_regb &= ~(1 << SSP_AL);
// Emit a write of r0 through PMx[reg]. Inlines DRAM/IRAM stores when the
// PMC programming is known at translation time; otherwise falls back to
// calling ssp_pm_write at runtime. NOTE(review): line-sampled — braces
// and fallback lines dropped; stray prefixes present.
1014 static void tr_r0_to_PMX(int reg)
// pending "ld PMx, PMC" with known PMC: latch at translation time
1016 if ((known_regb & KRREG_PMC) && (known_regs.emu_status & SSP_PMC_SET))
1018 known_regs.pmac_write[reg] = known_regs.pmc.v;
1019 known_regs.emu_status &= ~SSP_PMC_SET;
1020 known_regb |= 1 << (25+reg);
1021 dirty_regb |= 1 << (25+reg);
1025 if ((known_regb & KRREG_PMC) && (known_regb & (1 << (25+reg))))
1029 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
1031 mode = known_regs.pmac_write[reg]>>16;
1032 addr = known_regs.pmac_write[reg]&0xffff;
1033 if ((mode & 0x43ff) == 0x0018) // DRAM
1035 int inc = get_inc(mode);
// overwrite mode (bit 10) is not inlined here
1036 if (mode & 0x0400) tr_unhandled();
1037 EOP_LDR_IMM(1,7,0x490); // dram_ptr
1038 emith_move_r_imm(2, addr << 1);
1039 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1040 known_regs.pmac_write[reg] += inc;
1042 else if ((mode & 0xfbff) == 0x4018) // DRAM, cell inc
1044 if (mode & 0x0400) tr_unhandled();
1045 EOP_LDR_IMM(1,7,0x490); // dram_ptr
1046 emith_move_r_imm(2, addr << 1);
1047 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1048 known_regs.pmac_write[reg] += (addr&1) ? 31 : 1;
1050 else if ((mode & 0x47ff) == 0x001c) // IRAM
1052 int inc = get_inc(mode);
1053 EOP_LDR_IMM(1,7,0x48c); // iram_ptr
1054 emith_move_r_imm(2, (addr&0x3ff) << 1);
1055 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
// self-modifying program memory: mark IRAM translations stale
1057 EOP_STR_IMM(1,7,0x494); // iram_dirty
1058 known_regs.pmac_write[reg] += inc;
1063 known_regs.pmc.v = known_regs.pmac_write[reg];
1064 //known_regb |= KRREG_PMC;
1065 dirty_regb |= KRREG_PMC;
1066 dirty_regb |= 1 << (25+reg);
1067 hostreg_r[1] = hostreg_r[2] = -1;
// unknown programming: drop tracked state and call the C helper
1071 known_regb &= ~KRREG_PMC;
1072 dirty_regb &= ~KRREG_PMC;
1073 known_regb &= ~(1 << (25+reg));
1074 dirty_regb &= ~(1 << (25+reg));
1076 // call the C code to handle this
1077 tr_flush_dirty_ST();
1078 //tr_flush_dirty_pmcrs();
1080 emith_call(ssp_pm_write);
// PMx write wrappers, the PMC writer, and the register-write dispatch
// table. NOTE(review): line-sampled — wrapper bodies and most table
// entries were dropped from this listing; restore from upstream.
1084 static void tr_r0_to_PM0(int const_val)
1089 static void tr_r0_to_PM1(int const_val)
1094 static void tr_r0_to_PM2(int const_val)
1099 static void tr_r0_to_PM4(int const_val)
1104 static void tr_r0_to_PMC(int const_val)
// writing PMC alternates between address word and mode word phases
1106 if ((known_regb & KRREG_PMC) && const_val != -1)
1108 if (known_regs.emu_status & SSP_PMC_HAVE_ADDR) {
1109 known_regs.emu_status |= SSP_PMC_SET;
1110 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
1111 known_regs.pmc.h = const_val;
1113 known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
1114 known_regs.pmc.l = const_val;
// value unknown: flush tracked PMC then emit runtime phase handling
1119 tr_flush_dirty_ST();
1120 if (known_regb & KRREG_PMC) {
1121 emith_move_r_imm(1, known_regs.pmc.v);
1122 EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
1123 known_regb &= ~KRREG_PMC;
1124 dirty_regb &= ~KRREG_PMC;
1126 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
1127 EOP_ADD_IMM(2,7,24/2,4); // add r2, r7, #0x400
1128 EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
1129 EOP_C_AM3_IMM(A_COND_EQ,1,0,2,0,0,1,SSP_PMC*4); // strxx r0, [r2, #SSP_PMC]
1130 EOP_C_AM3_IMM(A_COND_NE,1,0,2,0,0,1,SSP_PMC*4+2);
1131 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // orreq r1, r1, #..
1132 EOP_C_DOP_IMM(A_COND_NE,A_OP_BIC,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // bicne r1, r1, #..
1133 EOP_C_DOP_IMM(A_COND_NE,A_OP_ORR,0, 1, 1, 0, SSP_PMC_SET); // orrne r1, r1, #..
1134 EOP_STR_IMM(1,7,0x484);
1135 hostreg_r[1] = hostreg_r[2] = -1;
// dispatch table: SSP destination-register index -> writer emitter
1139 typedef void (tr_write_func)(int const_val);
1141 static tr_write_func *tr_write_funcs[16] =
1150 (tr_write_func *)tr_unhandled,
1154 (tr_write_func *)tr_unhandled,
1156 (tr_write_func *)tr_unhandled,
// Load X and Y together for the multiply-accumulate ops: X from (ri) into
// the high half of host r4, Y from (rj) into the low half. Both become
// unknown constants and P is marked for recalculation.
// NOTE(review): line-sampled — the opening/closing braces were dropped.
1161 static void tr_mac_load_XY(int op)
1163 tr_rX_read(op&3, (op>>2)&3); // X
1164 EOP_MOV_REG_LSL(4, 0, 16);
1165 tr_rX_read(((op>>4)&3)|4, (op>>6)&3); // Y
1166 EOP_ORR_REG_SIMPLE(4, 0);
1167 dirty_regb |= KRREG_P;
1168 hostreg_sspreg_changed(SSP_X);
1169 hostreg_sspreg_changed(SSP_Y);
1170 known_regb &= ~KRREG_X;
1171 known_regb &= ~KRREG_Y;
1174 // -----------------------------------------------------
// ---- idiom detectors: recognize common SSP code sequences and emit
// specialized translations; each returns the number of ops consumed
// (0 = no match). NOTE(review): line-sampled listing — braces, returns
// and some lines were dropped; stray prefixes present.
1176 static int tr_detect_set_pm(unsigned int op, int *pc, int imm)
// match back-to-back "ld PMC, imm" pairs (full 32-bit PMC programming)
1179 if (!((op&0xfef0) == 0x08e0 && (PROGRAM(*pc)&0xfef0) == 0x08e0)) return 0;
1185 pmcv = imm | (PROGRAM((*pc)++) << 16);
1186 known_regs.pmc.v = pmcv;
1187 known_regb |= KRREG_PMC;
1188 dirty_regb |= KRREG_PMC;
1189 known_regs.emu_status |= SSP_PMC_SET;
1192 // check for possible reg programming
1193 tmpv = PROGRAM(*pc);
1194 if ((tmpv & 0xfff8) == 0x08 || (tmpv & 0xff8f) == 0x80)
1196 int is_write = (tmpv & 0xff8f) == 0x80;
1197 int reg = is_write ? ((tmpv>>4)&0x7) : (tmpv&0x7);
1198 if (reg > 4) tr_unhandled();
1199 if ((tmpv & 0x0f) != 0 && (tmpv & 0xf0) != 0) tr_unhandled();
// NOTE(review): reg+5 indexes past pmac_read[5] — presumably relies on
// pmac_write[] being laid out immediately after it in the struct; confirm
1200 known_regs.pmac_read[is_write ? reg + 5 : reg] = pmcv;
1201 known_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
1202 dirty_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
1203 known_regs.emu_status &= ~SSP_PMC_SET;
// the PM0-idle sequence some programs spin on; translated to "ST = 0x60"
1213 static const short pm0_block_seq[] = { 0x0880, 0, 0x0880, 0, 0x0840, 0x60 };
1215 static int tr_detect_pm0_block(unsigned int op, int *pc, int imm)
1222 if (op != 0x0840 || imm != 0) return 0;
1223 pp = PROGRAM_P(*pc);
1224 if (memcmp(pp, pm0_block_seq, sizeof(pm0_block_seq)) != 0) return 0;
1226 EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
1227 EOP_ORR_IMM(6, 6, 24/2, 6); // orr r6, r6, 0x600
1228 hostreg_sspreg_changed(SSP_ST);
1229 known_regs.gr[SSP_ST].h = 0x60;
1230 known_regb |= 1 << SSP_ST;
1231 dirty_regb &= ~KRREG_ST;
// 3-op nibble-rotate idiom collapsed into two ARM instructions
1237 static int tr_detect_rotate(unsigned int op, int *pc, int imm)
1243 if (op != 0x02e3 || PROGRAM(*pc) != 0x04e3 || PROGRAM(*pc + 1) != 0x000f) return 0;
1246 EOP_MOV_REG_LSL(0, 0, 4);
1247 EOP_ORR_REG_LSR(0, 0, 0, 16);
1254 // -----------------------------------------------------
1256 static int translate_op(unsigned int op, int *pc, int imm, int *end_cond, int *jump_pc)
1258 u32 tmpv, tmpv2, tmpv3;
1260 known_regs.gr[SSP_PC].h = *pc;
1266 if (op == 0) { ret++; break; } // nop
1267 tmpv = op & 0xf; // src
1268 tmpv2 = (op >> 4) & 0xf; // dst
1269 if (tmpv2 == SSP_A && tmpv == SSP_P) { // ld A, P
1271 EOP_MOV_REG_SIMPLE(5, 10);
1272 hostreg_sspreg_changed(SSP_A);
1273 known_regb &= ~(KRREG_A|KRREG_AL);
1276 tr_read_funcs[tmpv](op);
1277 tr_write_funcs[tmpv2]((known_regb & (1 << tmpv)) ? known_regs.gr[tmpv].h : -1);
1278 if (tmpv2 == SSP_PC) {
1280 *end_cond = -A_COND_AL;
1286 int r = (op&3) | ((op>>6)&4);
1287 int mod = (op>>2)&3;
1288 tmpv = (op >> 4) & 0xf; // dst
1289 ret = tr_detect_rotate(op, pc, imm);
1295 while (PROGRAM(*pc) == op) {
1296 (*pc)++; cnt++; ret++;
1299 tr_ptrr_mod(r, mod, 1, cnt); // skip
1301 tr_write_funcs[tmpv](-1);
1302 if (tmpv == SSP_PC) {
1304 *end_cond = -A_COND_AL;
1311 tmpv = (op >> 4) & 0xf; // src
1312 tr_read_funcs[tmpv](op);
1318 tr_bank_read(op&0x1ff);
1324 tmpv = (op & 0xf0) >> 4; // dst
1325 ret = tr_detect_pm0_block(op, pc, imm);
1327 ret = tr_detect_set_pm(op, pc, imm);
1330 tr_write_funcs[tmpv](imm);
1331 if (tmpv == SSP_PC) {
1339 tmpv2 = (op >> 4) & 0xf; // dst
1341 tr_write_funcs[tmpv2](-1);
1342 if (tmpv2 == SSP_PC) {
1344 *end_cond = -A_COND_AL;
1357 tr_bank_write(op&0x1ff);
1363 r = (op&3) | ((op>>6)&4); // src
1364 tmpv2 = (op >> 4) & 0xf; // dst
1365 if ((r&3) == 3) tr_unhandled();
1367 if (known_regb & (1 << (r+8))) {
1368 tr_mov16(0, known_regs.r[r]);
1369 tr_write_funcs[tmpv2](known_regs.r[r]);
1371 int reg = (r < 4) ? 8 : 9;
1372 if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
1373 EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
1375 tr_write_funcs[tmpv2](-1);
1383 r = (op&3) | ((op>>6)&4); // dst
1384 tmpv = (op >> 4) & 0xf; // src
1385 if ((r&3) == 3) tr_unhandled();
1387 if (known_regb & (1 << tmpv)) {
1388 known_regs.r[r] = known_regs.gr[tmpv].h;
1389 known_regb |= 1 << (r + 8);
1390 dirty_regb |= 1 << (r + 8);
1392 int reg = (r < 4) ? 8 : 9;
1393 int ror = ((4 - (r&3))*8) & 0x1f;
1394 tr_read_funcs[tmpv](op);
1395 EOP_BIC_IMM(reg, reg, ror/2, 0xff); // bic r{7,8}, r{7,8}, <mask>
1396 EOP_AND_IMM(0, 0, 0, 0xff); // and r0, r0, 0xff
1397 EOP_ORR_REG_LSL(reg, reg, 0, (r&3)*8); // orr r{7,8}, r{7,8}, r0, lsl #lsl
1399 known_regb &= ~(1 << (r+8));
1400 dirty_regb &= ~(1 << (r+8));
1406 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
1408 known_regs.r[tmpv] = op;
1409 known_regb |= 1 << (tmpv + 8);
1410 dirty_regb |= 1 << (tmpv + 8);
1415 u32 *jump_op = NULL;
1416 tmpv = tr_cond_check(op);
1417 if (tmpv != A_COND_AL) {
1418 jump_op = tcache_ptr;
1419 EOP_MOV_IMM(0, 0, 0); // placeholder for branch
1422 tr_r0_to_STACK(*pc);
1423 if (tmpv != A_COND_AL) {
1424 u32 *real_ptr = tcache_ptr;
1425 tcache_ptr = jump_op;
1426 EOP_C_B(tr_neg_cond(tmpv),0,real_ptr - jump_op - 2);
1427 tcache_ptr = real_ptr;
1429 tr_mov16_cond(tmpv, 0, imm);
1430 if (tmpv != A_COND_AL)
1431 tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
1432 tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
1441 tmpv2 = (op >> 4) & 0xf; // dst
1443 EOP_LDR_IMM(1,7,0x48c); // ptr_iram_rom
1444 EOP_ADD_REG_LSL(0,1,0,1); // add r0, r1, r0, lsl #1
1445 EOP_LDRH_SIMPLE(0,0); // ldrh r0, [r0]
1446 hostreg_r[0] = hostreg_r[1] = -1;
1447 tr_write_funcs[tmpv2](-1);
1448 if (tmpv2 == SSP_PC) {
1450 *end_cond = -A_COND_AL;
1456 tmpv = tr_cond_check(op);
1457 tr_mov16_cond(tmpv, 0, imm);
1458 if (tmpv != A_COND_AL)
1459 tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
1460 tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
1468 // check for repeats of this op
1470 while (PROGRAM(*pc) == op && (op & 7) != 6) {
1474 if ((op&0xf0) != 0) // !always
1477 tmpv2 = tr_cond_check(op);
1479 case 2: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_ASR,5); break; // shr (arithmetic)
1480 case 3: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_LSL,5); break; // shl
1481 case 6: EOP_C_DOP_IMM(tmpv2,A_OP_RSB,1,5,5,0,0); break; // neg
1482 case 7: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_EOR,0,5,1,31,A_AM1_ASR,5); // eor r1, r5, r5, asr #31
1483 EOP_C_DOP_REG_XIMM(tmpv2,A_OP_ADD,1,1,5,31,A_AM1_LSR,5); // adds r5, r1, r5, lsr #31
1484 hostreg_r[1] = -1; break; // abs
1485 default: tr_unhandled();
1488 hostreg_sspreg_changed(SSP_A);
1489 dirty_regb |= KRREG_ST;
1490 known_regb &= ~KRREG_ST;
1491 known_regb &= ~(KRREG_A|KRREG_AL);
1500 EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_SUB,1,5,5,0,A_AM1_LSL,10); // subs r5, r5, r10
1501 hostreg_sspreg_changed(SSP_A);
1502 known_regb &= ~(KRREG_A|KRREG_AL);
1503 dirty_regb |= KRREG_ST;
1506 // mpya (rj), (ri), b
1511 EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_ADD,1,5,5,0,A_AM1_LSL,10); // adds r5, r5, r10
1512 hostreg_sspreg_changed(SSP_A);
1513 known_regb &= ~(KRREG_A|KRREG_AL);
1514 dirty_regb |= KRREG_ST;
1517 // mld (rj), (ri), b
1519 EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,1,0,5,0,0); // movs r5, #0
1520 hostreg_sspreg_changed(SSP_A);
1521 known_regs.gr[SSP_A].v = 0;
1522 known_regb |= (KRREG_A|KRREG_AL);
1523 dirty_regb |= KRREG_ST;
1534 tmpv = op & 0xf; // src
1535 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1536 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1537 if (tmpv == SSP_P) {
1539 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL,10); // OPs r5, r5, r10
1540 } else if (tmpv == SSP_A) {
1541 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL, 5); // OPs r5, r5, r5
1543 tr_read_funcs[tmpv](op);
1544 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL, 0); // OPs r5, r5, r0, lsl #16
1546 hostreg_sspreg_changed(SSP_A);
1547 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1548 dirty_regb |= KRREG_ST;
1558 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1559 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1560 tr_rX_read((op&3)|((op>>6)&4), (op>>2)&3);
1561 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1562 hostreg_sspreg_changed(SSP_A);
1563 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1564 dirty_regb |= KRREG_ST;
1574 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1575 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1576 tr_bank_read(op&0x1ff);
1577 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1578 hostreg_sspreg_changed(SSP_A);
1579 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1580 dirty_regb |= KRREG_ST;
1590 tmpv = (op & 0xf0) >> 4;
1591 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1592 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1594 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1595 hostreg_sspreg_changed(SSP_A);
1596 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1597 dirty_regb |= KRREG_ST;
1607 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1608 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1610 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1611 hostreg_sspreg_changed(SSP_A);
1612 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1613 dirty_regb |= KRREG_ST;
1624 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1625 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1626 r = (op&3) | ((op>>6)&4); // src
1627 if ((r&3) == 3) tr_unhandled();
1629 if (known_regb & (1 << (r+8))) {
1630 EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,known_regs.r[r]); // OPs r5, r5, #val<<16
1632 int reg = (r < 4) ? 8 : 9;
1633 if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
1634 EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
1635 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1638 hostreg_sspreg_changed(SSP_A);
1639 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1640 dirty_regb |= KRREG_ST;
1651 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1652 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1653 EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,op & 0xff); // OPs r5, r5, #val<<16
1654 hostreg_sspreg_changed(SSP_A);
1655 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1656 dirty_regb |= KRREG_ST;
// Emitted at the start of every translated block: bail out to the
// interpreter/dispatcher exit stub when the cycle budget is used up.
// Register convention (see epilogue): r11 = remaining cycle counter.
1665 static void emit_block_prologue(void)
1667 // check if there are enough cycles..
1668 // note: r0 must contain PC of current block
1669 EOP_CMP_IMM(11,0,0); // cmp r11, #0
// LE (signed <= 0): counter exhausted or overshot -> leave generated code
1670 emith_jump_cond(A_COND_LE, ssp_drc_end);
1674 * >0: direct (un)conditional jump
// Emitted at the end of every translated block: charge the block's cycle
// count to r11, then branch to the next block.
//   cycles  - cycles consumed by this block (clamped to 0xff below)
//   cond    - ARM condition of the block-ending jump; A_COND_AL for an
//             unconditional jump, negative for an indirect jump (target
//             only known at run time, must go through the dispatcher)
//   pc      - SSP PC of the (taken) jump target
//   end_pc  - SSP PC of the fall-through path (used for the not-taken leg)
// PC < 0x400 selects the per-context IRAM block table, otherwise the ROM
// block table. Unresolved targets get patchable call stubs that the
// dispatcher (ssp_drc_next_patch) back-patches on first execution.
1677 static void emit_block_epilogue(int cycles, int cond, int pc, int end_pc)
1679 if (cycles > 0xff) { elprintf(EL_ANOMALY, "large cycle count: %i\n", cycles); cycles = 0xff; }
1680 EOP_SUB_IMM(11,11,0,cycles); // sub r11, r11, #cycles
1682 if (cond < 0 || (end_pc >= 0x400 && pc < 0x400)) {
1683 // indirect jump, or rom -> iram jump, must use dispatcher
1684 emith_jump(ssp_drc_next);
1686 else if (cond == A_COND_AL) {
// unconditional direct jump: look up the already-translated target, if any
1687 u32 *target = (pc < 0x400) ?
1688 ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + pc] :
1689 ssp_block_table[pc];
// target not translated yet: jump to dispatcher for now
// (NOTE(review): the code rewinding tcache_ptr over this jump sits in a
// gap of this listing - see the comment on the next line)
1693 int ops = emith_jump(ssp_drc_next);
1694 // cause the next block to be emitted over jump instruction
// conditional direct jump: two possible successors (taken / fall-through)
1699 u32 *target1 = (pc < 0x400) ?
1700 ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + pc] :
1701 ssp_block_table[pc];
1702 u32 *target2 = (end_pc < 0x400) ?
1703 ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + end_pc] :
1704 ssp_block_table[end_pc];
1705 if (target1 != NULL)
1706 emith_jump_cond(cond, target1);
1707 if (target2 != NULL)
1708 emith_jump_cond(tr_neg_cond(cond), target2); // neg_cond, to be able to swap jumps if needed
1710 // emit patchable branches
1711 if (target1 == NULL)
1712 emith_call_cond(cond, ssp_drc_next_patch);
1713 if (target2 == NULL)
1714 emith_call_cond(tr_neg_cond(cond), ssp_drc_next_patch);
1716 // won't patch indirect jumps
1717 if (target1 == NULL || target2 == NULL)
1718 emith_jump(ssp_drc_next);
// Translate one basic block of SSP1601 code starting at 'pc' (word address)
// into ARM code at tcache_ptr. Returns a pointer to the generated block
// (return statement lies in a gap of this listing; presumably block_start).
// Translation stops at a block-ending op (translate_op sets bit 16 of its
// return value) or when ~100 cycles worth of ops have been emitted.
1723 void *ssp_translate_block(int pc)
1725 unsigned int op, op1, imm, ccount = 0;
1726 unsigned int *block_start;
1727 int ret, end_cond = A_COND_AL, jump_pc = -1;
1729 //printf("translate %04x -> %04x\n", pc<<1, (tcache_ptr-tcache)<<2);
1731 block_start = tcache_ptr;
// start with P marked dirty; no other register knowledge carries over
1733 dirty_regb = KRREG_P;
1734 known_regs.emu_status = 0;
1737 emit_block_prologue();
// main translation loop; ccount accumulates emulated cycles
1739 for (; ccount < 100;)
// opcode classes 4 and 6 carry a second program word as immediate
1745 if ((op1 & 0xf) == 4 || (op1 & 0xf) == 6)
1746 imm = PROGRAM(pc++); // immediate
1748 ret = translate_op(op, &pc, imm, &end_cond, &jump_pc);
// NULL handler / unknown opcode - log and (in a gap) abort this op
1751 elprintf(EL_ANOMALY, "NULL func! op=%08x (%02x)\n", op, op1);
// low 16 bits of ret = cycles, bit 16 = "block ends here"
1755 ccount += ret & 0xffff;
1756 if (ret & 0x10000) break;
// hit the cycle cap mid-block: force an unconditional exit to 'pc'
1759 if (ccount >= 100) {
1760 end_cond = A_COND_AL;
1762 emith_move_r_imm(0, pc);
// write back all cached/dirty state before leaving generated code
1765 tr_flush_dirty_prs();
1766 tr_flush_dirty_ST();
1767 tr_flush_dirty_pmcrs();
1768 emit_block_epilogue(ccount, end_cond, jump_pc, pc);
// NOTE(review): overflow is only detected after the fact; by this point
// the cache has already been overrun. Recovery code sits in a gap here.
1770 if (tcache_ptr - (u32 *)tcache > DRC_TCACHE_SIZE/4) {
1771 elprintf(EL_ANOMALY|EL_STATUS|EL_SVP, "tcache overflow!\n");
1778 //printf("%i blocks, %i bytes, k=%.3f\n", nblocks, (tcache_ptr - tcache)*4,
1779 // (double)(tcache_ptr - tcache) / (double)n_in_ops);
// debug dump of the whole translation cache (presumably under
// #ifdef DUMP_BLOCK, which lives in a gap of this listing - see file head)
1783 FILE *f = fopen("tcache.bin", "wb");
1784 fwrite(tcache, 1, (tcache_ptr - tcache)*4, f);
1787 printf("dumped tcache.bin\n");
// make the freshly written code visible to the instruction stream
1792 cache_flush_d_inval_i(tcache, tcache_ptr);
1800 // -----------------------------------------------------
// Savestate-load hook (installed via PicoLoadStateHook in startup):
// mark IRAM as dirty and reset the IRAM context so stale translated
// blocks are not reused against the restored IRAM contents.
1802 static void ssp1601_state_load(void)
1804 ssp->drc.iram_dirty = 1;
1805 ssp->drc.iram_context = 0;
// Tear down the recompiler: release both block-lookup tables.
// free(NULL) is a no-op, so this is safe even if startup failed halfway.
1808 void ssp1601_dyn_exit(void)
1810 free(ssp_block_table);
1811 free(ssp_block_table_iram);
1812 ssp_block_table = ssp_block_table_iram = NULL;
// One-time recompiler init: allocate the ROM and per-context IRAM block
// tables, clear the translation cache, install the savestate hook and
// pre-seed the tables with native HLE replacements for known hot SSP
// routines. Returns 0 on success (error-path returns sit in gaps of this
// listing - presumably -1 on allocation failure).
1817 int ssp1601_dyn_startup(void)
1821 ssp_block_table = calloc(sizeof(ssp_block_table[0]), SSP_BLOCKTAB_ENTS);
1822 if (ssp_block_table == NULL)
1824 ssp_block_table_iram = calloc(sizeof(ssp_block_table_iram[0]), SSP_BLOCKTAB_IRAM_ENTS);
1825 if (ssp_block_table_iram == NULL) {
// roll back the first allocation on partial failure
1826 free(ssp_block_table);
1830 memset(tcache, 0, DRC_TCACHE_SIZE);
1831 tcache_ptr = (void *)tcache;
1833 PicoLoadStateHook = ssp1601_state_load;
// pre-installed HLE handlers: indices are (word address / 2), IRAM entries
// additionally select one of the 15 IRAM contexts
1838 ssp_block_table[0x800/2] = (void *) ssp_hle_800;
1839 ssp_block_table[0x902/2] = (void *) ssp_hle_902;
1840 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ONE + 0x030/2] = (void *) ssp_hle_07_030;
1841 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ONE + 0x036/2] = (void *) ssp_hle_07_036;
1842 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ONE + 0x6d6/2] = (void *) ssp_hle_07_6d6;
1843 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ONE + 0x12c/2] = (void *) ssp_hle_11_12c;
1844 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ONE + 0x384/2] = (void *) ssp_hle_11_384;
1845 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ONE + 0x38a/2] = (void *) ssp_hle_11_38a;
// Per-reset init: refresh the drc context block with pointers the
// generated ARM code reads via fixed offsets (see the asm side).
// The parameter deliberately shadows the global 'ssp'.
// NOTE(review): pointers are stored as u32, so this assumes a 32-bit
// host (the ARM target this DRC is written for) - not portable to 64-bit.
1852 void ssp1601_dyn_reset(ssp1601_t *ssp)
1855 ssp->drc.iram_dirty = 1;
1856 ssp->drc.iram_context = 0;
1857 // must do this here because ssp is not available @ startup()
1858 ssp->drc.ptr_rom = (u32) Pico.rom;
1859 ssp->drc.ptr_iram_rom = (u32) svp->iram_rom;
1860 ssp->drc.ptr_dram = (u32) svp->dram;
1861 ssp->drc.ptr_btable = (u32) ssp_block_table;
1862 ssp->drc.ptr_btable_iram = (u32) ssp_block_table_iram;
1864 // prevent new versions of IRAM from appearing
1865 memset(svp->iram_rom, 0, 0x800);
// Run the SSP for (roughly) 'cycles' cycles through the generated code.
// Skips execution entirely while the SSP is halted/waiting.
1869 void ssp1601_dyn_run(int cycles)
1871 if (ssp->emu_status & SSP_WAIT_MASK) return;
// debug path: force-translate one block (presumably guarded by
// #ifdef DUMP_BLOCK, which lies in a gap of this listing - see file head)
1874 ssp_translate_block(DUMP_BLOCK >> 1);
// enter generated code; r11 is loaded with 'cycles' by the asm entry stub
1877 ssp_drc_entry(cycles);