1 // SSP1601 to ARM recompiler
3 // (c) Copyright 2008, Grazvydas "notaz" Ignotas
4 // Free for non-commercial use.
6 #include "../../pico_int.h"
7 #include "../../../cpu/drc/cmn.h"
// Translation-cache sizing, DRC bookkeeping state and SSP register shortcuts.
// NOTE(review): this chunk was extracted with original line numbers embedded
// in each line and some lines elided; code below is kept byte-identical.
10 // FIXME: asm has these hardcoded
11 #define SSP_BLOCKTAB_ENTS (0x5090/2)
12 #define SSP_BLOCKTAB_IRAM_ONE (0x800/2) // table entries
13 #define SSP_BLOCKTAB_IRAM_ENTS (15*SSP_BLOCKTAB_IRAM_ONE)
// per-PC entry points into translated code; IRAM table has one bank
// per detected IRAM context (see iram_context_map below)
15 static u32 **ssp_block_table; // [0x5090/2];
16 static u32 **ssp_block_table_iram; // [15][0x800/2];
// next free slot in the translation cache
18 static u32 *tcache_ptr = NULL;
// statistics: translated block count / translated input ops
20 static int nblocks = 0;
21 static int n_in_ops = 0;
23 extern ssp1601_t *ssp;
// shortcuts for often-used emulated SSP1601 registers
25 #define rPC ssp->gr[SSP_PC].h
26 #define rPMC ssp->gr[SSP_PMC]
// Z/N flag positions inside the 16bit ST register
28 #define SSP_FLAG_Z (1<<0xd)
29 #define SSP_FLAG_N (1<<0xf)
32 //#define DUMP_BLOCK 0x0c9a
// Dispatcher entry points referenced by generated code.
// NOTE(review): bodies are empty here — presumably the real implementations
// live in hand-written asm elsewhere in the project; confirm before relying
// on these C stubs.
33 void ssp_drc_next(void){}
34 void ssp_drc_next_patch(void){}
35 void ssp_drc_end(void){}
40 // -----------------------------------------------------
// Decode the auto-increment amount from the mode half of a PMC value.
// Increment field is bits 11-13; bit 15 selects decrement.
// NOTE(review): several lines (braces / return) are elided in this view.
42 static int get_inc(int mode)
44 int inc = (mode >> 11) & 7;
47 inc = 1 << inc; // 0 1 2 4 8 16 32 128
48 if (mode & 0x8000) inc = -inc; // decrement mode
// C fallback for reading PM0-PM4: called from generated code (see
// tr_PMX_to_r0) when the recompiler cannot resolve the PMC state statically.
// Latches a freshly programmed PMC into pmac_read[reg], then performs the
// ROM or DRAM access with post-increment, and mirrors the resulting
// address back into rPMC.
53 u32 ssp_pm_read(int reg)
57 if (ssp->emu_status & SSP_PMC_SET)
59 ssp->pmac_read[reg] = rPMC.v;
60 ssp->emu_status &= ~SSP_PMC_SET;
65 ssp->emu_status &= ~SSP_PMC_HAVE_ADDR;
67 mode = ssp->pmac_read[reg]>>16;
68 if ((mode & 0xfff0) == 0x0800) // ROM
70 d = ((unsigned short *)Pico.rom)[ssp->pmac_read[reg]&0xfffff];
71 ssp->pmac_read[reg] += 1;
73 else if ((mode & 0x47ff) == 0x0018) // DRAM
75 unsigned short *dram = (unsigned short *)svp->dram;
76 int inc = get_inc(mode);
77 d = dram[ssp->pmac_read[reg]&0xffff];
78 ssp->pmac_read[reg] += inc;
81 // PMC value corresponds to last PMR accessed
82 rPMC.v = ssp->pmac_read[reg];
// "Overwrite" write mode: only nibbles that are non-zero in the source
// replace the corresponding nibbles of the destination word.
87 #define overwrite_write(dst, d) \
89 if (d & 0xf000) { dst &= ~0xf000; dst |= d & 0xf000; } \
90 if (d & 0x0f00) { dst &= ~0x0f00; dst |= d & 0x0f00; } \
91 if (d & 0x00f0) { dst &= ~0x00f0; dst |= d & 0x00f0; } \
92 if (d & 0x000f) { dst &= ~0x000f; dst |= d & 0x000f; } \
// C fallback for writing PM0-PM4: called from generated code (see
// tr_r0_to_PMX) when PMC state is not statically known. Handles DRAM
// (normal and overwrite modes), DRAM cell-increment mode and IRAM writes,
// advances pmac_write[reg] and mirrors it back into rPMC.
// IRAM writes also mark translated IRAM blocks dirty.
95 void ssp_pm_write(u32 d, int reg)
100 if (ssp->emu_status & SSP_PMC_SET)
102 ssp->pmac_write[reg] = rPMC.v;
103 ssp->emu_status &= ~SSP_PMC_SET;
108 ssp->emu_status &= ~SSP_PMC_HAVE_ADDR;
110 dram = (unsigned short *)svp->dram;
111 mode = ssp->pmac_write[reg]>>16;
112 addr = ssp->pmac_write[reg]&0xffff;
113 if ((mode & 0x43ff) == 0x0018) // DRAM
115 int inc = get_inc(mode);
117 overwrite_write(dram[addr], d);
118 } else dram[addr] = d;
119 ssp->pmac_write[reg] += inc;
121 else if ((mode & 0xfbff) == 0x4018) // DRAM, cell inc
124 overwrite_write(dram[addr], d);
125 } else dram[addr] = d;
// cell-increment: odd addresses step by 0x1f, even ones by 1
126 ssp->pmac_write[reg] += (addr&1) ? 0x1f : 1;
128 else if ((mode & 0x47ff) == 0x001c) // IRAM
130 int inc = get_inc(mode);
131 ((unsigned short *)svp->iram_rom)[addr&0x3ff] = d;
132 ssp->pmac_write[reg] += inc;
133 ssp->drc.iram_dirty = 1;
136 rPMC.v = ssp->pmac_write[reg];
140 // -----------------------------------------------------
// Maps a hash of four IRAM probe bytes (see ssp_get_iram_context) to one of
// 15 known IRAM "contexts", i.e. distinct program uploads observed in the
// game; 0 means unrecognized. Trailing // comments list the hash values
// that map to each nonzero context.
143 static unsigned char iram_context_map[] =
145 0, 0, 0, 0, 1, 0, 0, 0, // 04
146 0, 0, 0, 0, 0, 0, 2, 0, // 0e
147 0, 0, 0, 0, 0, 3, 0, 4, // 15 17
148 5, 0, 0, 6, 0, 7, 0, 0, // 18 1b 1d
149 8, 9, 0, 0, 0,10, 0, 0, // 20 21 25
150 0, 0, 0, 0, 0, 0, 0, 0,
151 0, 0,11, 0, 0,12, 0, 0, // 32 35
152 13,14, 0, 0, 0, 0, 0, 0 // 38 39
// Identify which program is currently loaded in IRAM by sampling four
// byte locations and looking the sum up in iram_context_map.
// Logs an anomaly when the hash is not recognized (context 0).
// NOTE(review): the return statement is elided in this view; presumably
// returns val1.
155 int ssp_get_iram_context(void)
157 unsigned char *ir = (unsigned char *)svp->iram_rom;
158 int val1, val = ir[0x083^1] + ir[0x4FA^1] + ir[0x5F7^1] + ir[0x47B^1];
159 val1 = iram_context_map[(val>>1)&0x3f];
162 elprintf(EL_ANOMALY, "svp: iram ctx val: %02x PC=%04x\n", (val>>1)&0x3f, rPC);
163 //debug_dump2file(name, svp->iram_rom, 0x800);
169 // -----------------------------------------------------
171 /* regs with known values */
// Compile-time shadow of SSP state the translator has proven constant.
// NOTE(review): the struct declaration that holds these fields is partly
// elided in this view.
176 unsigned int pmac_read[5];
177 unsigned int pmac_write[5];
179 unsigned int emu_status;
// Bit positions used in known_regb / dirty_regb below, one per tracked reg.
182 #define KRREG_X (1 << SSP_X)
183 #define KRREG_Y (1 << SSP_Y)
184 #define KRREG_A (1 << SSP_A) /* AH only */
185 #define KRREG_ST (1 << SSP_ST)
186 #define KRREG_STACK (1 << SSP_STACK)
187 #define KRREG_PC (1 << SSP_PC)
188 #define KRREG_P (1 << SSP_P)
189 #define KRREG_PR0 (1 << 8)
190 #define KRREG_PR4 (1 << 12)
191 #define KRREG_AL (1 << 16)
192 #define KRREG_PMCM (1 << 18) /* only mode word of PMC */
193 #define KRREG_PMC (1 << 19)
194 #define KRREG_PM0R (1 << 20)
195 #define KRREG_PM1R (1 << 21)
196 #define KRREG_PM2R (1 << 22)
197 #define KRREG_PM3R (1 << 23)
198 #define KRREG_PM4R (1 << 24)
199 #define KRREG_PM0W (1 << 25)
200 #define KRREG_PM1W (1 << 26)
201 #define KRREG_PM2W (1 << 27)
202 #define KRREG_PM3W (1 << 28)
203 #define KRREG_PM4W (1 << 29)
205 /* bitfield of known register values */
206 static u32 known_regb = 0;
208 /* known vals, which need to be flushed
209 * (only ST, P, r0-r7, PMCx, PMxR, PMxW)
210 * ST means flags are being held in ARM PSR
211 * P means that it needs to be recalculated
213 static u32 dirty_regb = 0;
215 /* known values of host regs.
217 * 000000-00ffff - 16bit value
218 * 100000-10ffff - base reg (r7) + 16bit val
219 * 0r0000 - means reg (low) eq gr[r].h, r != AL
221 static int hostreg_r[4];
// Forget everything known about host regs r0-r3.
223 static void hostreg_clear(void)
226 for (i = 0; i < 4; i++)
// Invalidate any host reg known to mirror the given SSP register.
230 static void hostreg_sspreg_changed(int sspreg)
233 for (i = 0; i < 4; i++)
234 if (hostreg_r[i] == (sspreg<<16)) hostreg_r[i] = -1;
// Access the SSP program (IRAM/ROM) as 16bit words by word address.
238 #define PROGRAM(x) ((unsigned short *)svp->iram_rom)[x]
239 #define PROGRAM_P(x) ((unsigned short *)svp->iram_rom + (x))
// Bail-out for opcodes/addressing modes the translator does not support:
// logs the offending PC (byte address, hence <<1).
241 void tr_unhandled(void)
243 //FILE *f = fopen("tcache.bin", "wb");
244 //fwrite(tcache, 1, (tcache_ptr - tcache)*4, f);
246 elprintf(EL_ANOMALY, "unhandled @ %04x\n", known_regs.gr[SSP_PC].h<<1);
250 /* update P, if needed. Trashes r0 */
// P = sign-extended X * sign-extended Y; X/Y live packed in host r4
// (X in the high half, Y in the low half), P is kept in r10.
251 static void tr_flush_dirty_P(void)
254 if (!(dirty_regb & KRREG_P)) return;
255 EOP_MOV_REG_ASR(10, 4, 16); // mov r10, r4, asr #16
256 EOP_MOV_REG_LSL( 0, 4, 16); // mov r0, r4, lsl #16
257 EOP_MOV_REG_ASR( 0, 0, 15); // mov r0, r0, asr #15
258 EOP_MUL(10, 0, 10); // mul r10, r0, r10
259 dirty_regb &= ~KRREG_P;
263 /* write dirty pr to host reg. Nothing is trashed */
// Pointer regs r0-r3 are packed one byte each in host r8, r4-r7 in r9;
// patch just this reg's byte lane via BIC+ORR with a rotated immediate.
// NOTE(review): part of the switch selecting `ror` is elided in this view.
264 static void tr_flush_dirty_pr(int r)
268 if (!(dirty_regb & (1 << (r+8)))) return;
271 case 0: ror = 0; break;
272 case 1: ror = 24/2; break;
273 case 2: ror = 16/2; break;
275 reg = (r < 4) ? 8 : 9;
276 EOP_BIC_IMM(reg,reg,ror,0xff);
277 if (known_regs.r[r] != 0)
278 EOP_ORR_IMM(reg,reg,ror,known_regs.r[r]);
279 dirty_regb &= ~(1 << (r+8));
282 /* write all dirty pr0-pr7 to host regs. Nothing is trashed */
// Fast path: if a whole bank (r0-r2 or r4-r6) is dirty, load the packed
// constant in one go; otherwise patch individual byte lanes as in
// tr_flush_dirty_pr.
283 static void tr_flush_dirty_prs(void)
286 int dirty = dirty_regb >> 8;
287 if ((dirty&7) == 7) {
288 emit_mov_const(A_COND_AL, 8, known_regs.r[0]|(known_regs.r[1]<<8)|(known_regs.r[2]<<16));
291 if ((dirty&0x70) == 0x70) {
292 emit_mov_const(A_COND_AL, 9, known_regs.r[4]|(known_regs.r[5]<<8)|(known_regs.r[6]<<16));
296 for (i = 0; dirty && i < 8; i++, dirty >>= 1)
298 if (!(dirty&1)) continue;
300 case 0: ror = 0; break;
301 case 1: ror = 24/2; break;
302 case 2: ror = 16/2; break;
304 reg = (i < 4) ? 8 : 9;
305 EOP_BIC_IMM(reg,reg,ror,0xff);
306 if (known_regs.r[i] != 0)
307 EOP_ORR_IMM(reg,reg,ror,known_regs.r[i]);
309 dirty_regb &= ~0xff00;
312 /* write dirty pr and "forget" it. Nothing is trashed. */
// After this, the translator no longer tracks a constant for pointer reg r.
313 static void tr_release_pr(int r)
315 tr_flush_dirty_pr(r);
316 known_regb &= ~(1 << (r+8));
319 /* flush ARM PSR to r6. Trashes r1 */
// SSP flags were being held live in the ARM PSR (dirty ST); copy the
// NZCV nibble into the low bits of host r6, which holds ST/STACK state.
320 static void tr_flush_dirty_ST(void)
322 if (!(dirty_regb & KRREG_ST)) return;
323 EOP_BIC_IMM(6,6,0,0x0f);
325 EOP_ORR_REG_LSR(6,6,1,28);
326 dirty_regb &= ~KRREG_ST;
330 /* inverse of above. Trashes r1 */
// Load SSP N/Z flags into the ARM PSR so subsequent flag-setting ops can
// be used directly. Uses an immediate MSR when ST is statically known,
// otherwise reconstructs the PSR from r6 (elided lines presumably MSR
// from r1 — confirm).
331 static void tr_make_dirty_ST(void)
333 if (dirty_regb & KRREG_ST) return;
334 if (known_regb & KRREG_ST) {
336 if (known_regs.gr[SSP_ST].h & SSP_FLAG_N) flags |= 8;
337 if (known_regs.gr[SSP_ST].h & SSP_FLAG_Z) flags |= 4;
338 EOP_MSR_IMM(4/2, flags);
340 EOP_MOV_REG_LSL(1, 6, 28);
344 dirty_regb |= KRREG_ST;
347 /* load 16bit val into host reg r0-r3. Nothing is trashed */
// Skips the move if the host reg is already known to hold that value.
348 static void tr_mov16(int r, int val)
350 if (hostreg_r[r] != val) {
351 emit_mov_const(A_COND_AL, r, val);
// Conditional variant: always emits (value only valid on one path, so
// no host-reg caching is possible).
356 static void tr_mov16_cond(int cond, int r, int val)
358 emit_mov_const(cond, r, val);
// Write statically-known PMC / pmac_read / pmac_write values back into the
// ssp1601_t context (addressed off host r7). Reuses the last constant
// loaded into r1 (`val` cache) to avoid redundant mov_const sequences.
// Bits 19-29 of dirty_regb cover PMC and the ten pmac slots.
363 static void tr_flush_dirty_pmcrs(void)
365 u32 i, val = (u32)-1;
366 if (!(dirty_regb & 0x3ff80000)) return;
368 if (dirty_regb & KRREG_PMC) {
369 val = known_regs.pmc.v;
370 emit_mov_const(A_COND_AL, 1, val);
371 EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
// PMC programming should have been consumed before a flush
373 if (known_regs.emu_status & (SSP_PMC_SET|SSP_PMC_HAVE_ADDR)) {
374 elprintf(EL_ANOMALY, "!! SSP_PMC_SET|SSP_PMC_HAVE_ADDR set on flush\n");
378 for (i = 0; i < 5; i++)
380 if (dirty_regb & (1 << (20+i))) {
381 if (val != known_regs.pmac_read[i]) {
382 val = known_regs.pmac_read[i];
383 emit_mov_const(A_COND_AL, 1, val);
385 EOP_STR_IMM(1,7,0x454+i*4); // pmac_read
387 if (dirty_regb & (1 << (25+i))) {
388 if (val != known_regs.pmac_write[i]) {
389 val = known_regs.pmac_write[i];
390 emit_mov_const(A_COND_AL, 1, val);
392 EOP_STR_IMM(1,7,0x46c+i*4); // pmac_write
395 dirty_regb &= ~0x3ff80000;
399 /* read bank word to r0 (upper bits zero). Thrashes r1. */
// RAM bank lives right behind the context pointed to by r7. The bank base
// (addr & 0x180) is cached in r1 across calls via hostreg_r[1]; only the
// low 7 bits go into the load offset.
400 static void tr_bank_read(int addr) /* word addr 0-0x1ff */
404 if (hostreg_r[1] != (0x100000|((addr&0x180)<<1))) {
405 EOP_ADD_IMM(1,7,30/2,(addr&0x180)>>1); // add r1, r7, ((op&0x180)<<1)
406 hostreg_r[1] = 0x100000|((addr&0x180)<<1);
410 EOP_LDRH_IMM(0,breg,(addr&0x7f)<<1); // ldrh r0, [r1, (op&0x7f)<<1]
414 /* write r0 to bank. Trashes r1. */
// Mirror of tr_bank_read, emitting a store instead of a load.
415 static void tr_bank_write(int addr)
419 if (hostreg_r[1] != (0x100000|((addr&0x180)<<1))) {
420 EOP_ADD_IMM(1,7,30/2,(addr&0x180)>>1); // add r1, r7, ((op&0x180)<<1)
421 hostreg_r[1] = 0x100000|((addr&0x180)<<1);
425 EOP_STRH_IMM(0,breg,(addr&0x7f)<<1); // strh r0, [r1, (op&0x7f)<<1]
428 /* handle RAM bank pointer modifiers. if need_modulo, trash r1-r3, else nothing */
// Applies the post-increment/decrement (mod) to pointer reg r, honoring the
// modulo size encoded in ST bits 0-2 when modulo arithmetic is requested.
// Three strategies: runtime code when the modulo is unknown, pure
// compile-time update when the reg value is known, and a rotate/add/rotate
// sequence on the packed byte lane otherwise.
429 static void tr_ptrr_mod(int r, int mod, int need_modulo, int count)
431 int modulo_shift = -1; /* unknown */
433 if (mod == 0) return;
435 if (!need_modulo || mod == 1) // +!
437 else if (need_modulo && (known_regb & KRREG_ST)) {
438 modulo_shift = known_regs.gr[SSP_ST].h & 7;
439 if (modulo_shift == 0) modulo_shift = 8;
// modulo size unknown at translation time: compute it from ST at runtime
442 if (modulo_shift == -1)
444 int reg = (r < 4) ? 8 : 9;
446 if (dirty_regb & KRREG_ST) {
447 // avoid flushing ARM flags
448 EOP_AND_IMM(1, 6, 0, 0x70);
449 EOP_SUB_IMM(1, 1, 0, 0x10);
450 EOP_AND_IMM(1, 1, 0, 0x70);
451 EOP_ADD_IMM(1, 1, 0, 0x10);
453 EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,1,6,1,0,0x70); // ands r1, r6, #0x70
454 EOP_C_DOP_IMM(A_COND_EQ,A_OP_MOV,0,0,1,0,0x80); // moveq r1, #0x80
456 EOP_MOV_REG_LSR(1, 1, 4); // mov r1, r1, lsr #4
457 EOP_RSB_IMM(2, 1, 0, 8); // rsb r1, r1, #8
458 EOP_MOV_IMM(3, 8/2, count); // mov r3, #0x01000000
// rotate the target byte lane into the top byte, modify, rotate back
460 EOP_ADD_IMM(1, 1, 0, (r&3)*8); // add r1, r1, #(r&3)*8
461 EOP_MOV_REG2_ROR(reg,reg,1); // mov reg, reg, ror r1
463 EOP_SUB_REG2_LSL(reg,reg,3,2); // sub reg, reg, #0x01000000 << r2
464 else EOP_ADD_REG2_LSL(reg,reg,3,2);
465 EOP_RSB_IMM(1, 1, 0, 32); // rsb r1, r1, #32
466 EOP_MOV_REG2_ROR(reg,reg,1); // mov reg, reg, ror r1
467 hostreg_r[1] = hostreg_r[2] = hostreg_r[3] = -1;
// reg value known: do the modulo update entirely at translation time
469 else if (known_regb & (1 << (r + 8)))
471 int modulo = (1 << modulo_shift) - 1;
473 known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] - count) & modulo);
474 else known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] + count) & modulo);
// modulo known but reg value not: add/sub within the byte lane so carries
// cannot escape the modulo window
478 int reg = (r < 4) ? 8 : 9;
479 int ror = ((r&3) + 1)*8 - (8 - modulo_shift);
480 EOP_MOV_REG_ROR(reg,reg,ror);
481 // {add|sub} reg, reg, #1<<shift
482 EOP_C_DOP_IMM(A_COND_AL,(mod==2)?A_OP_SUB:A_OP_ADD,0,reg,reg, 8/2, count << (8 - modulo_shift));
483 EOP_MOV_REG_ROR(reg,reg,32-ror);
487 /* handle writes r0 to (rX). Trashes r1.
488 * fortunately we can ignore modulo increment modes for writes. */
// (r3)/(r7) forms are direct bank accesses; otherwise compute the bank
// address from the pointer reg, either statically (known value) or by
// extracting its byte lane from r8/r9 at runtime.
489 static void tr_rX_write(int op)
493 int mod = (op>>2) & 3; // direct addressing
494 tr_bank_write((op & 0x100) + mod);
498 int r = (op&3) | ((op>>6)&4);
499 if (known_regb & (1 << (r + 8))) {
500 tr_bank_write((op&0x100) | known_regs.r[r]);
502 int reg = (r < 4) ? 8 : 9;
503 int ror = ((4 - (r&3))*8) & 0x1f;
504 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
506 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
507 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
508 else EOP_ADD_REG_LSL(1,7,1,1);
509 EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
// post-increment the pointer reg (no modulo needed for writes)
512 tr_ptrr_mod(r, (op>>2) & 3, 0, 1);
516 /* read (rX) to r0. Trashes r1-r3. */
// Same address-formation scheme as tr_rX_write, but loads and applies the
// full modulo-aware pointer update afterwards.
517 static void tr_rX_read(int r, int mod)
521 tr_bank_read(((r << 6) & 0x100) + mod); // direct addressing
525 if (known_regb & (1 << (r + 8))) {
526 tr_bank_read(((r << 6) & 0x100) | known_regs.r[r]);
528 int reg = (r < 4) ? 8 : 9;
529 int ror = ((4 - (r&3))*8) & 0x1f;
530 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
532 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
533 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
534 else EOP_ADD_REG_LSL(1,7,1,1);
535 EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
536 hostreg_r[0] = hostreg_r[1] = -1;
538 tr_ptrr_mod(r, mod, 1, 1);
542 /* read ((rX)) to r0. Trashes r1,r2. */
// Double-indirect read: fetch a program-memory pointer from the RAM bank,
// load the word it points at (via ptr_iram_rom), increment the stored
// pointer and write it back to the same bank slot.
543 static void tr_rX_read2(int op)
545 int r = (op&3) | ((op>>6)&4); // src
// fetch the pointer word from bank RAM into r0 / address into r1
548 tr_bank_read((op&0x100) | ((op>>2)&3));
549 } else if (known_regb & (1 << (r+8))) {
550 tr_bank_read((op&0x100) | known_regs.r[r]);
552 int reg = (r < 4) ? 8 : 9;
553 int ror = ((4 - (r&3))*8) & 0x1f;
554 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
556 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
557 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
558 else EOP_ADD_REG_LSL(1,7,1,1);
559 EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
// r2 = address of pointed-to word in program memory; r0 = pointer + 1
561 EOP_LDR_IMM(2,7,0x48c); // ptr_iram_rom
562 EOP_ADD_REG_LSL(2,2,0,1); // add r2, r2, r0, lsl #1
563 EOP_ADD_IMM(0,0,0,1); // add r0, r0, #1
// store the incremented pointer back into the bank slot
565 tr_bank_write((op&0x100) | ((op>>2)&3));
566 } else if (known_regb & (1 << (r+8))) {
567 tr_bank_write((op&0x100) | known_regs.r[r]);
569 EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
// finally load the target word
572 EOP_LDRH_SIMPLE(0,2); // ldrh r0, [r2]
573 hostreg_r[0] = hostreg_r[2] = -1;
576 // check if AL is going to be used later in block
// Forward-scans the instruction stream from the current PC to decide
// whether writes to A must preserve AL (returns nonzero) or AL can be
// clobbered (returns 0). Used by tr_r0_to_A to emit a cheaper sequence.
// NOTE(review): large parts of the scanning switch are elided in this view.
577 static int tr_predict_al_need(void)
579 int tmpv, tmpv2, op, pc = known_regs.gr[SSP_PC].h;
588 tmpv2 = (op >> 4) & 0xf; // dst
589 tmpv = op & 0xf; // src
590 if ((tmpv2 == SSP_A && tmpv == SSP_P) || tmpv2 == SSP_AL) // ld A, P; ld AL, *
599 case 0x10: case 0x30: case 0x40: case 0x60: case 0x70:
600 tmpv = op & 0xf; // src
601 if (tmpv == SSP_AL) // OP *, AL
611 case 0x74: pc++; break;
621 // mpya (rj), (ri), b
625 case 0x5b: return 0; // cleared anyway
629 tmpv = op & 0xf; // src
630 if (tmpv == SSP_AL) return 1;
631 case 0x51: case 0x53: case 0x54: case 0x55: case 0x59: case 0x5c:
639 /* get ARM cond which would mean that SSP cond is satisfied. No trash. */
// If SSP flags are live in the ARM PSR (dirty ST), the ARM condition can be
// used directly; otherwise test the flag bit held in r6 first, after which
// NE means "bit set".
640 static int tr_cond_check(int op)
642 int f = (op & 0x100) >> 8;
644 case 0x00: return A_COND_AL; /* always true */
645 case 0x50: /* Z matches f(?) bit */
646 if (dirty_regb & KRREG_ST) return f ? A_COND_EQ : A_COND_NE;
647 EOP_TST_IMM(6, 0, 4);
648 return f ? A_COND_NE : A_COND_EQ;
649 case 0x70: /* N matches f(?) bit */
650 if (dirty_regb & KRREG_ST) return f ? A_COND_MI : A_COND_PL;
651 EOP_TST_IMM(6, 0, 8);
652 return f ? A_COND_NE : A_COND_EQ;
654 elprintf(EL_ANOMALY, "unimplemented cond?\n");
// Return the ARM condition code opposite to `cond`; aborts on conditions
// that have no negation in the subset this translator emits.
660 static int tr_neg_cond(int cond)
663 case A_COND_AL: elprintf(EL_ANOMALY, "neg for AL?\n"); exit(1);
664 case A_COND_EQ: return A_COND_NE;
665 case A_COND_NE: return A_COND_EQ;
666 case A_COND_MI: return A_COND_PL;
667 case A_COND_PL: return A_COND_MI;
668 default: elprintf(EL_ANOMALY, "bad cond for neg\n"); exit(1);
// Map an SSP ALU opcode (bits 15-13 of the instruction) to the
// corresponding ARM data-processing opcode.
673 static int tr_aop_ssp2arm(int op)
676 case 1: return A_OP_SUB;
677 case 3: return A_OP_CMP;
678 case 4: return A_OP_ADD;
679 case 5: return A_OP_AND;
680 case 6: return A_OP_ORR;
681 case 7: return A_OP_EOR;
688 // -----------------------------------------------------
// Host register allocation (partially visible here):
// r4 = X:Y packed, r5 = A (AH:AL), r6 = ST/STACK/emu flags, r7 = context,
// r10 = P. Each helper materializes one SSP register in r0, using
// hostreg_r[0] to skip the move when r0 already holds it.
692 //@ r6: STACK and emu flags
696 // read general reg to r0. Trashes r1
697 static void tr_GR0_to_r0(int op)
702 static void tr_X_to_r0(int op)
704 if (hostreg_r[0] != (SSP_X<<16)) {
705 EOP_MOV_REG_LSR(0, 4, 16); // mov r0, r4, lsr #16
706 hostreg_r[0] = SSP_X<<16;
710 static void tr_Y_to_r0(int op)
712 if (hostreg_r[0] != (SSP_Y<<16)) {
713 EOP_MOV_REG_SIMPLE(0, 4); // mov r0, r4
714 hostreg_r[0] = SSP_Y<<16;
718 static void tr_A_to_r0(int op)
720 if (hostreg_r[0] != (SSP_A<<16)) {
721 EOP_MOV_REG_LSR(0, 5, 16); // mov r0, r5, lsr #16 @ AH
722 hostreg_r[0] = SSP_A<<16;
726 static void tr_ST_to_r0(int op)
728 // VR doesn't need much accuracy here..
729 EOP_MOV_REG_LSR(0, 6, 4); // mov r0, r6, lsr #4
730 EOP_AND_IMM(0, 0, 0, 0x67); // and r0, r0, #0x67
// pop from the hardware stack: stack pointer is kept in the top bits of r6,
// stack storage lives in the context at 0x448
734 static void tr_STACK_to_r0(int op)
737 EOP_SUB_IMM(6, 6, 8/2, 0x20); // sub r6, r6, #1<<29
738 EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
739 EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
740 EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28
741 EOP_LDRH_SIMPLE(0, 1); // ldrh r0, [r1]
742 hostreg_r[0] = hostreg_r[1] = -1;
745 static void tr_PC_to_r0(int op)
747 tr_mov16(0, known_regs.gr[SSP_PC].h);
750 static void tr_P_to_r0(int op)
753 EOP_MOV_REG_LSR(0, 10, 16); // mov r0, r10, lsr #16
// reading AL also clears any pending PMC programming state
757 static void tr_AL_to_r0(int op)
760 if (known_regb & KRREG_PMC) {
761 known_regs.emu_status &= ~(SSP_PMC_SET|SSP_PMC_HAVE_ADDR);
763 EOP_LDR_IMM(0,7,0x484); // ldr r1, [r7, #0x484] // emu_status
764 EOP_BIC_IMM(0,0,0,SSP_PMC_SET|SSP_PMC_HAVE_ADDR);
765 EOP_STR_IMM(0,7,0x484);
769 if (hostreg_r[0] != (SSP_AL<<16)) {
770 EOP_MOV_REG_SIMPLE(0, 5); // mov r0, r5
771 hostreg_r[0] = SSP_AL<<16;
// Translate a PM0-PM4 read. If the PMC programming and the pmac value are
// statically known, emit inline ROM/DRAM access code (including a
// wait-loop detection special case); otherwise fall back to calling
// ssp_pm_read at runtime and forget all PMC-related knowledge.
775 static void tr_PMX_to_r0(int reg)
777 if ((known_regb & KRREG_PMC) && (known_regs.emu_status & SSP_PMC_SET))
779 known_regs.pmac_read[reg] = known_regs.pmc.v;
780 known_regs.emu_status &= ~SSP_PMC_SET;
781 known_regb |= 1 << (20+reg);
782 dirty_regb |= 1 << (20+reg);
786 if ((known_regb & KRREG_PMC) && (known_regb & (1 << (20+reg))))
788 u32 pmcv = known_regs.pmac_read[reg];
790 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
792 if ((mode & 0xfff0) == 0x0800)
// ROM read, fixed +1 post-increment
794 EOP_LDR_IMM(1,7,0x488); // rom_ptr
795 emit_mov_const(A_COND_AL, 0, (pmcv&0xfffff)<<1);
796 EOP_LDRH_REG(0,1,0); // ldrh r0, [r1, r0]
797 known_regs.pmac_read[reg] += 1;
799 else if ((mode & 0x47ff) == 0x0018) // DRAM
801 int inc = get_inc(mode);
802 EOP_LDR_IMM(1,7,0x490); // dram_ptr
803 emit_mov_const(A_COND_AL, 0, (pmcv&0xffff)<<1);
804 EOP_LDRH_REG(0,1,0); // ldrh r0, [r1, r0]
805 if (reg == 4 && (pmcv == 0x187f03 || pmcv == 0x187f04)) // wait loop detection
807 int flag = (pmcv == 0x187f03) ? SSP_WAIT_30FE06 : SSP_WAIT_30FE08;
// if the polled word is 0, burn cycles (r11) and set the wait flag
809 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
810 EOP_TST_REG_SIMPLE(0,0);
811 EOP_C_DOP_IMM(A_COND_EQ,A_OP_SUB,0,11,11,22/2,1); // subeq r11, r11, #1024
812 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1,24/2,flag>>8); // orreq r1, r1, #SSP_WAIT_30FE08
813 EOP_STR_IMM(1,7,0x484); // str r1, [r7, #0x484] // emu_status
815 known_regs.pmac_read[reg] += inc;
821 known_regs.pmc.v = known_regs.pmac_read[reg];
822 //known_regb |= KRREG_PMC;
823 dirty_regb |= KRREG_PMC;
824 dirty_regb |= 1 << (20+reg);
825 hostreg_r[0] = hostreg_r[1] = -1;
// slow path: state not statically known, drop all PMC knowledge
829 known_regb &= ~KRREG_PMC;
830 dirty_regb &= ~KRREG_PMC;
831 known_regb &= ~(1 << (20+reg));
832 dirty_regb &= ~(1 << (20+reg));
834 // call the C code to handle this
836 //tr_flush_dirty_pmcrs();
838 emit_call(A_COND_AL, ssp_pm_read);
// Thin per-register wrappers around tr_PMX_to_r0 (bodies elided in this
// view), plus XST and PMC readers.
842 static void tr_PM0_to_r0(int op)
847 static void tr_PM1_to_r0(int op)
852 static void tr_PM2_to_r0(int op)
// XST is read directly from the register file in the context (r7+0x400)
857 static void tr_XST_to_r0(int op)
859 EOP_ADD_IMM(0, 7, 24/2, 4); // add r0, r7, #0x400
860 EOP_LDRH_IMM(0, 0, SSP_XST*4+2);
863 static void tr_PM4_to_r0(int op)
// Reading PMC toggles its two-step programming state (address word first,
// then mode word). Handled statically when PMC is known, otherwise with
// emitted code that flips the HAVE_ADDR/SET bits in emu_status.
868 static void tr_PMC_to_r0(int op)
870 if (known_regb & KRREG_PMC)
872 if (known_regs.emu_status & SSP_PMC_HAVE_ADDR) {
873 known_regs.emu_status |= SSP_PMC_SET;
874 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
875 // do nothing - this is handled elsewhere
877 tr_mov16(0, known_regs.pmc.l);
878 known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
883 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
886 EOP_LDR_IMM(0, 7, 0x400+SSP_PMC*4);
887 EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
888 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // orreq r1, r1, #..
889 EOP_C_DOP_IMM(A_COND_NE,A_OP_BIC,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // bicne r1, r1, #..
890 EOP_C_DOP_IMM(A_COND_NE,A_OP_ORR,0, 1, 1, 0, SSP_PMC_SET); // orrne r1, r1, #..
891 EOP_STR_IMM(1,7,0x484);
892 hostreg_r[0] = hostreg_r[1] = -1;
// Dispatch table: one reader per SSP general-register index (0-15).
// NOTE(review): most initializer entries are elided in this view.
897 typedef void (tr_read_func)(int op);
899 static tr_read_func *tr_read_funcs[16] =
914 (tr_read_func *)tr_unhandled,
920 // write r0 to general reg handlers. Trashes r1
// Common bookkeeping after storing r0 into an SSP register: record the
// r0<->reg mapping and update the known-value bit depending on whether a
// compile-time constant (const_val != -1) was written.
921 #define TR_WRITE_R0_TO_REG(reg) \
923 hostreg_sspreg_changed(reg); \
924 hostreg_r[0] = (reg)<<16; \
925 if (const_val != -1) { \
926 known_regs.gr[reg].h = const_val; \
927 known_regb |= 1 << (reg); \
929 known_regb &= ~(1 << (reg)); \
// Per-register write handlers: store r0 into the emulated register's home
// (packed host regs r4/r5/r6 or memory) and run TR_WRITE_R0_TO_REG
// bookkeeping. const_val is the statically known value being written, or
// -1 if unknown.
933 static void tr_r0_to_GR0(int const_val)
938 static void tr_r0_to_X(int const_val)
940 EOP_MOV_REG_LSL(4, 4, 16); // mov r4, r4, lsl #16
941 EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16
942 EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
943 dirty_regb |= KRREG_P; // touching X or Y makes P dirty.
944 TR_WRITE_R0_TO_REG(SSP_X);
947 static void tr_r0_to_Y(int const_val)
949 EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16
950 EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
951 EOP_MOV_REG_ROR(4, 4, 16); // mov r4, r4, ror #16
952 dirty_regb |= KRREG_P;
953 TR_WRITE_R0_TO_REG(SSP_Y);
// writing AH: preserve AL only if the lookahead says it will be read
956 static void tr_r0_to_A(int const_val)
958 if (tr_predict_al_need()) {
959 EOP_MOV_REG_LSL(5, 5, 16); // mov r5, r5, lsl #16
960 EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16 @ AL
961 EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
964 EOP_MOV_REG_LSL(5, 0, 16);
965 TR_WRITE_R0_TO_REG(SSP_A);
968 static void tr_r0_to_ST(int const_val)
970 // VR doesn't need much accuracy here..
971 EOP_AND_IMM(1, 0, 0, 0x67); // and r1, r0, #0x67
972 EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
973 EOP_ORR_REG_LSL(6, 6, 1, 4); // orr r6, r6, r1, lsl #4
974 TR_WRITE_R0_TO_REG(SSP_ST);
976 dirty_regb &= ~KRREG_ST;
// push onto the hardware stack (pointer in top bits of r6, storage at
// context+0x448)
979 static void tr_r0_to_STACK(int const_val)
982 EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
983 EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
984 EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28
985 EOP_STRH_SIMPLE(0, 1); // strh r0, [r1]
986 EOP_ADD_IMM(6, 6, 8/2, 0x20); // add r6, r6, #1<<29
990 static void tr_r0_to_PC(int const_val)
993 * do nothing - dispatcher will take care of this
994 EOP_MOV_REG_LSL(1, 0, 16); // mov r1, r0, lsl #16
995 EOP_STR_IMM(1,7,0x400+6*4); // str r1, [r7, #(0x400+6*8)]
// AL lives in the low half of r5; note this tracks KRREG_AL, not the
// TR_WRITE_R0_TO_REG macro, since AL is not a regular gr[] slot
1000 static void tr_r0_to_AL(int const_val)
1002 EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16
1003 EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
1004 EOP_MOV_REG_ROR(5, 5, 16); // mov r5, r5, ror #16
1005 hostreg_sspreg_changed(SSP_AL);
1006 if (const_val != -1) {
1007 known_regs.gr[SSP_A].l = const_val;
1008 known_regb |= 1 << SSP_AL;
1010 known_regb &= ~(1 << SSP_AL);
// Translate a PM0-PM4 write, mirroring tr_PMX_to_r0: inline DRAM /
// DRAM-cell-inc / IRAM store code when the PMC programming is statically
// known, else fall back to calling ssp_pm_write and forget PMC state.
1013 static void tr_r0_to_PMX(int reg)
1015 if ((known_regb & KRREG_PMC) && (known_regs.emu_status & SSP_PMC_SET))
1017 known_regs.pmac_write[reg] = known_regs.pmc.v;
1018 known_regs.emu_status &= ~SSP_PMC_SET;
1019 known_regb |= 1 << (25+reg);
1020 dirty_regb |= 1 << (25+reg);
1024 if ((known_regb & KRREG_PMC) && (known_regb & (1 << (25+reg))))
1028 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
1030 mode = known_regs.pmac_write[reg]>>16;
1031 addr = known_regs.pmac_write[reg]&0xffff;
1032 if ((mode & 0x43ff) == 0x0018) // DRAM
1034 int inc = get_inc(mode);
// overwrite mode (bit 10) is not supported inline
1035 if (mode & 0x0400) tr_unhandled();
1036 EOP_LDR_IMM(1,7,0x490); // dram_ptr
1037 emit_mov_const(A_COND_AL, 2, addr<<1);
1038 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1039 known_regs.pmac_write[reg] += inc;
1041 else if ((mode & 0xfbff) == 0x4018) // DRAM, cell inc
1043 if (mode & 0x0400) tr_unhandled();
1044 EOP_LDR_IMM(1,7,0x490); // dram_ptr
1045 emit_mov_const(A_COND_AL, 2, addr<<1);
1046 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1047 known_regs.pmac_write[reg] += (addr&1) ? 31 : 1;
1049 else if ((mode & 0x47ff) == 0x001c) // IRAM
1051 int inc = get_inc(mode);
1052 EOP_LDR_IMM(1,7,0x48c); // iram_ptr
1053 emit_mov_const(A_COND_AL, 2, (addr&0x3ff)<<1);
1054 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1056 EOP_STR_IMM(1,7,0x494); // iram_dirty
1057 known_regs.pmac_write[reg] += inc;
1062 known_regs.pmc.v = known_regs.pmac_write[reg];
1063 //known_regb |= KRREG_PMC;
1064 dirty_regb |= KRREG_PMC;
1065 dirty_regb |= 1 << (25+reg);
1066 hostreg_r[1] = hostreg_r[2] = -1;
// slow path: state not statically known, drop all PMC knowledge
1070 known_regb &= ~KRREG_PMC;
1071 dirty_regb &= ~KRREG_PMC;
1072 known_regb &= ~(1 << (25+reg));
1073 dirty_regb &= ~(1 << (25+reg));
1075 // call the C code to handle this
1076 tr_flush_dirty_ST();
1077 //tr_flush_dirty_pmcrs();
1079 emit_call(A_COND_AL, ssp_pm_write);
// Thin per-register wrappers around tr_r0_to_PMX (bodies elided in this
// view), plus the PMC write handler.
1083 static void tr_r0_to_PM0(int const_val)
1088 static void tr_r0_to_PM1(int const_val)
1093 static void tr_r0_to_PM2(int const_val)
1098 static void tr_r0_to_PM4(int const_val)
// Writing PMC advances its two-step programming (address word, then mode
// word). Fully static when both PMC and the value are known; otherwise
// flush any known PMC and emit code that stores into the correct half of
// the PMC slot and updates emu_status bits.
1103 static void tr_r0_to_PMC(int const_val)
1105 if ((known_regb & KRREG_PMC) && const_val != -1)
1107 if (known_regs.emu_status & SSP_PMC_HAVE_ADDR) {
1108 known_regs.emu_status |= SSP_PMC_SET;
1109 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
1110 known_regs.pmc.h = const_val;
1112 known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
1113 known_regs.pmc.l = const_val;
1118 tr_flush_dirty_ST();
1119 if (known_regb & KRREG_PMC) {
1120 emit_mov_const(A_COND_AL, 1, known_regs.pmc.v);
1121 EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
1122 known_regb &= ~KRREG_PMC;
1123 dirty_regb &= ~KRREG_PMC;
1125 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
1126 EOP_ADD_IMM(2,7,24/2,4); // add r2, r7, #0x400
1127 EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
1128 EOP_C_AM3_IMM(A_COND_EQ,1,0,2,0,0,1,SSP_PMC*4); // strxx r0, [r2, #SSP_PMC]
1129 EOP_C_AM3_IMM(A_COND_NE,1,0,2,0,0,1,SSP_PMC*4+2);
1130 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // orreq r1, r1, #..
1131 EOP_C_DOP_IMM(A_COND_NE,A_OP_BIC,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // bicne r1, r1, #..
1132 EOP_C_DOP_IMM(A_COND_NE,A_OP_ORR,0, 1, 1, 0, SSP_PMC_SET); // orrne r1, r1, #..
1133 EOP_STR_IMM(1,7,0x484);
1134 hostreg_r[1] = hostreg_r[2] = -1;
// Dispatch table: one writer per SSP general-register index (0-15).
// NOTE(review): most initializer entries are elided in this view.
1138 typedef void (tr_write_func)(int const_val);
1140 static tr_write_func *tr_write_funcs[16] =
1149 (tr_write_func *)tr_unhandled,
1153 (tr_write_func *)tr_unhandled,
1155 (tr_write_func *)tr_unhandled,
// Load both multiplier operands for a MAC op: X from (ri), Y from (rj),
// packing them into host r4 (X high half, Y low half); marks P dirty and
// drops any known X/Y values.
1160 static void tr_mac_load_XY(int op)
1162 tr_rX_read(op&3, (op>>2)&3); // X
1163 EOP_MOV_REG_LSL(4, 0, 16);
1164 tr_rX_read(((op>>4)&3)|4, (op>>6)&3); // Y
1165 EOP_ORR_REG_SIMPLE(4, 0);
1166 dirty_regb |= KRREG_P;
1167 hostreg_sspreg_changed(SSP_X);
1168 hostreg_sspreg_changed(SSP_Y);
1169 known_regb &= ~KRREG_X;
1170 known_regb &= ~KRREG_Y;
1173 // -----------------------------------------------------
// Pattern detector: recognizes the two-instruction "ld PMC, imm; ld PMC,
// imm" sequence that programs a PM register, records the full 32bit PMC
// value statically, and if the next op reads/writes a PMx register, also
// pre-binds the pmac slot. Returns 0 when the pattern does not match.
1175 static int tr_detect_set_pm(unsigned int op, int *pc, int imm)
1178 if (!((op&0xfef0) == 0x08e0 && (PROGRAM(*pc)&0xfef0) == 0x08e0)) return 0;
1184 pmcv = imm | (PROGRAM((*pc)++) << 16);
1185 known_regs.pmc.v = pmcv;
1186 known_regb |= KRREG_PMC;
1187 dirty_regb |= KRREG_PMC;
1188 known_regs.emu_status |= SSP_PMC_SET;
1191 // check for possible reg programming
1192 tmpv = PROGRAM(*pc);
1193 if ((tmpv & 0xfff8) == 0x08 || (tmpv & 0xff8f) == 0x80)
1195 int is_write = (tmpv & 0xff8f) == 0x80;
1196 int reg = is_write ? ((tmpv>>4)&0x7) : (tmpv&0x7);
1197 if (reg > 4) tr_unhandled();
1198 if ((tmpv & 0x0f) != 0 && (tmpv & 0xf0) != 0) tr_unhandled();
// pmac_read[] is indexed 0-4 for reads, 5-9 for writes here
1199 known_regs.pmac_read[is_write ? reg + 5 : reg] = pmcv;
1200 known_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
1201 dirty_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
1202 known_regs.emu_status &= ~SSP_PMC_SET;
// Instruction sequence that follows the "ld PM0, 0x840" game-specific
// status-setup idiom; used to match and collapse the whole block below.
1212 static const short pm0_block_seq[] = { 0x0880, 0, 0x0880, 0, 0x0840, 0x60 };
// Pattern detector for that PM0 setup block: when matched, just set
// ST to 0x60 statically instead of translating the PM accesses.
1214 static int tr_detect_pm0_block(unsigned int op, int *pc, int imm)
1221 if (op != 0x0840 || imm != 0) return 0;
1222 pp = PROGRAM_P(*pc);
1223 if (memcmp(pp, pm0_block_seq, sizeof(pm0_block_seq)) != 0) return 0;
1225 EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
1226 EOP_ORR_IMM(6, 6, 24/2, 6); // orr r6, r6, 0x600
1227 hostreg_sspreg_changed(SSP_ST);
1228 known_regs.gr[SSP_ST].h = 0x60;
1229 known_regb |= 1 << SSP_ST;
1230 dirty_regb &= ~KRREG_ST;
// Pattern detector: recognizes a 3-op 16bit-rotate-by-4 idiom
// (0x02e3/0x04e3/0x000f) and emits it as two ARM shifts on r0.
// NOTE(review): the tail of this function is elided in this view.
1236 static int tr_detect_rotate(unsigned int op, int *pc, int imm)
1242 if (op != 0x02e3 || PROGRAM(*pc) != 0x04e3 || PROGRAM(*pc + 1) != 0x000f) return 0;
1245 EOP_MOV_REG_LSL(0, 0, 4);
1246 EOP_ORR_REG_LSR(0, 0, 0, 16);
1253 // -----------------------------------------------------
1255 static int translate_op(unsigned int op, int *pc, int imm, int *end_cond, int *jump_pc)
1257 u32 tmpv, tmpv2, tmpv3;
1259 known_regs.gr[SSP_PC].h = *pc;
1265 if (op == 0) { ret++; break; } // nop
1266 tmpv = op & 0xf; // src
1267 tmpv2 = (op >> 4) & 0xf; // dst
1268 if (tmpv2 == SSP_A && tmpv == SSP_P) { // ld A, P
1270 EOP_MOV_REG_SIMPLE(5, 10);
1271 hostreg_sspreg_changed(SSP_A);
1272 known_regb &= ~(KRREG_A|KRREG_AL);
1275 tr_read_funcs[tmpv](op);
1276 tr_write_funcs[tmpv2]((known_regb & (1 << tmpv)) ? known_regs.gr[tmpv].h : -1);
1277 if (tmpv2 == SSP_PC) {
1279 *end_cond = -A_COND_AL;
1285 int r = (op&3) | ((op>>6)&4);
1286 int mod = (op>>2)&3;
1287 tmpv = (op >> 4) & 0xf; // dst
1288 ret = tr_detect_rotate(op, pc, imm);
1294 while (PROGRAM(*pc) == op) {
1295 (*pc)++; cnt++; ret++;
1298 tr_ptrr_mod(r, mod, 1, cnt); // skip
1300 tr_write_funcs[tmpv](-1);
1301 if (tmpv == SSP_PC) {
1303 *end_cond = -A_COND_AL;
1310 tmpv = (op >> 4) & 0xf; // src
1311 tr_read_funcs[tmpv](op);
1317 tr_bank_read(op&0x1ff);
1323 tmpv = (op & 0xf0) >> 4; // dst
1324 ret = tr_detect_pm0_block(op, pc, imm);
1326 ret = tr_detect_set_pm(op, pc, imm);
1329 tr_write_funcs[tmpv](imm);
1330 if (tmpv == SSP_PC) {
1338 tmpv2 = (op >> 4) & 0xf; // dst
1340 tr_write_funcs[tmpv2](-1);
1341 if (tmpv2 == SSP_PC) {
1343 *end_cond = -A_COND_AL;
1356 tr_bank_write(op&0x1ff);
1362 r = (op&3) | ((op>>6)&4); // src
1363 tmpv2 = (op >> 4) & 0xf; // dst
1364 if ((r&3) == 3) tr_unhandled();
1366 if (known_regb & (1 << (r+8))) {
1367 tr_mov16(0, known_regs.r[r]);
1368 tr_write_funcs[tmpv2](known_regs.r[r]);
1370 int reg = (r < 4) ? 8 : 9;
1371 if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
1372 EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
1374 tr_write_funcs[tmpv2](-1);
1382 r = (op&3) | ((op>>6)&4); // dst
1383 tmpv = (op >> 4) & 0xf; // src
1384 if ((r&3) == 3) tr_unhandled();
1386 if (known_regb & (1 << tmpv)) {
1387 known_regs.r[r] = known_regs.gr[tmpv].h;
1388 known_regb |= 1 << (r + 8);
1389 dirty_regb |= 1 << (r + 8);
1391 int reg = (r < 4) ? 8 : 9;
1392 int ror = ((4 - (r&3))*8) & 0x1f;
1393 tr_read_funcs[tmpv](op);
1394 EOP_BIC_IMM(reg, reg, ror/2, 0xff); // bic r{7,8}, r{7,8}, <mask>
1395 EOP_AND_IMM(0, 0, 0, 0xff); // and r0, r0, 0xff
1396 EOP_ORR_REG_LSL(reg, reg, 0, (r&3)*8); // orr r{7,8}, r{7,8}, r0, lsl #lsl
1398 known_regb &= ~(1 << (r+8));
1399 dirty_regb &= ~(1 << (r+8));
1405 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
1407 known_regs.r[tmpv] = op;
1408 known_regb |= 1 << (tmpv + 8);
1409 dirty_regb |= 1 << (tmpv + 8);
1414 u32 *jump_op = NULL;
1415 tmpv = tr_cond_check(op);
1416 if (tmpv != A_COND_AL) {
1417 jump_op = tcache_ptr;
1418 EOP_MOV_IMM(0, 0, 0); // placeholder for branch
1421 tr_r0_to_STACK(*pc);
1422 if (tmpv != A_COND_AL) {
1423 u32 *real_ptr = tcache_ptr;
1424 tcache_ptr = jump_op;
1425 EOP_C_B(tr_neg_cond(tmpv),0,real_ptr - jump_op - 2);
1426 tcache_ptr = real_ptr;
1428 tr_mov16_cond(tmpv, 0, imm);
1429 if (tmpv != A_COND_AL)
1430 tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
1431 tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
1440 tmpv2 = (op >> 4) & 0xf; // dst
1442 EOP_LDR_IMM(1,7,0x48c); // ptr_iram_rom
1443 EOP_ADD_REG_LSL(0,1,0,1); // add r0, r1, r0, lsl #1
1444 EOP_LDRH_SIMPLE(0,0); // ldrh r0, [r0]
1445 hostreg_r[0] = hostreg_r[1] = -1;
1446 tr_write_funcs[tmpv2](-1);
1447 if (tmpv2 == SSP_PC) {
1449 *end_cond = -A_COND_AL;
1455 tmpv = tr_cond_check(op);
1456 tr_mov16_cond(tmpv, 0, imm);
1457 if (tmpv != A_COND_AL)
1458 tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
1459 tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
1467 // check for repeats of this op
1469 while (PROGRAM(*pc) == op && (op & 7) != 6) {
1473 if ((op&0xf0) != 0) // !always
1476 tmpv2 = tr_cond_check(op);
1478 case 2: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_ASR,5); break; // shr (arithmetic)
1479 case 3: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_LSL,5); break; // shl
1480 case 6: EOP_C_DOP_IMM(tmpv2,A_OP_RSB,1,5,5,0,0); break; // neg
1481 case 7: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_EOR,0,5,1,31,A_AM1_ASR,5); // eor r1, r5, r5, asr #31
1482 EOP_C_DOP_REG_XIMM(tmpv2,A_OP_ADD,1,1,5,31,A_AM1_LSR,5); // adds r5, r1, r5, lsr #31
1483 hostreg_r[1] = -1; break; // abs
1484 default: tr_unhandled();
1487 hostreg_sspreg_changed(SSP_A);
1488 dirty_regb |= KRREG_ST;
1489 known_regb &= ~KRREG_ST;
1490 known_regb &= ~(KRREG_A|KRREG_AL);
1499 EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_SUB,1,5,5,0,A_AM1_LSL,10); // subs r5, r5, r10
1500 hostreg_sspreg_changed(SSP_A);
1501 known_regb &= ~(KRREG_A|KRREG_AL);
1502 dirty_regb |= KRREG_ST;
1505 // mpya (rj), (ri), b
1510 EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_ADD,1,5,5,0,A_AM1_LSL,10); // adds r5, r5, r10
1511 hostreg_sspreg_changed(SSP_A);
1512 known_regb &= ~(KRREG_A|KRREG_AL);
1513 dirty_regb |= KRREG_ST;
1516 // mld (rj), (ri), b
1518 EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,1,0,5,0,0); // movs r5, #0
1519 hostreg_sspreg_changed(SSP_A);
1520 known_regs.gr[SSP_A].v = 0;
1521 known_regb |= (KRREG_A|KRREG_AL);
1522 dirty_regb |= KRREG_ST;
1533 tmpv = op & 0xf; // src
1534 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1535 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1536 if (tmpv == SSP_P) {
1538 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL,10); // OPs r5, r5, r10
1539 } else if (tmpv == SSP_A) {
1540 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL, 5); // OPs r5, r5, r5
1542 tr_read_funcs[tmpv](op);
1543 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL, 0); // OPs r5, r5, r0, lsl #16
1545 hostreg_sspreg_changed(SSP_A);
1546 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1547 dirty_regb |= KRREG_ST;
1557 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1558 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1559 tr_rX_read((op&3)|((op>>6)&4), (op>>2)&3);
1560 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1561 hostreg_sspreg_changed(SSP_A);
1562 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1563 dirty_regb |= KRREG_ST;
1573 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1574 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1575 tr_bank_read(op&0x1ff);
1576 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1577 hostreg_sspreg_changed(SSP_A);
1578 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1579 dirty_regb |= KRREG_ST;
1589 tmpv = (op & 0xf0) >> 4;
1590 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1591 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1593 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1594 hostreg_sspreg_changed(SSP_A);
1595 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1596 dirty_regb |= KRREG_ST;
1606 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1607 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1609 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1610 hostreg_sspreg_changed(SSP_A);
1611 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1612 dirty_regb |= KRREG_ST;
1623 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1624 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1625 r = (op&3) | ((op>>6)&4); // src
1626 if ((r&3) == 3) tr_unhandled();
1628 if (known_regb & (1 << (r+8))) {
1629 EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,known_regs.r[r]); // OPs r5, r5, #val<<16
1631 int reg = (r < 4) ? 8 : 9;
1632 if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
1633 EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
1634 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1637 hostreg_sspreg_changed(SSP_A);
1638 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1639 dirty_regb |= KRREG_ST;
1650 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1651 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1652 EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,op & 0xff); // OPs r5, r5, #val<<16
1653 hostreg_sspreg_changed(SSP_A);
1654 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1655 dirty_regb |= KRREG_ST;
// Emit a translated block's entry sequence: a cycle-budget check that
// leaves the translation cache through ssp_drc_end once the cycle
// down-counter kept in r11 has been exhausted.
static void emit_block_prologue(void)
// check if there are enough cycles..
// note: r0 must contain PC of current block
EOP_CMP_IMM(11,0,0); // cmp r11, #0
emit_jump(A_COND_LE, ssp_drc_end); // ble ssp_drc_end (out of cycles)
1673 * >0: direct (un)conditional jump
// Emit a translated block's exit sequence: charge the cycles the block
// consumed against the budget in r11, then transfer control to the next
// block - through the ssp_drc_next dispatcher when the target is
// indirect (cond < 0) or crosses from ROM into IRAM, straight to an
// already-translated target when its address is known, and otherwise
// via patchable ssp_drc_next_patch call sites so the dispatcher can
// link the branch in place once the target gets translated.
static void emit_block_epilogue(int cycles, int cond, int pc, int end_pc)
// clamp: the SUB immediate encoding below only holds 8 bits
if (cycles > 0xff) { elprintf(EL_ANOMALY, "large cycle count: %i\n", cycles); cycles = 0xff; }
EOP_SUB_IMM(11,11,0,cycles); // sub r11, r11, #cycles
if (cond < 0 || (end_pc >= 0x400 && pc < 0x400)) {
// indirect jump, or rom -> iram jump, must use dispatcher
emit_jump(A_COND_AL, ssp_drc_next);
else if (cond == A_COND_AL) {
// unconditional direct jump; IRAM targets (pc < 0x400) are looked up
// per iram_context, ROM targets by pc alone
u32 *target = (pc < 0x400) ?
ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + pc] :
ssp_block_table[pc];
emit_jump(A_COND_AL, target);
// target not translated yet: jump to dispatcher for now
int ops = emit_jump(A_COND_AL, ssp_drc_next);
// cause the next block to be emitted over jump instruction
// conditional direct jump: resolve both the taken (pc) and
// fall-through (end_pc) targets
u32 *target1 = (pc < 0x400) ?
ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + pc] :
ssp_block_table[pc];
u32 *target2 = (end_pc < 0x400) ?
ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + end_pc] :
ssp_block_table[end_pc];
if (target1 != NULL)
emit_jump(cond, target1);
if (target2 != NULL)
emit_jump(tr_neg_cond(cond), target2); // neg_cond, to be able to swap jumps if needed
// emit patchable branches for the not-yet-translated side(s)
if (target1 == NULL)
emit_call(cond, ssp_drc_next_patch);
if (target2 == NULL)
emit_call(tr_neg_cond(cond), ssp_drc_next_patch);
// won't patch indirect jumps
if (target1 == NULL || target2 == NULL)
emit_jump(A_COND_AL, ssp_drc_next);
// Translate one SSP1601 basic block starting at program-word address
// 'pc' into ARM code emitted at tcache_ptr. Translation continues until
// an op ends the block (translate_op sets bit 16 of its return value)
// or the accumulated cycle estimate reaches 100. Presumably returns
// block_start (the return statement falls outside this excerpt).
void *ssp_translate_block(int pc)
unsigned int op, op1, imm, ccount = 0; // ccount: emulated cycles accumulated
unsigned int *block_start;
int ret, end_cond = A_COND_AL, jump_pc = -1;
//printf("translate %04x -> %04x\n", pc<<1, (tcache_ptr-tcache)<<2);
block_start = tcache_ptr;
dirty_regb = KRREG_P; // P starts flagged dirty - TODO confirm: forces recalc on first use
known_regs.emu_status = 0;
emit_block_prologue();
for (; ccount < 100;)
// opcode classes 4 and 6 are followed by an immediate program word
if ((op1 & 0xf) == 4 || (op1 & 0xf) == 6)
imm = PROGRAM(pc++); // immediate
ret = translate_op(op, &pc, imm, &end_cond, &jump_pc);
// ret <= 0 means no translator handled this opcode
elprintf(EL_ANOMALY, "NULL func! op=%08x (%02x)\n", op, op1);
ccount += ret & 0xffff; // low 16 bits: cycles consumed by this op
if (ret & 0x10000) break; // bit 16: op ended the block
if (ccount >= 100) {
// cycle limit hit mid-block: force an unconditional exit at current pc
end_cond = A_COND_AL;
emit_mov_const(A_COND_AL, 0, pc);
// write back all cached/dirty emulated state before leaving the block
tr_flush_dirty_prs();
tr_flush_dirty_ST();
tr_flush_dirty_pmcrs();
emit_block_epilogue(ccount, end_cond, jump_pc, pc);
// tcache_ptr - tcache counts 32-bit words, hence the /4 on the byte size
if (tcache_ptr - tcache > DRC_TCACHE_SIZE/4) {
elprintf(EL_ANOMALY|EL_STATUS|EL_SVP, "tcache overflow!\n");
//printf("%i blocks, %i bytes, k=%.3f\n", nblocks, (tcache_ptr - tcache)*4,
// (double)(tcache_ptr - tcache) / (double)n_in_ops);
// debug dump of the whole translation cache (DUMP_BLOCK builds only)
FILE *f = fopen("tcache.bin", "wb"); // NOTE(review): fopen result unchecked - debug-only path
fwrite(tcache, 1, (tcache_ptr - tcache)*4, f);
printf("dumped tcache.bin\n");
1797 // -----------------------------------------------------
1799 static void ssp1601_state_load(void)
1801 ssp->drc.iram_dirty = 1;
1802 ssp->drc.iram_context = 0;
1805 void ssp1601_dyn_exit(void)
1807 free(ssp_block_table);
1808 free(ssp_block_table_iram);
1809 ssp_block_table = ssp_block_table_iram = NULL;
1814 int ssp1601_dyn_startup(void)
1818 ssp_block_table = calloc(sizeof(ssp_block_table[0]), SSP_BLOCKTAB_ENTS);
1819 if (ssp_block_table == NULL)
1821 ssp_block_table_iram = calloc(sizeof(ssp_block_table_iram[0]), SSP_BLOCKTAB_IRAM_ENTS);
1822 if (ssp_block_table_iram == NULL) {
1823 free(ssp_block_table);
1827 memset(tcache, 0, DRC_TCACHE_SIZE);
1828 tcache_ptr = tcache;
1830 PicoLoadStateHook = ssp1601_state_load;
1835 ssp_block_table[0x800/2] = (void *) ssp_hle_800;
1836 ssp_block_table[0x902/2] = (void *) ssp_hle_902;
1837 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ENTS + 0x030/2] = (void *) ssp_hle_07_030;
1838 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ENTS + 0x036/2] = (void *) ssp_hle_07_036;
1839 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ENTS + 0x6d6/2] = (void *) ssp_hle_07_6d6;
1840 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ENTS + 0x12c/2] = (void *) ssp_hle_11_12c;
1841 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ENTS + 0x384/2] = (void *) ssp_hle_11_384;
1842 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ENTS + 0x38a/2] = (void *) ssp_hle_11_38a;
// Per-reset DRC state setup. The generated ARM code reads these
// ssp->drc fields directly (asm-side struct offsets), so the host
// pointers are cached here as integers.
// Note: the parameter deliberately shadows the global 'ssp'.
void ssp1601_dyn_reset(ssp1601_t *ssp)
ssp->drc.iram_dirty = 1;
ssp->drc.iram_context = 0;
// must do this here because ssp is not available @ startup()
ssp->drc.ptr_rom = (u32) Pico.rom;
ssp->drc.ptr_iram_rom = (u32) svp->iram_rom;
ssp->drc.ptr_dram = (u32) svp->dram;
ssp->drc.ptr_btable = (u32) ssp_block_table;
ssp->drc.ptr_btable_iram = (u32) ssp_block_table_iram;
// NOTE(review): pointer -> u32 casts assume a 32-bit host (the ARM
// target this recompiler emits for) - not portable to 64-bit builds
// prevent new versions of IRAM from appearing
memset(svp->iram_rom, 0, 0x800);
// Execute translated SSP code for (at most) 'cycles' SSP cycles, unless
// the SSP is currently halted waiting on the host CPU.
void ssp1601_dyn_run(int cycles)
if (ssp->emu_status & SSP_WAIT_MASK) return;
// debug aid: pre-translate the block named by DUMP_BLOCK (presumably
// compiled in only under #ifdef DUMP_BLOCK - the macro is commented
// out near the top of the file)
ssp_translate_block(DUMP_BLOCK >> 1);
// enter the generated code; returns once the cycle budget is spent
ssp_drc_entry(cycles);