2 * SSP1601 to ARM recompiler
3 * (C) notaz, 2008,2009,2010
5 * This work is licensed under the terms of MAME license.
6 * See COPYING file in the top-level directory.
9 #include "../../pico_int.h"
10 #include "../../../cpu/drc/cmn.h"
// ---- DRC global state ----
// NOTE(review): this listing is a garbled extraction -- original line
// numbers are fused into the text and many lines are missing. Comments
// below describe only what is visible here.
13 // FIXME: asm has these hardcoded
// Translation-block table sizing (one entry per halfword PC slot).
14 #define SSP_BLOCKTAB_ENTS (0x5090/2)
15 #define SSP_BLOCKTAB_IRAM_ONE (0x800/2) // table entries
16 #define SSP_BLOCKTAB_IRAM_ENTS (15*SSP_BLOCKTAB_IRAM_ONE)
// Lookup tables mapping SSP PC -> translated code; IRAM gets one table
// per detected "IRAM context" (15 of them -- see iram_context_map).
18 static u32 **ssp_block_table; // [0x5090/2];
19 static u32 **ssp_block_table_iram; // [15][0x800/2];
// Current emit position in the translation cache.
21 static u32 *tcache_ptr = NULL;
23 static int nblocks = 0;
24 static int n_in_ops = 0;
26 extern ssp1601_t *ssp;
// Shorthands for emulated SSP1601 PC (high half of gr[SSP_PC]) and PMC.
28 #define rPC ssp->gr[SSP_PC].h
29 #define rPMC ssp->gr[SSP_PMC]
// Z/N flag bit positions within the SSP status register ST.
31 #define SSP_FLAG_Z (1<<0xd)
32 #define SSP_FLAG_N (1<<0xf)
35 //#define DUMP_BLOCK 0x0c9a
// Dispatcher entry points; presumably provided by asm elsewhere and
// stubbed here -- TODO confirm against the build configuration.
36 void ssp_drc_next(void){}
37 void ssp_drc_next_patch(void){}
38 void ssp_drc_end(void){}
42 #include "../../../cpu/drc/emit_arm.c"
44 // -----------------------------------------------------
/*
 * Decode the auto-increment field of a PMC mode word.
 *
 * Bits 11-13 select the step: 0 -> 0, 1..6 -> 1,2,4,8,16,32, 7 -> 128.
 * Bit 15 set means decrement mode (step is negated).
 * NOTE(review): the extracted listing had the surrounding control flow
 * (braces/return) stripped; reconstructed here.
 *
 * @param mode  PMC mode word (upper 16 bits of a PMAC value)
 * @return signed per-access increment in words
 */
static int get_inc(int mode)
{
	int inc = (mode >> 11) & 7;
	if (inc != 0) {
		if (inc != 7) inc--;
		inc = 1 << inc; // 0 1 2 4 8 16 32 128
		if (mode & 0x8000) inc = -inc; // decrement mode
	}
	return inc;
}
// C fallback for reads of the PMx "programmable memory" registers; called
// from generated code when the access can't be resolved at translation time.
// NOTE(review): garbled extraction -- local declarations, braces and the
// return statement of this function are missing from this listing.
57 u32 ssp_pm_read(int reg)
// A pending PMC programming latches into this channel's read PMAC.
61 if (ssp->emu_status & SSP_PMC_SET)
63 ssp->pmac_read[reg] = rPMC.v;
64 ssp->emu_status &= ~SSP_PMC_SET;
69 ssp->emu_status &= ~SSP_PMC_HAVE_ADDR;
// Mode word lives in the upper half of the PMAC value.
71 mode = ssp->pmac_read[reg]>>16;
72 if ((mode & 0xfff0) == 0x0800) // ROM
74 d = ((unsigned short *)Pico.rom)[ssp->pmac_read[reg]&0xfffff];
75 ssp->pmac_read[reg] += 1;
77 else if ((mode & 0x47ff) == 0x0018) // DRAM
79 unsigned short *dram = (unsigned short *)svp->dram;
// Step decoded from the mode word's increment field.
80 int inc = get_inc(mode);
81 d = dram[ssp->pmac_read[reg]&0xffff];
82 ssp->pmac_read[reg] += inc;
85 // PMC value corresponds to last PMR accessed
86 rPMC.v = ssp->pmac_read[reg];
/*
 * SVP DRAM "overwrite" store mode: for each nibble of d that is non-zero,
 * replace the corresponding nibble of dst; zero nibbles of d leave dst
 * untouched. NOTE(review): braces of the original macro were lost in the
 * extraction; reconstructed. Multi-statement body kept as a braced block
 * to match the file's existing style.
 */
#define overwrite_write(dst, d) \
{ \
	if (d & 0xf000) { dst &= ~0xf000; dst |= d & 0xf000; } \
	if (d & 0x0f00) { dst &= ~0x0f00; dst |= d & 0x0f00; } \
	if (d & 0x00f0) { dst &= ~0x00f0; dst |= d & 0x00f0; } \
	if (d & 0x000f) { dst &= ~0x000f; dst |= d & 0x000f; } \
}
// C fallback for writes through the PMx registers (counterpart of
// ssp_pm_read). NOTE(review): garbled extraction -- declarations, braces
// and some statements of this function are missing from this listing.
99 void ssp_pm_write(u32 d, int reg)
101 unsigned short *dram;
104 if (ssp->emu_status & SSP_PMC_SET)
106 ssp->pmac_write[reg] = rPMC.v;
107 ssp->emu_status &= ~SSP_PMC_SET;
112 ssp->emu_status &= ~SSP_PMC_HAVE_ADDR;
114 dram = (unsigned short *)svp->dram;
115 mode = ssp->pmac_write[reg]>>16;
116 addr = ssp->pmac_write[reg]&0xffff;
117 if ((mode & 0x43ff) == 0x0018) // DRAM
119 int inc = get_inc(mode);
// Overwrite mode merges non-zero nibbles only; else plain store.
121 overwrite_write(dram[addr], d);
122 } else dram[addr] = d;
123 ssp->pmac_write[reg] += inc;
125 else if ((mode & 0xfbff) == 0x4018) // DRAM, cell inc
128 overwrite_write(dram[addr], d);
129 } else dram[addr] = d;
// "cell increment" addressing: step alternates 0x1f / 1 by address parity.
130 ssp->pmac_write[reg] += (addr&1) ? 0x1f : 1;
132 else if ((mode & 0x47ff) == 0x001c) // IRAM
134 int inc = get_inc(mode);
135 ((unsigned short *)svp->iram_rom)[addr&0x3ff] = d;
136 ssp->pmac_write[reg] += inc;
// IRAM changed -> translated blocks for it must be invalidated.
137 ssp->drc.iram_dirty = 1;
140 rPMC.v = ssp->pmac_write[reg];
144 // -----------------------------------------------------
/*
 * Maps a 6-bit hash of four probe bytes of IRAM (see ssp_get_iram_context)
 * to one of 14 known IRAM program "contexts"; 0 means unrecognized.
 * NOTE(review): the array's enclosing braces were lost in the extraction;
 * reconstructed. Values preserved exactly as listed.
 */
static unsigned char iram_context_map[] =
{
	 0, 0, 0, 0, 1, 0, 0, 0, // 04
	 0, 0, 0, 0, 0, 0, 2, 0, // 0e
	 0, 0, 0, 0, 0, 3, 0, 4, // 15 17
	 5, 0, 0, 6, 0, 7, 0, 0, // 18 1b 1d
	 8, 9, 0, 0, 0,10, 0, 0, // 20 21 25
	 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0,11, 0, 0,12, 0, 0, // 32 35
	13,14, 0, 0, 0, 0, 0, 0  // 38 39
};
// Identify which of the known SVP IRAM programs is currently loaded by
// sampling four bytes (byteswapped addressing, hence ^1) and hashing them
// through iram_context_map. NOTE(review): the failure branch and return
// statement are missing from this garbled listing.
159 int ssp_get_iram_context(void)
161 unsigned char *ir = (unsigned char *)svp->iram_rom;
162 int val1, val = ir[0x083^1] + ir[0x4FA^1] + ir[0x5F7^1] + ir[0x47B^1];
163 val1 = iram_context_map[(val>>1)&0x3f];
// Unrecognized program -- log the hash for diagnosis.
166 elprintf(EL_ANOMALY, "svp: iram ctx val: %02x PC=%04x\n", (val>>1)&0x3f, rPC);
167 //debug_dump2file(name, svp->iram_rom, 0x800);
173 // -----------------------------------------------------
175 /* regs with known values */
// NOTE(review): the struct declaration around these fields is missing
// from this garbled listing; fields below are per-PM-channel address/mode
// values and a shadow of the emu status word.
180 unsigned int pmac_read[5];
181 unsigned int pmac_write[5];
183 unsigned int emu_status;
// Bit positions within known_regb/dirty_regb for each tracked SSP reg.
186 #define KRREG_X (1 << SSP_X)
187 #define KRREG_Y (1 << SSP_Y)
188 #define KRREG_A (1 << SSP_A) /* AH only */
189 #define KRREG_ST (1 << SSP_ST)
190 #define KRREG_STACK (1 << SSP_STACK)
191 #define KRREG_PC (1 << SSP_PC)
192 #define KRREG_P (1 << SSP_P)
// Pointer regs r0..r7 occupy bits 8..15 (PR0 at bit 8, PR4 at bit 12).
193 #define KRREG_PR0 (1 << 8)
194 #define KRREG_PR4 (1 << 12)
195 #define KRREG_AL (1 << 16)
196 #define KRREG_PMCM (1 << 18) /* only mode word of PMC */
197 #define KRREG_PMC (1 << 19)
// PM read channels: bits 20..24; PM write channels: bits 25..29.
198 #define KRREG_PM0R (1 << 20)
199 #define KRREG_PM1R (1 << 21)
200 #define KRREG_PM2R (1 << 22)
201 #define KRREG_PM3R (1 << 23)
202 #define KRREG_PM4R (1 << 24)
203 #define KRREG_PM0W (1 << 25)
204 #define KRREG_PM1W (1 << 26)
205 #define KRREG_PM2W (1 << 27)
206 #define KRREG_PM3W (1 << 28)
207 #define KRREG_PM4W (1 << 29)
209 /* bitfield of known register values */
210 static u32 known_regb = 0;
212 /* known vals, which need to be flushed
213 * (only ST, P, r0-r7, PMCx, PMxR, PMxW)
214 * ST means flags are being held in ARM PSR
215 * P means that it needs to be recalculated
217 static u32 dirty_regb = 0;
219 /* known values of host regs.
221 * 000000-00ffff - 16bit value
222 * 100000-10ffff - base reg (r7) + 16bit val
223 * 0r0000 - means reg (low) eq gr[r].h, r != AL
// Host-register value-tracking cache for ARM r0-r3; -1 = unknown.
225 static int hostreg_r[4];
227 static void hostreg_clear(void)
230 for (i = 0; i < 4; i++)
234 static void hostreg_sspreg_changed(int sspreg)
237 for (i = 0; i < 4; i++)
238 if (hostreg_r[i] == (sspreg<<16)) hostreg_r[i] = -1;
// Access the SSP program (IRAM/ROM) as 16-bit words, by word address.
242 #define PROGRAM(x) ((unsigned short *)svp->iram_rom)[x]
243 #define PROGRAM_P(x) ((unsigned short *)svp->iram_rom + (x))
// Fatal-path handler for SSP ops the translator cannot handle; logs the
// offending PC. NOTE(review): braces and trailing statements missing from
// this garbled listing.
245 void tr_unhandled(void)
247 //FILE *f = fopen("tcache.bin", "wb");
248 //fwrite(tcache, 1, (tcache_ptr - tcache)*4, f);
250 elprintf(EL_ANOMALY, "unhandled @ %04x\n", known_regs.gr[SSP_PC].h<<1);
254 /* update P, if needed. Trashes r0 */
// Emits: r10 = (r4 asr 16) * sign-extended low half of r4 -- i.e.
// recompute the P product from the X/Y halves cached in ARM r4.
255 static void tr_flush_dirty_P(void)
258 if (!(dirty_regb & KRREG_P)) return;
259 EOP_MOV_REG_ASR(10, 4, 16); // mov r10, r4, asr #16
260 EOP_MOV_REG_LSL( 0, 4, 16); // mov r0, r4, lsl #16
261 EOP_MOV_REG_ASR( 0, 0, 15); // mov r0, r0, asr #15
262 EOP_MUL(10, 0, 10); // mul r10, r0, r10
263 dirty_regb &= ~KRREG_P;
267 /* write dirty pr to host reg. Nothing is trashed */
// Patches the 8-bit lane of packed pointer-reg host regs (r8 holds r0-r3,
// r9 holds r4-r7) with the known constant value of SSP pointer reg r.
// NOTE(review): the switch statement around the 'case' lines is missing.
268 static void tr_flush_dirty_pr(int r)
272 if (!(dirty_regb & (1 << (r+8)))) return;
275 case 0: ror = 0; break;
276 case 1: ror = 24/2; break;
277 case 2: ror = 16/2; break;
279 reg = (r < 4) ? 8 : 9;
280 EOP_BIC_IMM(reg,reg,ror,0xff);
281 if (known_regs.r[r] != 0)
282 EOP_ORR_IMM(reg,reg,ror,known_regs.r[r]);
283 dirty_regb &= ~(1 << (r+8));
286 /* write all dirty pr0-pr7 to host regs. Nothing is trashed */
// Fast path: if a whole bank (r0-r2 or r4-r6) is dirty, load r8/r9 with
// one packed immediate; otherwise patch per-lane like tr_flush_dirty_pr.
287 static void tr_flush_dirty_prs(void)
290 int dirty = dirty_regb >> 8;
291 if ((dirty&7) == 7) {
292 emith_move_r_imm(8, known_regs.r[0]|(known_regs.r[1]<<8)|(known_regs.r[2]<<16));
295 if ((dirty&0x70) == 0x70) {
296 emith_move_r_imm(9, known_regs.r[4]|(known_regs.r[5]<<8)|(known_regs.r[6]<<16));
300 for (i = 0; dirty && i < 8; i++, dirty >>= 1)
302 if (!(dirty&1)) continue;
304 case 0: ror = 0; break;
305 case 1: ror = 24/2; break;
306 case 2: ror = 16/2; break;
308 reg = (i < 4) ? 8 : 9;
309 EOP_BIC_IMM(reg,reg,ror,0xff);
310 if (known_regs.r[i] != 0)
311 EOP_ORR_IMM(reg,reg,ror,known_regs.r[i]);
313 dirty_regb &= ~0xff00;
316 /* write dirty pr and "forget" it. Nothing is trashed. */
317 static void tr_release_pr(int r)
319 tr_flush_dirty_pr(r);
320 known_regb &= ~(1 << (r+8));
323 /* flush ARM PSR to r6. Trashes r1 */
// Materializes N/Z currently held in ARM CPSR into the emulated ST image
// kept in r6 (low nibble).
324 static void tr_flush_dirty_ST(void)
326 if (!(dirty_regb & KRREG_ST)) return;
327 EOP_BIC_IMM(6,6,0,0x0f);
329 EOP_ORR_REG_LSR(6,6,1,28);
330 dirty_regb &= ~KRREG_ST;
334 /* inverse of above. Trashes r1 */
// Loads ARM CPSR N/Z from either the known constant ST or from r6.
335 static void tr_make_dirty_ST(void)
337 if (dirty_regb & KRREG_ST) return;
338 if (known_regb & KRREG_ST) {
340 if (known_regs.gr[SSP_ST].h & SSP_FLAG_N) flags |= 8;
341 if (known_regs.gr[SSP_ST].h & SSP_FLAG_Z) flags |= 4;
342 EOP_MSR_IMM(4/2, flags);
344 EOP_MOV_REG_LSL(1, 6, 28);
348 dirty_regb |= KRREG_ST;
351 /* load 16bit val into host reg r0-r3. Nothing is trashed */
// Skips the emit when the tracker says the reg already holds val.
352 static void tr_mov16(int r, int val)
354 if (hostreg_r[r] != val) {
355 emith_move_r_imm(r, val);
// Conditional variant; always emits (value tracking not applicable).
360 static void tr_mov16_cond(int cond, int r, int val)
362 emith_op_imm(cond, 0, A_OP_MOV, r, val);
// Flush known-but-dirty PMC and PM read/write channel values back to the
// ssp context block (offsets 0x400.. relative to the context base in r7).
// NOTE(review): braces and some statements missing from this listing.
367 static void tr_flush_dirty_pmcrs(void)
369 u32 i, val = (u32)-1;
370 if (!(dirty_regb & 0x3ff80000)) return;
372 if (dirty_regb & KRREG_PMC) {
373 val = known_regs.pmc.v;
374 emith_move_r_imm(1, val);
375 EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
// A still-pending PMC program at flush time is a translator bug.
377 if (known_regs.emu_status & (SSP_PMC_SET|SSP_PMC_HAVE_ADDR)) {
378 elprintf(EL_ANOMALY, "!! SSP_PMC_SET|SSP_PMC_HAVE_ADDR set on flush\n");
382 for (i = 0; i < 5; i++)
// 'val' caches the last immediate emitted to avoid redundant mov emits.
384 if (dirty_regb & (1 << (20+i))) {
385 if (val != known_regs.pmac_read[i]) {
386 val = known_regs.pmac_read[i];
387 emith_move_r_imm(1, val);
389 EOP_STR_IMM(1,7,0x454+i*4); // pmac_read
391 if (dirty_regb & (1 << (25+i))) {
392 if (val != known_regs.pmac_write[i]) {
393 val = known_regs.pmac_write[i];
394 emith_move_r_imm(1, val);
396 EOP_STR_IMM(1,7,0x46c+i*4); // pmac_write
399 dirty_regb &= ~0x3ff80000;
403 /* read bank word to r0 (upper bits zero). Thrashes r1. */
// Caches the bank-page base (r7 + page) in r1 across consecutive accesses.
404 static void tr_bank_read(int addr) /* word addr 0-0x1ff */
408 if (hostreg_r[1] != (0x100000|((addr&0x180)<<1))) {
409 EOP_ADD_IMM(1,7,30/2,(addr&0x180)>>1); // add r1, r7, ((op&0x180)<<1)
410 hostreg_r[1] = 0x100000|((addr&0x180)<<1);
414 EOP_LDRH_IMM(0,breg,(addr&0x7f)<<1); // ldrh r0, [r1, (op&0x7f)<<1]
418 /* write r0 to bank. Trashes r1. */
// Store counterpart of tr_bank_read; same base-register caching scheme.
419 static void tr_bank_write(int addr)
423 if (hostreg_r[1] != (0x100000|((addr&0x180)<<1))) {
424 EOP_ADD_IMM(1,7,30/2,(addr&0x180)>>1); // add r1, r7, ((op&0x180)<<1)
425 hostreg_r[1] = 0x100000|((addr&0x180)<<1);
429 EOP_STRH_IMM(0,breg,(addr&0x7f)<<1); // strh r0, [r1, (op&0x7f)<<1]
432 /* handle RAM bank pointer modifiers. if need_modulo, trash r1-r3, else nothing */
// Applies post-access pointer-register modification (+!, modulo inc/dec)
// for 'count' consecutive accesses; three strategies below depending on
// what is known at translation time.
433 static void tr_ptrr_mod(int r, int mod, int need_modulo, int count)
435 int modulo_shift = -1; /* unknown */
437 if (mod == 0) return;
439 if (!need_modulo || mod == 1) // +!
// Modulo size comes from ST bits 0-2 when ST is a known constant.
441 else if (need_modulo && (known_regb & KRREG_ST)) {
442 modulo_shift = known_regs.gr[SSP_ST].h & 7;
443 if (modulo_shift == 0) modulo_shift = 8;
// Strategy 1: modulo size unknown at translation time -- emit code that
// computes it from ST at run time (ror trick to isolate the 8-bit lane).
446 if (modulo_shift == -1)
448 int reg = (r < 4) ? 8 : 9;
450 if (dirty_regb & KRREG_ST) {
451 // avoid flushing ARM flags
452 EOP_AND_IMM(1, 6, 0, 0x70);
453 EOP_SUB_IMM(1, 1, 0, 0x10);
454 EOP_AND_IMM(1, 1, 0, 0x70);
455 EOP_ADD_IMM(1, 1, 0, 0x10);
457 EOP_C_DOP_IMM(A_COND_AL,A_OP_AND,1,6,1,0,0x70); // ands r1, r6, #0x70
458 EOP_C_DOP_IMM(A_COND_EQ,A_OP_MOV,0,0,1,0,0x80); // moveq r1, #0x80
460 EOP_MOV_REG_LSR(1, 1, 4); // mov r1, r1, lsr #4
461 EOP_RSB_IMM(2, 1, 0, 8); // rsb r1, r1, #8
462 EOP_MOV_IMM(3, 8/2, count); // mov r3, #0x01000000
464 EOP_ADD_IMM(1, 1, 0, (r&3)*8); // add r1, r1, #(r&3)*8
465 EOP_MOV_REG2_ROR(reg,reg,1); // mov reg, reg, ror r1
467 EOP_SUB_REG2_LSL(reg,reg,3,2); // sub reg, reg, #0x01000000 << r2
468 else EOP_ADD_REG2_LSL(reg,reg,3,2);
469 EOP_RSB_IMM(1, 1, 0, 32); // rsb r1, r1, #32
470 EOP_MOV_REG2_ROR(reg,reg,1); // mov reg, reg, ror r1
471 hostreg_r[1] = hostreg_r[2] = hostreg_r[3] = -1;
// Strategy 2: pointer reg value known -- fold the modulo arithmetic at
// translation time, keeping bits outside the modulo window unchanged.
473 else if (known_regb & (1 << (r + 8)))
475 int modulo = (1 << modulo_shift) - 1;
477 known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] - count) & modulo);
478 else known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] + count) & modulo);
// Strategy 3: modulo known but value isn't -- rotate the lane into the
// immediate-reachable position, add/sub, rotate back.
482 int reg = (r < 4) ? 8 : 9;
483 int ror = ((r&3) + 1)*8 - (8 - modulo_shift);
484 EOP_MOV_REG_ROR(reg,reg,ror);
485 // {add|sub} reg, reg, #1<<shift
486 EOP_C_DOP_IMM(A_COND_AL,(mod==2)?A_OP_SUB:A_OP_ADD,0,reg,reg, 8/2, count << (8 - modulo_shift));
487 EOP_MOV_REG_ROR(reg,reg,32-ror);
491 /* handle writes r0 to (rX). Trashes r1.
492 * fortunately we can ignore modulo increment modes for writes. */
// NOTE(review): braces/else-paths elided throughout this span by the
// garbled extraction.
493 static void tr_rX_write(int op)
// (r3)/(r7) forms address the bank directly with the 2-bit mod field.
497 int mod = (op>>2) & 3; // direct addressing
498 tr_bank_write((op & 0x100) + mod);
502 int r = (op&3) | ((op>>6)&4);
503 if (known_regb & (1 << (r + 8))) {
504 tr_bank_write((op&0x100) | known_regs.r[r]);
// Pointer value unknown: extract its 8-bit lane from r8/r9 at run time,
// form the bank address relative to r7 and store halfword.
506 int reg = (r < 4) ? 8 : 9;
507 int ror = ((4 - (r&3))*8) & 0x1f;
508 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
510 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
511 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
512 else EOP_ADD_REG_LSL(1,7,1,1);
513 EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
516 tr_ptrr_mod(r, (op>>2) & 3, 0, 1);
520 /* read (rX) to r0. Trashes r1-r3. */
// Same addressing logic as tr_rX_write, but loads and then applies the
// full (possibly modulo) pointer modifier.
521 static void tr_rX_read(int r, int mod)
525 tr_bank_read(((r << 6) & 0x100) + mod); // direct addressing
529 if (known_regb & (1 << (r + 8))) {
530 tr_bank_read(((r << 6) & 0x100) | known_regs.r[r]);
532 int reg = (r < 4) ? 8 : 9;
533 int ror = ((4 - (r&3))*8) & 0x1f;
534 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
536 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
537 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
538 else EOP_ADD_REG_LSL(1,7,1,1);
539 EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
540 hostreg_r[0] = hostreg_r[1] = -1;
542 tr_ptrr_mod(r, mod, 1, 1);
546 /* read ((rX)) to r0. Trashes r1,r2. */
// Double-indirect read: fetch the pointer word from the bank, read the
// program word it addresses via ptr_iram_rom, store the incremented
// pointer back.
547 static void tr_rX_read2(int op)
549 int r = (op&3) | ((op>>6)&4); // src
552 tr_bank_read((op&0x100) | ((op>>2)&3));
553 } else if (known_regb & (1 << (r+8))) {
554 tr_bank_read((op&0x100) | known_regs.r[r]);
556 int reg = (r < 4) ? 8 : 9;
557 int ror = ((4 - (r&3))*8) & 0x1f;
558 EOP_AND_IMM(1,reg,ror/2,0xff); // and r1, r{7,8}, <mask>
560 EOP_ORR_IMM(1,1,((ror-8)&0x1f)/2,1); // orr r1, r1, 1<<shift
561 if (r&3) EOP_ADD_REG_LSR(1,7,1, (r&3)*8-1); // add r1, r7, r1, lsr #lsr
562 else EOP_ADD_REG_LSL(1,7,1,1);
563 EOP_LDRH_SIMPLE(0,1); // ldrh r0, [r1]
565 EOP_LDR_IMM(2,7,0x48c); // ptr_iram_rom
566 EOP_ADD_REG_LSL(2,2,0,1); // add r2, r2, r0, lsl #1
567 EOP_ADD_IMM(0,0,0,1); // add r0, r0, #1
569 tr_bank_write((op&0x100) | ((op>>2)&3));
570 } else if (known_regb & (1 << (r+8))) {
571 tr_bank_write((op&0x100) | known_regs.r[r]);
573 EOP_STRH_SIMPLE(0,1); // strh r0, [r1]
576 EOP_LDRH_SIMPLE(0,2); // ldrh r0, [r2]
577 hostreg_r[0] = hostreg_r[2] = -1;
580 // check if AL is going to be used later in block
// Forward-scans the SSP opcode stream from the current PC to decide
// whether the low half of A must be preserved when writing AH.
// NOTE(review): the scan loop and several case bodies are missing from
// this garbled listing -- intentional switch fallthroughs cannot be
// distinguished from elided lines here.
581 static int tr_predict_al_need(void)
583 int tmpv, tmpv2, op, pc = known_regs.gr[SSP_PC].h;
592 tmpv2 = (op >> 4) & 0xf; // dst
593 tmpv = op & 0xf; // src
594 if ((tmpv2 == SSP_A && tmpv == SSP_P) || tmpv2 == SSP_AL) // ld A, P; ld AL, *
603 case 0x10: case 0x30: case 0x40: case 0x60: case 0x70:
604 tmpv = op & 0xf; // src
605 if (tmpv == SSP_AL) // OP *, AL
// Two-word opcode: skip the immediate.
615 case 0x74: pc++; break;
625 // mpya (rj), (ri), b
629 case 0x5b: return 0; // cleared anyway
633 tmpv = op & 0xf; // src
634 if (tmpv == SSP_AL) return 1;
635 case 0x51: case 0x53: case 0x54: case 0x55: case 0x59: case 0x5c:
643 /* get ARM cond which would mean that SSP cond is satisfied. No trash. */
// When ST is live in CPSR (dirty), the ARM flags can be used directly;
// otherwise test the flag bit in the r6 ST image.
644 static int tr_cond_check(int op)
646 int f = (op & 0x100) >> 8;
648 case 0x00: return A_COND_AL; /* always true */
649 case 0x50: /* Z matches f(?) bit */
650 if (dirty_regb & KRREG_ST) return f ? A_COND_EQ : A_COND_NE;
651 EOP_TST_IMM(6, 0, 4);
652 return f ? A_COND_NE : A_COND_EQ;
653 case 0x70: /* N matches f(?) bit */
654 if (dirty_regb & KRREG_ST) return f ? A_COND_MI : A_COND_PL;
655 EOP_TST_IMM(6, 0, 8);
656 return f ? A_COND_NE : A_COND_EQ;
658 elprintf(EL_ANOMALY, "unimplemented cond?\n");
// Invert an ARM condition code produced by tr_cond_check.
664 static int tr_neg_cond(int cond)
667 case A_COND_AL: elprintf(EL_ANOMALY, "neg for AL?\n"); exit(1);
668 case A_COND_EQ: return A_COND_NE;
669 case A_COND_NE: return A_COND_EQ;
670 case A_COND_MI: return A_COND_PL;
671 case A_COND_PL: return A_COND_MI;
672 default: elprintf(EL_ANOMALY, "bad cond for neg\n"); exit(1);
// Map SSP ALU opcode (bits 13-15) to the corresponding ARM data op.
677 static int tr_aop_ssp2arm(int op)
680 case 1: return A_OP_SUB;
681 case 3: return A_OP_CMP;
682 case 4: return A_OP_ADD;
683 case 5: return A_OP_AND;
684 case 6: return A_OP_ORR;
685 case 7: return A_OP_EOR;
693 /* special version of call for calling C needed on ios, since we use r9.. */
// Preserves r7/r9 around the call (r9 is reserved by the iOS ABI).
694 static void emith_call_c_func(void *target)
696 EOP_STMFD_SP(A_R7M|A_R9M);
698 EOP_LDMFD_SP(A_R7M|A_R9M);
701 #define emith_call_c_func emith_call
704 // -----------------------------------------------------
// Host register allocation (visible part): r4 holds X:Y, r5 holds A
// (AH:AL), r10 holds P, r7 is the context base pointer, and:
708 //@ r6: STACK and emu flags
712 // read general reg to r0. Trashes r1
// Reader handlers: one per SSP general register; each leaves the 16-bit
// value in ARM r0 and updates the hostreg tracker. NOTE(review): bodies
// partly elided by the garbled extraction.
713 static void tr_GR0_to_r0(int op)
718 static void tr_X_to_r0(int op)
720 if (hostreg_r[0] != (SSP_X<<16)) {
721 EOP_MOV_REG_LSR(0, 4, 16); // mov r0, r4, lsr #16
722 hostreg_r[0] = SSP_X<<16;
726 static void tr_Y_to_r0(int op)
728 if (hostreg_r[0] != (SSP_Y<<16)) {
729 EOP_MOV_REG_SIMPLE(0, 4); // mov r0, r4
730 hostreg_r[0] = SSP_Y<<16;
734 static void tr_A_to_r0(int op)
736 if (hostreg_r[0] != (SSP_A<<16)) {
737 EOP_MOV_REG_LSR(0, 5, 16); // mov r0, r5, lsr #16 @ AH
738 hostreg_r[0] = SSP_A<<16;
742 static void tr_ST_to_r0(int op)
744 // VR doesn't need much accuracy here..
745 EOP_MOV_REG_LSR(0, 6, 4); // mov r0, r6, lsr #4
746 EOP_AND_IMM(0, 0, 0, 0x67); // and r0, r0, #0x67
// Pop from the hardware stack image in the context block (top in r6:31-29).
750 static void tr_STACK_to_r0(int op)
753 EOP_SUB_IMM(6, 6, 8/2, 0x20); // sub r6, r6, #1<<29
754 EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
755 EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
756 EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28
757 EOP_LDRH_SIMPLE(0, 1); // ldrh r0, [r1]
758 hostreg_r[0] = hostreg_r[1] = -1;
// PC is always known at translation time -- emit as constant.
761 static void tr_PC_to_r0(int op)
763 tr_mov16(0, known_regs.gr[SSP_PC].h);
766 static void tr_P_to_r0(int op)
769 EOP_MOV_REG_LSR(0, 10, 16); // mov r0, r10, lsr #16
// Reading AL also clears any pending PMC programming state.
773 static void tr_AL_to_r0(int op)
776 if (known_regb & KRREG_PMC) {
777 known_regs.emu_status &= ~(SSP_PMC_SET|SSP_PMC_HAVE_ADDR);
779 EOP_LDR_IMM(0,7,0x484); // ldr r1, [r7, #0x484] // emu_status
780 EOP_BIC_IMM(0,0,0,SSP_PMC_SET|SSP_PMC_HAVE_ADDR);
781 EOP_STR_IMM(0,7,0x484);
785 if (hostreg_r[0] != (SSP_AL<<16)) {
786 EOP_MOV_REG_SIMPLE(0, 5); // mov r0, r5
787 hostreg_r[0] = SSP_AL<<16;
// Translation-time counterpart of ssp_pm_read: when the PM channel's
// mode/address are known constants, emit direct ROM/DRAM loads; otherwise
// fall back to calling ssp_pm_read at run time. NOTE(review): braces and
// fall-back emit lines partly elided by the garbled extraction.
791 static void tr_PMX_to_r0(int reg)
793 if ((known_regb & KRREG_PMC) && (known_regs.emu_status & SSP_PMC_SET))
795 known_regs.pmac_read[reg] = known_regs.pmc.v;
796 known_regs.emu_status &= ~SSP_PMC_SET;
797 known_regb |= 1 << (20+reg);
798 dirty_regb |= 1 << (20+reg);
802 if ((known_regb & KRREG_PMC) && (known_regb & (1 << (20+reg))))
804 u32 pmcv = known_regs.pmac_read[reg];
806 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
808 if ((mode & 0xfff0) == 0x0800)
810 EOP_LDR_IMM(1,7,0x488); // rom_ptr
811 emith_move_r_imm(0, (pmcv&0xfffff)<<1);
812 EOP_LDRH_REG(0,1,0); // ldrh r0, [r1, r0]
813 known_regs.pmac_read[reg] += 1;
815 else if ((mode & 0x47ff) == 0x0018) // DRAM
817 int inc = get_inc(mode);
818 EOP_LDR_IMM(1,7,0x490); // dram_ptr
819 emith_move_r_imm(0, (pmcv&0xffff)<<1);
820 EOP_LDRH_REG(0,1,0); // ldrh r0, [r1, r0]
// Known SVP busy-wait polls: credit back cycles and set a wait flag so
// the scheduler can skip ahead.
821 if (reg == 4 && (pmcv == 0x187f03 || pmcv == 0x187f04)) // wait loop detection
823 int flag = (pmcv == 0x187f03) ? SSP_WAIT_30FE06 : SSP_WAIT_30FE08;
825 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
826 EOP_TST_REG_SIMPLE(0,0);
827 EOP_C_DOP_IMM(A_COND_EQ,A_OP_SUB,0,11,11,22/2,1); // subeq r11, r11, #1024
828 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1,24/2,flag>>8); // orreq r1, r1, #SSP_WAIT_30FE08
829 EOP_STR_IMM(1,7,0x484); // str r1, [r7, #0x484] // emu_status
831 known_regs.pmac_read[reg] += inc;
837 known_regs.pmc.v = known_regs.pmac_read[reg];
838 //known_regb |= KRREG_PMC;
839 dirty_regb |= KRREG_PMC;
840 dirty_regb |= 1 << (20+reg);
841 hostreg_r[0] = hostreg_r[1] = -1;
// Unknown mode: forget channel state and defer to the C handler.
845 known_regb &= ~KRREG_PMC;
846 dirty_regb &= ~KRREG_PMC;
847 known_regb &= ~(1 << (20+reg));
848 dirty_regb &= ~(1 << (20+reg));
850 // call the C code to handle this
852 //tr_flush_dirty_pmcrs();
854 emith_call_c_func(ssp_pm_read);
// Thin per-channel wrappers used by the reader dispatch table below.
858 static void tr_PM0_to_r0(int op)
863 static void tr_PM1_to_r0(int op)
868 static void tr_PM2_to_r0(int op)
// XST lives in the context block; load it directly.
873 static void tr_XST_to_r0(int op)
875 EOP_ADD_IMM(0, 7, 24/2, 4); // add r0, r7, #0x400
876 EOP_LDRH_IMM(0, 0, SSP_XST*4+2);
879 static void tr_PM4_to_r0(int op)
// Reading PMC returns address or mode word depending on the
// HAVE_ADDR handshake state; emulated both at translation and run time.
884 static void tr_PMC_to_r0(int op)
886 if (known_regb & KRREG_PMC)
888 if (known_regs.emu_status & SSP_PMC_HAVE_ADDR) {
889 known_regs.emu_status |= SSP_PMC_SET;
890 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
891 // do nothing - this is handled elsewhere
893 tr_mov16(0, known_regs.pmc.l);
894 known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
899 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
902 EOP_LDR_IMM(0, 7, 0x400+SSP_PMC*4);
903 EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
904 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // orreq r1, r1, #..
905 EOP_C_DOP_IMM(A_COND_NE,A_OP_BIC,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // bicne r1, r1, #..
906 EOP_C_DOP_IMM(A_COND_NE,A_OP_ORR,0, 1, 1, 0, SSP_PMC_SET); // orrne r1, r1, #..
907 EOP_STR_IMM(1,7,0x484);
908 hostreg_r[0] = hostreg_r[1] = -1;
// Dispatch table: SSP source register number -> reader handler.
913 typedef void (tr_read_func)(int op);
915 static tr_read_func *tr_read_funcs[16] =
930 (tr_read_func *)tr_unhandled,
936 // write r0 to general reg handlers. Trashes r1
// Common tail for writer handlers: update value tracking; const_val == -1
// means the value written is not known at translation time.
937 #define TR_WRITE_R0_TO_REG(reg) \
939 hostreg_sspreg_changed(reg); \
940 hostreg_r[0] = (reg)<<16; \
941 if (const_val != -1) { \
942 known_regs.gr[reg].h = const_val; \
943 known_regb |= 1 << (reg); \
945 known_regb &= ~(1 << (reg)); \
// Writer handlers: store ARM r0 into the given SSP register's host home.
// NOTE(review): bodies partly elided by the garbled extraction.
949 static void tr_r0_to_GR0(int const_val)
// X is the high half of r4.
954 static void tr_r0_to_X(int const_val)
956 EOP_MOV_REG_LSL(4, 4, 16); // mov r4, r4, lsl #16
957 EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16
958 EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
959 dirty_regb |= KRREG_P; // touching X or Y makes P dirty.
960 TR_WRITE_R0_TO_REG(SSP_X);
// Y is the low half of r4.
963 static void tr_r0_to_Y(int const_val)
965 EOP_MOV_REG_LSR(4, 4, 16); // mov r4, r4, lsr #16
966 EOP_ORR_REG_LSL(4, 4, 0, 16); // orr r4, r4, r0, lsl #16
967 EOP_MOV_REG_ROR(4, 4, 16); // mov r4, r4, ror #16
968 dirty_regb |= KRREG_P;
969 TR_WRITE_R0_TO_REG(SSP_Y);
// Writing AH: only preserve AL if the lookahead says it is still needed.
972 static void tr_r0_to_A(int const_val)
974 if (tr_predict_al_need()) {
975 EOP_MOV_REG_LSL(5, 5, 16); // mov r5, r5, lsl #16
976 EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16 @ AL
977 EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
980 EOP_MOV_REG_LSL(5, 0, 16);
981 TR_WRITE_R0_TO_REG(SSP_A);
984 static void tr_r0_to_ST(int const_val)
986 // VR doesn't need much accuracy here..
987 EOP_AND_IMM(1, 0, 0, 0x67); // and r1, r0, #0x67
988 EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
989 EOP_ORR_REG_LSL(6, 6, 1, 4); // orr r6, r6, r1, lsl #4
990 TR_WRITE_R0_TO_REG(SSP_ST)
992 dirty_regb &= ~KRREG_ST;
// Push r0 onto the hardware stack image (top counter in r6 bits 31-29).
995 static void tr_r0_to_STACK(int const_val)
998 EOP_ADD_IMM(1, 7, 24/2, 0x04); // add r1, r7, 0x400
999 EOP_ADD_IMM(1, 1, 0, 0x48); // add r1, r1, 0x048
1000 EOP_ADD_REG_LSR(1, 1, 6, 28); // add r1, r1, r6, lsr #28
1001 EOP_STRH_SIMPLE(0, 1); // strh r0, [r1]
1002 EOP_ADD_IMM(6, 6, 8/2, 0x20); // add r6, r6, #1<<29
1006 static void tr_r0_to_PC(int const_val)
1009 * do nothing - dispatcher will take care of this
1010 EOP_MOV_REG_LSL(1, 0, 16); // mov r1, r0, lsl #16
1011 EOP_STR_IMM(1,7,0x400+6*4); // str r1, [r7, #(0x400+6*8)]
// AL is the low half of r5; tracked separately from AH.
1016 static void tr_r0_to_AL(int const_val)
1018 EOP_MOV_REG_LSR(5, 5, 16); // mov r5, r5, lsr #16
1019 EOP_ORR_REG_LSL(5, 5, 0, 16); // orr r5, r5, r0, lsl #16
1020 EOP_MOV_REG_ROR(5, 5, 16); // mov r5, r5, ror #16
1021 hostreg_sspreg_changed(SSP_AL);
1022 if (const_val != -1) {
1023 known_regs.gr[SSP_A].l = const_val;
1024 known_regb |= 1 << SSP_AL;
1026 known_regb &= ~(1 << SSP_AL);
// Translation-time counterpart of ssp_pm_write: emit direct DRAM/IRAM
// stores when the channel's mode/address are known constants, else call
// the C handler. NOTE(review): braces and fall-back lines partly elided.
1029 static void tr_r0_to_PMX(int reg)
1031 if ((known_regb & KRREG_PMC) && (known_regs.emu_status & SSP_PMC_SET))
1033 known_regs.pmac_write[reg] = known_regs.pmc.v;
1034 known_regs.emu_status &= ~SSP_PMC_SET;
1035 known_regb |= 1 << (25+reg);
1036 dirty_regb |= 1 << (25+reg);
1040 if ((known_regb & KRREG_PMC) && (known_regb & (1 << (25+reg))))
1044 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
1046 mode = known_regs.pmac_write[reg]>>16;
1047 addr = known_regs.pmac_write[reg]&0xffff;
1048 if ((mode & 0x43ff) == 0x0018) // DRAM
1050 int inc = get_inc(mode);
// Overwrite mode (bit 10) is not handled in generated code.
1051 if (mode & 0x0400) tr_unhandled();
1052 EOP_LDR_IMM(1,7,0x490); // dram_ptr
1053 emith_move_r_imm(2, addr << 1);
1054 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1055 known_regs.pmac_write[reg] += inc;
1057 else if ((mode & 0xfbff) == 0x4018) // DRAM, cell inc
1059 if (mode & 0x0400) tr_unhandled();
1060 EOP_LDR_IMM(1,7,0x490); // dram_ptr
1061 emith_move_r_imm(2, addr << 1);
1062 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
1063 known_regs.pmac_write[reg] += (addr&1) ? 31 : 1;
1065 else if ((mode & 0x47ff) == 0x001c) // IRAM
1067 int inc = get_inc(mode);
1068 EOP_LDR_IMM(1,7,0x48c); // iram_ptr
1069 emith_move_r_imm(2, (addr&0x3ff) << 1);
1070 EOP_STRH_REG(0,1,2); // strh r0, [r1, r2]
// Mark IRAM dirty so its translated blocks get invalidated.
1072 EOP_STR_IMM(1,7,0x494); // iram_dirty
1073 known_regs.pmac_write[reg] += inc;
1078 known_regs.pmc.v = known_regs.pmac_write[reg];
1079 //known_regb |= KRREG_PMC;
1080 dirty_regb |= KRREG_PMC;
1081 dirty_regb |= 1 << (25+reg);
1082 hostreg_r[1] = hostreg_r[2] = -1;
// Unknown mode: forget channel state, defer to C handler at run time.
1086 known_regb &= ~KRREG_PMC;
1087 dirty_regb &= ~KRREG_PMC;
1088 known_regb &= ~(1 << (25+reg));
1089 dirty_regb &= ~(1 << (25+reg));
1091 // call the C code to handle this
1092 tr_flush_dirty_ST();
1093 //tr_flush_dirty_pmcrs();
1095 emith_call_c_func(ssp_pm_write);
// Thin per-channel wrappers for the writer dispatch table below.
1099 static void tr_r0_to_PM0(int const_val)
1104 static void tr_r0_to_PM1(int const_val)
1109 static void tr_r0_to_PM2(int const_val)
1114 static void tr_r0_to_PM4(int const_val)
// Writing PMC: constant writes are folded into the tracked PMC state;
// otherwise emulate the address/mode handshake in generated code.
1119 static void tr_r0_to_PMC(int const_val)
1121 if ((known_regb & KRREG_PMC) && const_val != -1)
1123 if (known_regs.emu_status & SSP_PMC_HAVE_ADDR) {
1124 known_regs.emu_status |= SSP_PMC_SET;
1125 known_regs.emu_status &= ~SSP_PMC_HAVE_ADDR;
1126 known_regs.pmc.h = const_val;
1128 known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
1129 known_regs.pmc.l = const_val;
1134 tr_flush_dirty_ST();
1135 if (known_regb & KRREG_PMC) {
1136 emith_move_r_imm(1, known_regs.pmc.v);
1137 EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
1138 known_regb &= ~KRREG_PMC;
1139 dirty_regb &= ~KRREG_PMC;
1141 EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
1142 EOP_ADD_IMM(2,7,24/2,4); // add r2, r7, #0x400
1143 EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
1144 EOP_C_AM3_IMM(A_COND_EQ,1,0,2,0,0,1,SSP_PMC*4); // strxx r0, [r2, #SSP_PMC]
1145 EOP_C_AM3_IMM(A_COND_NE,1,0,2,0,0,1,SSP_PMC*4+2);
1146 EOP_C_DOP_IMM(A_COND_EQ,A_OP_ORR,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // orreq r1, r1, #..
1147 EOP_C_DOP_IMM(A_COND_NE,A_OP_BIC,0, 1, 1, 0, SSP_PMC_HAVE_ADDR); // bicne r1, r1, #..
1148 EOP_C_DOP_IMM(A_COND_NE,A_OP_ORR,0, 1, 1, 0, SSP_PMC_SET); // orrne r1, r1, #..
1149 EOP_STR_IMM(1,7,0x484);
1150 hostreg_r[1] = hostreg_r[2] = -1;
// Dispatch table: SSP destination register number -> writer handler.
1154 typedef void (tr_write_func)(int const_val);
1156 static tr_write_func *tr_write_funcs[16] =
1165 (tr_write_func *)tr_unhandled,
1169 (tr_write_func *)tr_unhandled,
1171 (tr_write_func *)tr_unhandled,
// Load X and Y via (ri)/(rj) for multiply-accumulate ops, packing both
// into r4 (X high, Y low) and marking P for recalculation.
1176 static void tr_mac_load_XY(int op)
1178 tr_rX_read(op&3, (op>>2)&3); // X
1179 EOP_MOV_REG_LSL(4, 0, 16);
1180 tr_rX_read(((op>>4)&3)|4, (op>>6)&3); // Y
1181 EOP_ORR_REG_SIMPLE(4, 0);
1182 dirty_regb |= KRREG_P;
1183 hostreg_sspreg_changed(SSP_X);
1184 hostreg_sspreg_changed(SSP_Y);
1185 known_regb &= ~KRREG_X;
1186 known_regb &= ~KRREG_Y;
1189 // -----------------------------------------------------
// Idiom detector: "ld PMC, imm; ld PMC, imm" sequence that programs a PM
// channel; folds the whole programming into tracked state. Returns number
// of ops consumed, 0 if not matched. NOTE(review): parts elided.
1191 static int tr_detect_set_pm(unsigned int op, int *pc, int imm)
1194 if (!((op&0xfef0) == 0x08e0 && (PROGRAM(*pc)&0xfef0) == 0x08e0)) return 0;
1200 pmcv = imm | (PROGRAM((*pc)++) << 16);
1201 known_regs.pmc.v = pmcv;
1202 known_regb |= KRREG_PMC;
1203 dirty_regb |= KRREG_PMC;
1204 known_regs.emu_status |= SSP_PMC_SET;
1207 // check for possible reg programming
1208 tmpv = PROGRAM(*pc);
1209 if ((tmpv & 0xfff8) == 0x08 || (tmpv & 0xff8f) == 0x80)
1211 int is_write = (tmpv & 0xff8f) == 0x80;
1212 int reg = is_write ? ((tmpv>>4)&0x7) : (tmpv&0x7);
1213 if (reg > 4) tr_unhandled();
1214 if ((tmpv & 0x0f) != 0 && (tmpv & 0xf0) != 0) tr_unhandled();
1216 known_regs.pmac_write[reg] = pmcv;
1218 known_regs.pmac_read[reg] = pmcv;
1219 known_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
1220 dirty_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
1221 known_regs.emu_status &= ~SSP_PMC_SET;
// Opcode signature of a known PM0 polling block; see detector below.
1231 static const short pm0_block_seq[] = { 0x0880, 0, 0x0880, 0, 0x0840, 0x60 };
// Idiom detector: recognized PM0 status-programming block; emulates its
// net effect on ST (ST = 0x60) directly instead of translating each op.
1233 static int tr_detect_pm0_block(unsigned int op, int *pc, int imm)
1240 if (op != 0x0840 || imm != 0) return 0;
1241 pp = PROGRAM_P(*pc);
1242 if (memcmp(pp, pm0_block_seq, sizeof(pm0_block_seq)) != 0) return 0;
1244 EOP_AND_IMM(6, 6, 8/2, 0xe0); // and r6, r6, #7<<29 @ preserve STACK
1245 EOP_ORR_IMM(6, 6, 24/2, 6); // orr r6, r6, 0x600
1246 hostreg_sspreg_changed(SSP_ST);
1247 known_regs.gr[SSP_ST].h = 0x60;
1248 known_regb |= 1 << SSP_ST;
1249 dirty_regb &= ~KRREG_ST;
// Idiom detector: three-op 4-bit rotate sequence, replaced by two ARM ops.
1255 static int tr_detect_rotate(unsigned int op, int *pc, int imm)
1261 if (op != 0x02e3 || PROGRAM(*pc) != 0x04e3 || PROGRAM(*pc + 1) != 0x000f) return 0;
1264 EOP_MOV_REG_LSL(0, 0, 4);
1265 EOP_ORR_REG_LSR(0, 0, 0, 16);
1272 // -----------------------------------------------------
1274 static int translate_op(unsigned int op, int *pc, int imm, int *end_cond, int *jump_pc)
1276 u32 tmpv, tmpv2, tmpv3;
1278 known_regs.gr[SSP_PC].h = *pc;
1284 if (op == 0) { ret++; break; } // nop
1285 tmpv = op & 0xf; // src
1286 tmpv2 = (op >> 4) & 0xf; // dst
1287 if (tmpv2 == SSP_A && tmpv == SSP_P) { // ld A, P
1289 EOP_MOV_REG_SIMPLE(5, 10);
1290 hostreg_sspreg_changed(SSP_A);
1291 known_regb &= ~(KRREG_A|KRREG_AL);
1294 tr_read_funcs[tmpv](op);
1295 tr_write_funcs[tmpv2]((known_regb & (1 << tmpv)) ? known_regs.gr[tmpv].h : -1);
1296 if (tmpv2 == SSP_PC) {
1298 *end_cond = -A_COND_AL;
1304 int r = (op&3) | ((op>>6)&4);
1305 int mod = (op>>2)&3;
1306 tmpv = (op >> 4) & 0xf; // dst
1307 ret = tr_detect_rotate(op, pc, imm);
1313 while (PROGRAM(*pc) == op) {
1314 (*pc)++; cnt++; ret++;
1317 tr_ptrr_mod(r, mod, 1, cnt); // skip
1319 tr_write_funcs[tmpv](-1);
1320 if (tmpv == SSP_PC) {
1322 *end_cond = -A_COND_AL;
1329 tmpv = (op >> 4) & 0xf; // src
1330 tr_read_funcs[tmpv](op);
1336 tr_bank_read(op&0x1ff);
1342 tmpv = (op & 0xf0) >> 4; // dst
1343 ret = tr_detect_pm0_block(op, pc, imm);
1345 ret = tr_detect_set_pm(op, pc, imm);
1348 tr_write_funcs[tmpv](imm);
1349 if (tmpv == SSP_PC) {
1357 tmpv2 = (op >> 4) & 0xf; // dst
1359 tr_write_funcs[tmpv2](-1);
1360 if (tmpv2 == SSP_PC) {
1362 *end_cond = -A_COND_AL;
1375 tr_bank_write(op&0x1ff);
1381 r = (op&3) | ((op>>6)&4); // src
1382 tmpv2 = (op >> 4) & 0xf; // dst
1383 if ((r&3) == 3) tr_unhandled();
1385 if (known_regb & (1 << (r+8))) {
1386 tr_mov16(0, known_regs.r[r]);
1387 tr_write_funcs[tmpv2](known_regs.r[r]);
1389 int reg = (r < 4) ? 8 : 9;
1390 if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
1391 EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
1393 tr_write_funcs[tmpv2](-1);
1401 r = (op&3) | ((op>>6)&4); // dst
1402 tmpv = (op >> 4) & 0xf; // src
1403 if ((r&3) == 3) tr_unhandled();
1405 if (known_regb & (1 << tmpv)) {
1406 known_regs.r[r] = known_regs.gr[tmpv].h;
1407 known_regb |= 1 << (r + 8);
1408 dirty_regb |= 1 << (r + 8);
1410 int reg = (r < 4) ? 8 : 9;
1411 int ror = ((4 - (r&3))*8) & 0x1f;
1412 tr_read_funcs[tmpv](op);
1413 EOP_BIC_IMM(reg, reg, ror/2, 0xff); // bic r{7,8}, r{7,8}, <mask>
1414 EOP_AND_IMM(0, 0, 0, 0xff); // and r0, r0, 0xff
1415 EOP_ORR_REG_LSL(reg, reg, 0, (r&3)*8); // orr r{7,8}, r{7,8}, r0, lsl #lsl
1417 known_regb &= ~(1 << (r+8));
1418 dirty_regb &= ~(1 << (r+8));
1424 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
1426 known_regs.r[tmpv] = op;
1427 known_regb |= 1 << (tmpv + 8);
1428 dirty_regb |= 1 << (tmpv + 8);
1433 u32 *jump_op = NULL;
1434 tmpv = tr_cond_check(op);
1435 if (tmpv != A_COND_AL) {
1436 jump_op = tcache_ptr;
1437 EOP_MOV_IMM(0, 0, 0); // placeholder for branch
1440 tr_r0_to_STACK(*pc);
1441 if (tmpv != A_COND_AL) {
1442 u32 *real_ptr = tcache_ptr;
1443 tcache_ptr = jump_op;
1444 EOP_C_B(tr_neg_cond(tmpv),0,real_ptr - jump_op - 2);
1445 tcache_ptr = real_ptr;
1447 tr_mov16_cond(tmpv, 0, imm);
1448 if (tmpv != A_COND_AL)
1449 tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
1450 tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
1459 tmpv2 = (op >> 4) & 0xf; // dst
1461 EOP_LDR_IMM(1,7,0x48c); // ptr_iram_rom
1462 EOP_ADD_REG_LSL(0,1,0,1); // add r0, r1, r0, lsl #1
1463 EOP_LDRH_SIMPLE(0,0); // ldrh r0, [r0]
1464 hostreg_r[0] = hostreg_r[1] = -1;
1465 tr_write_funcs[tmpv2](-1);
1466 if (tmpv2 == SSP_PC) {
1468 *end_cond = -A_COND_AL;
1474 tmpv = tr_cond_check(op);
1475 tr_mov16_cond(tmpv, 0, imm);
1476 if (tmpv != A_COND_AL)
1477 tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
1478 tr_r0_to_PC(tmpv == A_COND_AL ? imm : -1);
1486 // check for repeats of this op
1488 while (PROGRAM(*pc) == op && (op & 7) != 6) {
1492 if ((op&0xf0) != 0) // !always
1495 tmpv2 = tr_cond_check(op);
1497 case 2: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_ASR,5); break; // shr (arithmetic)
1498 case 3: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_MOV,1,0,5,tmpv,A_AM1_LSL,5); break; // shl
1499 case 6: EOP_C_DOP_IMM(tmpv2,A_OP_RSB,1,5,5,0,0); break; // neg
1500 case 7: EOP_C_DOP_REG_XIMM(tmpv2,A_OP_EOR,0,5,1,31,A_AM1_ASR,5); // eor r1, r5, r5, asr #31
1501 EOP_C_DOP_REG_XIMM(tmpv2,A_OP_ADD,1,1,5,31,A_AM1_LSR,5); // adds r5, r1, r5, lsr #31
1502 hostreg_r[1] = -1; break; // abs
1503 default: tr_unhandled();
1506 hostreg_sspreg_changed(SSP_A);
1507 dirty_regb |= KRREG_ST;
1508 known_regb &= ~KRREG_ST;
1509 known_regb &= ~(KRREG_A|KRREG_AL);
1518 EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_SUB,1,5,5,0,A_AM1_LSL,10); // subs r5, r5, r10
1519 hostreg_sspreg_changed(SSP_A);
1520 known_regb &= ~(KRREG_A|KRREG_AL);
1521 dirty_regb |= KRREG_ST;
1524 // mpya (rj), (ri), b
1529 EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_ADD,1,5,5,0,A_AM1_LSL,10); // adds r5, r5, r10
1530 hostreg_sspreg_changed(SSP_A);
1531 known_regb &= ~(KRREG_A|KRREG_AL);
1532 dirty_regb |= KRREG_ST;
1535 // mld (rj), (ri), b
1537 EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,1,0,5,0,0); // movs r5, #0
1538 hostreg_sspreg_changed(SSP_A);
1539 known_regs.gr[SSP_A].v = 0;
1540 known_regb |= (KRREG_A|KRREG_AL);
1541 dirty_regb |= KRREG_ST;
1552 tmpv = op & 0xf; // src
1553 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1554 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1555 if (tmpv == SSP_P) {
1557 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL,10); // OPs r5, r5, r10
1558 } else if (tmpv == SSP_A) {
1559 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3, 0,A_AM1_LSL, 5); // OPs r5, r5, r5
1561 tr_read_funcs[tmpv](op);
1562 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL, 0); // OPs r5, r5, r0, lsl #16
1564 hostreg_sspreg_changed(SSP_A);
1565 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1566 dirty_regb |= KRREG_ST;
1576 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1577 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1578 tr_rX_read((op&3)|((op>>6)&4), (op>>2)&3);
1579 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1580 hostreg_sspreg_changed(SSP_A);
1581 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1582 dirty_regb |= KRREG_ST;
1592 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1593 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1594 tr_bank_read(op&0x1ff);
1595 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1596 hostreg_sspreg_changed(SSP_A);
1597 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1598 dirty_regb |= KRREG_ST;
1608 tmpv = (op & 0xf0) >> 4;
1609 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1610 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1612 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1613 hostreg_sspreg_changed(SSP_A);
1614 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1615 dirty_regb |= KRREG_ST;
1625 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1626 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1628 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1629 hostreg_sspreg_changed(SSP_A);
1630 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1631 dirty_regb |= KRREG_ST;
1642 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1643 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1644 r = (op&3) | ((op>>6)&4); // src
1645 if ((r&3) == 3) tr_unhandled();
1647 if (known_regb & (1 << (r+8))) {
1648 EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,known_regs.r[r]); // OPs r5, r5, #val<<16
1650 int reg = (r < 4) ? 8 : 9;
1651 if (r&3) EOP_MOV_REG_LSR(0, reg, (r&3)*8); // mov r0, r{7,8}, lsr #lsr
1652 EOP_AND_IMM(0, (r&3)?0:reg, 0, 0xff); // and r0, r{7,8}, <mask>
1653 EOP_C_DOP_REG_XIMM(A_COND_AL,tmpv2,1,5,tmpv3,16,A_AM1_LSL,0); // OPs r5, r5, r0, lsl #16
1656 hostreg_sspreg_changed(SSP_A);
1657 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1658 dirty_regb |= KRREG_ST;
1669 tmpv2 = tr_aop_ssp2arm(op>>13); // op
1670 tmpv3 = (tmpv2 == A_OP_CMP) ? 0 : 5;
1671 EOP_C_DOP_IMM(A_COND_AL,tmpv2,1,5,tmpv3,16/2,op & 0xff); // OPs r5, r5, #val<<16
1672 hostreg_sspreg_changed(SSP_A);
1673 known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
1674 dirty_regb |= KRREG_ST;
// Emit the code that starts every translated block.
// Contract (from the comments below): on entry to a block, r0 holds the SSP
// PC of the block and r11 holds the remaining cycle budget; if the budget is
// already exhausted (r11 <= 0) we branch out to ssp_drc_end instead of
// executing the block.
1683 static void emit_block_prologue(void)
1685 // check if there are enough cycles..
1686 // note: r0 must contain PC of current block
1687 EOP_CMP_IMM(11,0,0); // cmp r11, #0
1688 emith_jump_cond(A_COND_LE, ssp_drc_end);
1692 * >0: direct (un)conditional jump
// Emit the code that ends a translated block and transfers control onward.
//
// cycles  - SSP cycles consumed by the block; subtracted from the budget in r11.
// cond    - ARM condition under which the block branches to 'pc';
//           negative means an indirect jump (target only known at run time).
// pc      - branch-target SSP PC (halfword address, as used to index the
//           block tables).
// end_pc  - PC following the block, taken when 'cond' is false.
//
// Returns end_ptr, the tcache position marking the end of the emitted block
// (presumably used as the flush/overwrite boundary by the caller — the
// return statement itself is outside the visible lines; TODO confirm).
1695 static void *emit_block_epilogue(int cycles, int cond, int pc, int end_pc)
1697 void *end_ptr = NULL;
// A cycle count that doesn't fit the 8-bit immediate of the sub below is
// only reported as an anomaly, not clamped here.
1699 if (cycles > 0xff) {
1700 elprintf(EL_ANOMALY, "large cycle count: %i\n", cycles);
1703 EOP_SUB_IMM(11,11,0,cycles); // sub r11, r11, #cycles
// PCs below 0x400 live in IRAM (per-context table); 0x400 and up are ROM.
// A ROM -> IRAM transition can't be linked directly because the IRAM
// contents (and thus iram_context) may change, so go through the dispatcher.
1705 if (cond < 0 || (end_pc >= 0x400 && pc < 0x400)) {
1706 // indirect jump, or rom -> iram jump, must use dispatcher
1707 emith_jump(ssp_drc_next);
1709 else if (cond == A_COND_AL) {
// Unconditional: link straight to the already-translated target if we
// have one, otherwise fall into the dispatcher.
1710 u32 *target = (pc < 0x400) ?
1711 ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + pc] :
1712 ssp_block_table[pc];
1716 int ops = emith_jump(ssp_drc_next);
1717 end_ptr = tcache_ptr;
1718 // cause the next block to be emitted over jump instruction
// Conditional: look up both the taken (pc) and fall-through (end_pc)
// targets; emit direct branches where known, patchable dispatcher calls
// where not.
1723 u32 *target1 = (pc < 0x400) ?
1724 ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + pc] :
1725 ssp_block_table[pc];
1726 u32 *target2 = (end_pc < 0x400) ?
1727 ssp_block_table_iram[ssp->drc.iram_context * SSP_BLOCKTAB_IRAM_ONE + end_pc] :
1728 ssp_block_table[end_pc];
1729 if (target1 != NULL)
1730 emith_jump_cond(cond, target1);
1731 if (target2 != NULL)
1732 emith_jump_cond(tr_neg_cond(cond), target2); // neg_cond, to be able to swap jumps if needed
1734 // emit patchable branches
// emith_call_cond (not jump) so the dispatcher can find and patch the
// call site once the missing target gets translated.
1735 if (target1 == NULL)
1736 emith_call_cond(cond, ssp_drc_next_patch);
1737 if (target2 == NULL)
1738 emith_call_cond(tr_neg_cond(cond), ssp_drc_next_patch);
1740 // won't patch indirect jumps
1741 if (target1 == NULL || target2 == NULL)
1742 emith_jump(ssp_drc_next);
1746 if (end_ptr == NULL)
1747 end_ptr = tcache_ptr;
// Translate one SSP1601 block starting at 'pc' (halfword program address)
// into ARM code in the translation cache.  Ops are translated one at a time
// via translate_op() until either a block-ending op is hit (ret & 0x10000)
// or roughly 100 cycles have been accumulated.  Returns a pointer related to
// the emitted code (the return statement is outside the visible lines;
// presumably block_start — TODO confirm).
1752 void *ssp_translate_block(int pc)
1754 unsigned int op, op1, imm, ccount = 0;
1755 unsigned int *block_start, *block_end;
// end_cond: ARM condition ending the block; jump_pc: branch target, -1 if none.
1756 int ret, end_cond = A_COND_AL, jump_pc = -1;
1758 //printf("translate %04x -> %04x\n", pc<<1, (tcache_ptr-tcache)<<2);
1760 block_start = tcache_ptr;
// Reset the register-tracking state for a fresh block: P is flagged dirty,
// nothing about emu_status is assumed known.
1762 dirty_regb = KRREG_P;
1763 known_regs.emu_status = 0;
1766 emit_block_prologue();
// ~100 SSP cycles per block keeps the cycle counter check in the prologue
// reasonably accurate.
1768 for (; ccount < 100;)
// Opcode classes 4 and 6 carry an immediate in the following program word.
1774 if ((op1 & 0xf) == 4 || (op1 & 0xf) == 6)
1775 imm = PROGRAM(pc++); // immediate
1777 ret = translate_op(op, &pc, imm, &end_cond, &jump_pc);
1780 elprintf(EL_ANOMALY, "NULL func! op=%08x (%02x)\n", op, op1);
// Low 16 bits of ret: cycles taken; bit 16: block must end here.
1784 ccount += ret & 0xffff;
1785 if (ret & 0x10000) break;
// Block ended on the cycle budget, not on a jump: fall through
// unconditionally to the next PC.
1788 if (ccount >= 100) {
1789 end_cond = A_COND_AL;
1791 emith_move_r_imm(0, pc);
// Write back any cached/dirty state before leaving the block.
1794 tr_flush_dirty_prs();
1795 tr_flush_dirty_ST();
1796 tr_flush_dirty_pmcrs();
1797 block_end = emit_block_epilogue(ccount, end_cond, jump_pc, pc);
1799 if (tcache_ptr - (u32 *)tcache > DRC_TCACHE_SIZE/4) {
1800 elprintf(EL_ANOMALY|EL_STATUS|EL_SVP, "tcache overflow!\n");
1807 //printf("%i blocks, %i bytes, k=%.3f\n", nblocks, (tcache_ptr - tcache)*4,
1808 // (double)(tcache_ptr - tcache) / (double)n_in_ops);
// Debug-only tcache dump (enabled via DUMP_BLOCK; fclose presumably on an
// adjacent line outside this view).
1812 FILE *f = fopen("tcache.bin", "wb");
1813 fwrite(tcache, 1, (tcache_ptr - tcache)*4, f);
1816 printf("dumped tcache.bin\n");
// Flush D-cache / invalidate I-cache over the freshly emitted code so the
// CPU doesn't execute stale instructions.
1821 cache_flush_d_inval_i(block_start, block_end);
1829 // -----------------------------------------------------
// PicoLoadStateHook callback (registered in ssp1601_dyn_startup): after a
// savestate load the cached IRAM-derived state is stale, so mark IRAM dirty
// and fall back to context 0.
1831 static void ssp1601_state_load(void)
1833 ssp->drc.iram_dirty = 1;
1834 ssp->drc.iram_context = 0;
// Release the block-lookup tables allocated by ssp1601_dyn_startup().
// Pointers are NULLed afterwards so a repeated exit (or a later startup)
// sees a clean state.
1837 void ssp1601_dyn_exit(void)
1839 free(ssp_block_table);
1840 free(ssp_block_table_iram);
1841 ssp_block_table = ssp_block_table_iram = NULL;
// One-time recompiler initialization: allocate the zero-filled block lookup
// tables, clear the translation cache, hook savestate loading, and install
// the HLE replacements for known hot SSP routines.  Returns nonzero on
// allocation failure (the error-return lines are outside this view —
// TODO confirm exact values).
1846 int ssp1601_dyn_startup(void)
// calloc both zero-initializes (NULL = "not translated yet") and overflow-
// checks the count*size multiplication.
1850 ssp_block_table = calloc(sizeof(ssp_block_table[0]), SSP_BLOCKTAB_ENTS);
1851 if (ssp_block_table == NULL)
1853 ssp_block_table_iram = calloc(sizeof(ssp_block_table_iram[0]), SSP_BLOCKTAB_IRAM_ENTS);
1854 if (ssp_block_table_iram == NULL) {
// Don't leak the first table if the second allocation fails.
1855 free(ssp_block_table);
1859 memset(tcache, 0, DRC_TCACHE_SIZE);
1860 tcache_ptr = (void *)tcache;
// Re-sync DRC state after a savestate load (see ssp1601_state_load).
1862 PicoLoadStateHook = ssp1601_state_load;
// Pre-seed the tables with hand-written HLE handlers for specific ROM/IRAM
// routines, so the dispatcher calls them instead of translating the
// originals.  Indices are halfword PCs; IRAM entries are per iram_context.
1867 ssp_block_table[0x800/2] = (void *) ssp_hle_800;
1868 ssp_block_table[0x902/2] = (void *) ssp_hle_902;
1869 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ONE + 0x030/2] = (void *) ssp_hle_07_030;
1870 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ONE + 0x036/2] = (void *) ssp_hle_07_036;
1871 ssp_block_table_iram[ 7 * SSP_BLOCKTAB_IRAM_ONE + 0x6d6/2] = (void *) ssp_hle_07_6d6;
1872 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ONE + 0x12c/2] = (void *) ssp_hle_11_12c;
1873 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ONE + 0x384/2] = (void *) ssp_hle_11_384;
1874 ssp_block_table_iram[11 * SSP_BLOCKTAB_IRAM_ONE + 0x38a/2] = (void *) ssp_hle_11_38a;
// Per-reset DRC state initialization.  NOTE(review): the 'ssp' parameter
// shadows the global 'ssp' declared earlier in this file — apparently
// intentional (see the comment on line 1886), but worth confirming callers
// always pass the same object the global points at.
1881 void ssp1601_dyn_reset(ssp1601_t *ssp)
1884 ssp->drc.iram_dirty = 1;
1885 ssp->drc.iram_context = 0;
1886 // must do this here because ssp is not available @ startup()
// Host pointers stashed as u32 for the asm/emitted code to load directly.
// NOTE(review): pointer-to-u32 casts assume a 32-bit host — fine for the
// ARM target this DRC emits for, but not 64-bit clean.
1887 ssp->drc.ptr_rom = (u32) Pico.rom;
1888 ssp->drc.ptr_iram_rom = (u32) svp->iram_rom;
1889 ssp->drc.ptr_dram = (u32) svp->dram;
1890 ssp->drc.ptr_btable = (u32) ssp_block_table;
1891 ssp->drc.ptr_btable_iram = (u32) ssp_block_table_iram;
1893 // prevent new versions of IRAM from appearing
1894 memset(svp->iram_rom, 0, 0x800);
1898 void ssp1601_dyn_run(int cycles)
1900 if (ssp->emu_status & SSP_WAIT_MASK) return;
1903 ssp_translate_block(DUMP_BLOCK >> 1);
1906 ssp_drc_entry(ssp, cycles);