-// SSP1601 to ARM recompiler
-
-// (c) Copyright 2008, Grazvydas "notaz" Ignotas
-// Free for non-commercial use.
+/*
+ * SSP1601 to ARM recompiler
+ * (C) notaz, 2008,2009,2010
+ * (C) irixxxx, 2019-2023
+ *
+ * This work is licensed under the terms of MAME license.
+ * See COPYING file in the top-level directory.
+ */
-#include "../../pico_int.h"
-#include "../../../cpu/drc/cmn.h"
+#include <pico/pico_int.h>
+#include <cpu/drc/cmn.h>
#include "compiler.h"
// FIXME: asm has these hardcoded
#define SSP_FLAG_Z (1<<0xd)
#define SSP_FLAG_N (1<<0xf)
-#ifndef ARM
+#ifndef __arm__
//#define DUMP_BLOCK 0x0c9a
void ssp_drc_next(void){}
void ssp_drc_next_patch(void){}
void ssp_drc_end(void){}
#endif
-#include "../../../cpu/drc/emit_arm.c"
+#define COUNT_OP
+#include <cpu/drc/emit_arm.c>
// -----------------------------------------------------
static void tr_mov16_cond(int cond, int r, int val)
{
- emith_op_imm(cond, A_OP_MOV, r, val);
+ emith_move_r_imm_c(cond, r, val);
hostreg_r[r] = -1;
}
if (mod == 2)
known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] - count) & modulo);
else known_regs.r[r] = (known_regs.r[r] & ~modulo) | ((known_regs.r[r] + count) & modulo);
+ dirty_regb |= (1 << (r + 8));
}
else
{
return 0;
}
+#ifdef __MACH__
+/* special version of call for calling C functions, needed on iOS since we use r9 */
+static void emith_call_c_func(void *target)
+{
+ EOP_STMFD_SP(M2(7,9));
+ emith_call(target);
+ EOP_LDMFD_SP(M2(7,9));
+}
+#else
+#define emith_call_c_func emith_call
+#endif
+
// -----------------------------------------------------
//@ r4: XXYY
return;
}
+ tr_flush_dirty_pmcrs();
known_regb &= ~KRREG_PMC;
dirty_regb &= ~KRREG_PMC;
known_regb &= ~(1 << (20+reg));
// call the C code to handle this
tr_flush_dirty_ST();
- //tr_flush_dirty_pmcrs();
tr_mov16(0, reg);
- emith_call(ssp_pm_read);
+ emith_call_c_func(ssp_pm_read);
hostreg_clear();
}
EOP_ORR_REG_LSL(6, 6, 1, 4); // orr r6, r6, r1, lsl #4
TR_WRITE_R0_TO_REG(SSP_ST);
hostreg_r[1] = -1;
+ known_regb &= ~KRREG_ST;
dirty_regb &= ~KRREG_ST;
}
hostreg_sspreg_changed(SSP_AL);
if (const_val != -1) {
known_regs.gr[SSP_A].l = const_val;
- known_regb |= 1 << SSP_AL;
+ known_regb |= KRREG_AL;
} else
- known_regb &= ~(1 << SSP_AL);
+ known_regb &= ~KRREG_AL;
}
static void tr_r0_to_PMX(int reg)
return;
}
+ tr_flush_dirty_pmcrs();
known_regb &= ~KRREG_PMC;
dirty_regb &= ~KRREG_PMC;
known_regb &= ~(1 << (25+reg));
// call the C code to handle this
tr_flush_dirty_ST();
- //tr_flush_dirty_pmcrs();
tr_mov16(1, reg);
- emith_call(ssp_pm_write);
+ emith_call_c_func(ssp_pm_write);
hostreg_clear();
}
known_regs.emu_status |= SSP_PMC_HAVE_ADDR;
known_regs.pmc.l = const_val;
}
+ dirty_regb |= KRREG_PMC;
}
else
{
tr_flush_dirty_ST();
- if (known_regb & KRREG_PMC) {
+ if (dirty_regb & KRREG_PMC) {
emith_move_r_imm(1, known_regs.pmc.v);
EOP_STR_IMM(1,7,0x400+SSP_PMC*4);
- known_regb &= ~KRREG_PMC;
dirty_regb &= ~KRREG_PMC;
}
+ known_regb &= ~KRREG_PMC;
EOP_LDR_IMM(1,7,0x484); // ldr r1, [r7, #0x484] // emu_status
EOP_ADD_IMM(2,7,24/2,4); // add r2, r7, #0x400
EOP_TST_IMM(1, 0, SSP_PMC_HAVE_ADDR);
int reg = is_write ? ((tmpv>>4)&0x7) : (tmpv&0x7);
if (reg > 4) tr_unhandled();
if ((tmpv & 0x0f) != 0 && (tmpv & 0xf0) != 0) tr_unhandled();
- known_regs.pmac_read[is_write ? reg + 5 : reg] = pmcv;
+ if (is_write)
+ known_regs.pmac_write[reg] = pmcv;
+ else
+ known_regs.pmac_read[reg] = pmcv;
known_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
dirty_regb |= is_write ? (1 << (reg+25)) : (1 << (reg+20));
known_regs.emu_status &= ~SSP_PMC_SET;
EOP_ORR_IMM(6, 6, 24/2, 6); // orr r6, r6, 0x600
hostreg_sspreg_changed(SSP_ST);
known_regs.gr[SSP_ST].h = 0x60;
- known_regb |= 1 << SSP_ST;
+ known_regb |= KRREG_ST;
dirty_regb &= ~KRREG_ST;
(*pc) += 3*2;
n_in_ops += 3;
tmpv = tr_cond_check(op);
if (tmpv != A_COND_AL) {
jump_op = tcache_ptr;
- EOP_MOV_IMM(0, 0, 0); // placeholder for branch
+ EOP_C_B(tmpv, 0, 0); // placeholder for branch
}
tr_mov16(0, *pc);
tr_r0_to_STACK(*pc);
- if (tmpv != A_COND_AL) {
- u32 *real_ptr = tcache_ptr;
- tcache_ptr = jump_op;
- EOP_C_B(tr_neg_cond(tmpv),0,real_ptr - jump_op - 2);
- tcache_ptr = real_ptr;
- }
+ if (tmpv != A_COND_AL)
+ EOP_C_B_PTR(jump_op, tr_neg_cond(tmpv), 0,
+ tcache_ptr - jump_op - 2);
tr_mov16_cond(tmpv, 0, imm);
if (tmpv != A_COND_AL)
tr_mov16_cond(tr_neg_cond(tmpv), 0, *pc);
tr_make_dirty_ST();
EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_SUB,1,5,5,0,A_AM1_LSL,10); // subs r5, r5, r10
hostreg_sspreg_changed(SSP_A);
- known_regb &= ~(KRREG_A|KRREG_AL);
dirty_regb |= KRREG_ST;
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
ret++; break;
// mpya (rj), (ri), b
tr_make_dirty_ST();
EOP_C_DOP_REG_XIMM(A_COND_AL,A_OP_ADD,1,5,5,0,A_AM1_LSL,10); // adds r5, r5, r10
hostreg_sspreg_changed(SSP_A);
- known_regb &= ~(KRREG_A|KRREG_AL);
dirty_regb |= KRREG_ST;
+ known_regb &= ~(KRREG_A|KRREG_AL|KRREG_ST);
ret++; break;
// mld (rj), (ri), b
EOP_C_DOP_IMM(A_COND_AL,A_OP_MOV,1,0,5,0,0); // movs r5, #0
hostreg_sspreg_changed(SSP_A);
known_regs.gr[SSP_A].v = 0;
- known_regb |= (KRREG_A|KRREG_AL);
dirty_regb |= KRREG_ST;
+ known_regb &= ~KRREG_ST;
+ known_regb |= (KRREG_A|KRREG_AL);
tr_mac_load_XY(op);
ret++; break;
* >0: direct (un)conditional jump
* <0: indirect jump
*/
-static void emit_block_epilogue(int cycles, int cond, int pc, int end_pc)
+static void *emit_block_epilogue(int cycles, int cond, int pc, int end_pc)
{
- if (cycles > 0xff) { elprintf(EL_ANOMALY, "large cycle count: %i\n", cycles); cycles = 0xff; }
+ void *end_ptr = NULL;
+
+ if (cycles > 0xff) {
+ elprintf(EL_ANOMALY, "large cycle count: %i\n", cycles);
+ cycles = 0xff;
+ }
EOP_SUB_IMM(11,11,0,cycles); // sub r11, r11, #cycles
if (cond < 0 || (end_pc >= 0x400 && pc < 0x400)) {
ssp_block_table[pc];
if (target != NULL)
emith_jump(target);
- else {
- int ops = emith_jump(ssp_drc_next);
- // cause the next block to be emitted over jump instruction
- tcache_ptr -= ops;
- }
+ else
+ emith_jump(ssp_drc_next);
}
else {
u32 *target1 = (pc < 0x400) ?
emith_jump(ssp_drc_next);
#endif
}
+
+ if (end_ptr == NULL)
+ end_ptr = tcache_ptr;
+
+ return end_ptr;
}
void *ssp_translate_block(int pc)
{
unsigned int op, op1, imm, ccount = 0;
- unsigned int *block_start;
+ unsigned int *block_start, *block_end;
int ret, end_cond = A_COND_AL, jump_pc = -1;
//printf("translate %04x -> %04x\n", pc<<1, (tcache_ptr-tcache)<<2);
tr_flush_dirty_prs();
tr_flush_dirty_ST();
tr_flush_dirty_pmcrs();
- emit_block_epilogue(ccount, end_cond, jump_pc, pc);
+ block_end = emit_block_epilogue(ccount, end_cond, jump_pc, pc);
+ emith_flush();
+ emith_pool_commit(-1);
if (tcache_ptr - (u32 *)tcache > DRC_TCACHE_SIZE/4) {
elprintf(EL_ANOMALY|EL_STATUS|EL_SVP, "tcache overflow!\n");
- fflush(stdout);
exit(1);
}
// stats
nblocks++;
- //printf("%i blocks, %i bytes, k=%.3f\n", nblocks, (tcache_ptr - tcache)*4,
- // (double)(tcache_ptr - tcache) / (double)n_in_ops);
+ //printf("%i blocks, %i bytes, k=%.3f\n", nblocks, (u8 *)tcache_ptr - tcache,
+ // (double)((u8 *)tcache_ptr - tcache) / (double)n_in_ops);
#ifdef DUMP_BLOCK
{
exit(0);
#endif
- handle_caches();
+#ifdef __arm__
+ cache_flush_d_inval_i(block_start, block_end);
+#endif
return block_start;
}
PicoLoadStateHook = ssp1601_state_load;
n_in_ops = 0;
-#ifdef ARM
+#ifdef __arm__
// hle'd blocks
ssp_block_table[0x800/2] = (void *) ssp_hle_800;
ssp_block_table[0x902/2] = (void *) ssp_hle_902;
#ifdef DUMP_BLOCK
ssp_translate_block(DUMP_BLOCK >> 1);
#endif
-#ifdef ARM
- ssp_drc_entry(cycles);
+#ifdef __arm__
+ ssp_drc_entry(ssp, cycles);
#endif
}