/*
- * vim:shiftwidth=2:expandtab
+ * SH2 recompiler
+ * (C) notaz, 2009,2010
+ *
+ * This work is licensed under the terms of MAME license.
+ * See COPYING file in the top-level directory.
*
* notes:
* - tcache, block descriptor, link buffer overflows result in sh2_translate()
* failure, followed by full tcache invalidation for that region
* - jumps between blocks are tracked for SMC handling (in block_links[]),
* except jumps between different tcaches
+ * - non-main block entries are called subblocks, as they have the same
+ *   tracking structures that main blocks have.
*
* implemented:
* - static register allocation
* - some constant propagation
*
* TODO:
- * - proper SMC handling
* - better constant propagation
* - stack caching?
* - bug fixing
#define PROPAGATE_CONSTANTS 1
#define LINK_BRANCHES 1
+// limits (per block)
+#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
+
// max literal offset from the block end
#define MAX_LITERAL_OFFSET 32*2
+#define MAX_LITERALS (BLOCK_CYCLE_LIMIT / 4)
+#define MAX_LOCAL_BRANCHES 32
+
+///
+#define FETCH_OP(pc) \
+ dr_pc_base[(pc) / 2]
+
+#define FETCH32(a) \
+ ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
-// debug stuff {
+#ifdef DRC_SH2
+
+// debug stuff
+// 1 - warnings/overflows
+// 2 - block info/hash stats
+// 4 - log asm
+// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0
#endif
}
#include "mame/sh2dasm.h"
-#include <platform/linux/host_dasm.h>
+#include <platform/libpicofe/linux/host_dasm.h>
static int insns_compiled, hash_collisions, host_insn_count;
#define COUNT_OP \
host_insn_count++
#define dbg(...)
#endif
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
static u8 *tcache_dsm_ptrs[3];
static char sh2dasm_buff[64];
#define do_host_disasm(tcid) \
#define do_host_disasm(x)
#endif
-#if (DRC_DEBUG & 4) || defined(PDB)
+#if (DRC_DEBUG & 8) || defined(PDB)
static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
{
if (block != NULL) {
- dbg(4, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
+ dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
sh2->pc, block, (signed int)sr >> 12);
pdb_step(sh2, sh2->pc);
}
#endif
// } debug
-#define BLOCK_CYCLE_LIMIT 100
-#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
#define TCACHE_BUFFERS 3
// we have 3 translation cache buffers, split from one drc/cmn buffer.
u32 addr; // SH2 PC address
void *tcache_ptr; // translated block for above PC
struct block_desc_ *next; // next block with the same PC hash
-#if (DRC_DEBUG & 1)
+#if (DRC_DEBUG & 2)
int refcount;
#endif
} block_desc;
// note: reg_temp[] must have at least the amount of
// registers used by handlers in worst case (currently 4)
-#ifdef ARM
+#ifdef __arm__
#include "../drc/emit_arm.c"
static const int reg_map_g2h[] = {
4, 5, 6, 7,
8, -1, -1, -1,
-1, -1, -1, -1,
- -1, -1, -1, 9,
- -1, -1, -1, 10,
- -1, -1, -1, -1,
+ -1, -1, -1, 9, // r12 .. sp
+ -1, -1, -1, 10, // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
+ -1, -1, -1, -1, // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
};
static temp_reg_t reg_temp[] = {
static void REGPARM(2) (*sh2_drc_write16_slot)(u32 a, u32 d);
static int REGPARM(3) (*sh2_drc_write32)(u32 a, u32 d, SH2 *sh2);
-extern void REGPARM(2) sh2_do_op(SH2 *sh2, int opcode);
-
// address space stuff
-static void *dr_get_pc_base(u32 pc, int is_slave)
-{
- void *ret = NULL;
- u32 mask = 0;
-
- if ((pc & ~0x7ff) == 0) {
- // BIOS
- ret = is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
- mask = 0x7ff;
- }
- else if ((pc & 0xfffff000) == 0xc0000000) {
- // data array
- ret = Pico32xMem->data_array[is_slave];
- mask = 0xfff;
- }
- else if ((pc & 0xc6000000) == 0x06000000) {
- // SDRAM
- ret = Pico32xMem->sdram;
- mask = 0x03ffff;
- }
- else if ((pc & 0xc6000000) == 0x02000000) {
- // ROM
- ret = Pico.rom;
- mask = 0x3fffff;
- }
-
- if (ret == NULL)
- return (void *)-1; // NULL is valid value
-
- return (char *)ret - (pc & ~mask);
-}
-
static int dr_ctx_get_mem_ptr(u32 a, u32 *mask)
{
int poffs = -1;
}
else
memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[0]));
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
#endif
}
int cnt = block_link_counts[tcache_id];
if (cnt >= block_max_counts[tcache_id] * 2) {
- printf("bl overflow for tcache %d\n", tcache_id);
+ dbg(1, "bl overflow for tcache %d\n", tcache_id);
return -1;
}
bd = dr_get_bd(addr, is_slave, &tcache_id);
if (bd != NULL) {
- dbg(1, "block override for %08x", addr);
+ dbg(2, "block override for %08x", addr);
bd->tcache_ptr = tcache_ptr;
*blk_id = bd - block_tables[tcache_id];
return bd;
bcount = &block_counts[tcache_id];
if (*bcount >= block_max_counts[tcache_id]) {
- printf("bd overflow for tcache %d\n", tcache_id);
+ dbg(1, "bd overflow for tcache %d", tcache_id);
return NULL;
}
if (*bcount == 0)
if ((addr & 0xc6000000) == 0x02000000) { // ROM
bd->next = HASH_FUNC(hash_table, addr);
HASH_FUNC(hash_table, addr) = bd;
-#if (DRC_DEBUG & 1)
+#if (DRC_DEBUG & 2)
if (bd->next != NULL) {
printf(" hash collision with %08x\n", bd->next->addr);
hash_collisions++;
if (bd != NULL)
block = bd->tcache_ptr;
-#if (DRC_DEBUG & 1)
+#if (DRC_DEBUG & 2)
if (bd != NULL)
bd->refcount++;
#endif
return block;
}
+// Fatal-path handler installed in place of a direct exit() call (see the
+// emith_call(dr_failure) site): logs and terminates when a block could not
+// be translated at runtime.
+// NOTE(review): declared void * but never returns a value — exit(1) does
+// not return; presumably typed to match the generated call site. Confirm.
+static void *dr_failure(void)
+{
+  lprintf("recompilation failed\n");
+  exit(1);
+}
+
static void *dr_prepare_ext_branch(u32 pc, SH2 *sh2, int tcache_id)
{
#if LINK_BRANCHES
for (i = 0; i < cnt; i++) {
if (bl[i].target_pc == pc) {
- dbg(1, "- link from %p", bl[i].jump);
+ dbg(2, "- link from %p", bl[i].jump);
emith_jump_patch(bl[i].jump, target);
// XXX: sync ARM caches (old jump should be fine)?
}
#define ADD_TO_ARRAY(array, count, item, failcode) \
array[count++] = item; \
if (count >= ARRAY_SIZE(array)) { \
- printf("warning: " #array " overflow\n"); \
+ dbg(1, "warning: " #array " overflow"); \
failcode; \
}
if (reg_temp[i].hreg == r)
break;
- if (i == ARRAY_SIZE(reg_temp))
- // let's just say it's untracked arg reg
- return r;
+ if (i == ARRAY_SIZE(reg_temp)) // can't happen
+ exit(1);
if (reg_temp[i].type == HR_CACHED) {
// writeback
static int rcache_get_reg_arg(int arg, sh2_reg_e r)
{
int i, srcr, dstr, dstid;
- int dirty = 0;
+ int dirty = 0, src_dirty = 0;
dstid = rcache_get_arg_id(arg);
dstr = reg_temp[dstid].hreg;
reg_temp[i].greg == r)
{
srcr = reg_temp[i].hreg;
+ if (reg_temp[i].flags & HRF_DIRTY)
+ src_dirty = 1;
goto do_cache;
}
}
do_cache:
if (dstr != srcr)
emith_move_r_r(dstr, srcr);
+#if 1
+ else
+ dirty |= src_dirty;
+
+ if (dirty)
+ // must clean, callers might want to modify the arg before call
+ emith_ctx_write(dstr, r * 4);
+#else
+ if (dirty)
+ reg_temp[dstid].flags |= HRF_DIRTY;
+#endif
reg_temp[dstid].stamp = ++rcache_counter;
reg_temp[dstid].type = HR_CACHED;
reg_temp[dstid].greg = r;
reg_temp[dstid].flags |= HRF_LOCKED;
- if (dirty)
- reg_temp[dstid].flags |= HRF_DIRTY;
return dstr;
}
}
}
rcache_invalidate();
+
+ if (reg_map_g2h[SHR_SR] != -1)
+ emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
+
// assuming arg0 and retval reg matches
return rcache_get_tmp_arg(0);
}
{
int ctxr;
host_arg2reg(ctxr, 2);
+ if (reg_map_g2h[SHR_SR] != -1)
+ emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
+
switch (size) {
case 0: // 8
// XXX: consider inlining sh2_drc_write8
emith_call(sh2_drc_write32);
break;
}
+
+ if (reg_map_g2h[SHR_SR] != -1)
+ emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
rcache_invalidate();
}
static void emit_block_entry(void)
{
- int arg0, arg1, arg2;
+ int arg0;
host_arg2reg(arg0, 0);
+
+#if (DRC_DEBUG & 8) || defined(PDB)
+ int arg1, arg2;
host_arg2reg(arg1, 1);
host_arg2reg(arg2, 2);
-#if (DRC_DEBUG & 4) || defined(PDB)
emit_do_static_regs(1, arg2);
emith_move_r_r(arg1, CONTEXT_REG);
emith_move_r_r(arg2, rcache_get_reg(SHR_SR, RC_GR_READ));
goto default_; \
}
-#define FETCH_OP(pc) \
- dr_pc_base[(pc) / 2]
-
-#define FETCH32(a) \
- ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
-
#define GET_Fx() \
((op >> 4) & 0x0f)
if (GET_Fx() >= n) \
goto default_
-#define MAX_LOCAL_BRANCHES 32
-
-// op_flags: data from 1st pass
-#define OP_FLAGS(pc) op_flags[((pc) - base_pc) / 2]
-#define OF_DELAY_OP (1 << 0)
+static void *dr_get_pc_base(u32 pc, int is_slave);
static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
{
void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
int branch_patch_count = 0;
+ u32 literal_addr[MAX_LITERALS];
+ int literal_addr_count = 0;
int pending_branch_cond = -1;
int pending_branch_pc = 0;
- u8 op_flags[BLOCK_CYCLE_LIMIT + 1];
+ u8 op_flags[BLOCK_CYCLE_LIMIT];
struct {
u32 delayed_op:2;
u32 test_irq:1;
// PC of current, first, last, last_target_blk SH2 insn
u32 pc, base_pc, end_pc, out_pc;
- u32 last_inlined_literal = 0;
void *block_entry;
block_desc *this_block;
u16 *dr_pc_base;
// predict tcache overflow
tmp = tcache_ptr - tcache_bases[tcache_id];
if (tmp > tcache_sizes[tcache_id] - MAX_BLOCK_SIZE) {
- printf("tcache %d overflow\n", tcache_id);
+ dbg(1, "tcache %d overflow", tcache_id);
return NULL;
}
block_entry = tcache_ptr;
- dbg(1, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
+ dbg(2, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
tcache_id, blkid_main, base_pc, block_entry);
dr_link_blocks(tcache_ptr, base_pc, tcache_id);
// 1st pass: scan forward for local branches
- memset(op_flags, 0, sizeof(op_flags));
- for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT; cycles++, pc += 2) {
- op = FETCH_OP(pc);
- if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
- signed int offs = ((signed int)(op << 20) >> 19);
- pc += 2;
- OP_FLAGS(pc) |= OF_DELAY_OP;
- ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 2,);
- break;
- }
- if ((op & 0xf000) == 0) {
- op &= 0xff;
- if (op == 0x1b) // SLEEP
- break;
- if (op == 0x23 || op == 0x03 || op == 0x0b || op == 0x2b) { // BRAF, BSRF, RTS, RTE
- pc += 2;
- OP_FLAGS(pc) |= OF_DELAY_OP;
- break;
- }
+ scan_block(base_pc, sh2->is_slave, op_flags, &end_pc);
+
+ // collect branch_targets that don't land on delay slots
+ for (pc = base_pc; pc <= end_pc; pc += 2) {
+ if (!(OP_FLAGS(pc) & OF_TARGET))
+ continue;
+ if (OP_FLAGS(pc) & OF_DELAY_OP) {
+ OP_FLAGS(pc) &= ~OF_TARGET;
continue;
}
- if ((op & 0xf0df) == 0x400b) { // JMP, JSR
- pc += 2;
- OP_FLAGS(pc) |= OF_DELAY_OP;
- break;
- }
- if ((op & 0xf900) == 0x8900) { // BT(S), BF(S)
- signed int offs = ((signed int)(op << 24) >> 23);
- if (op & 0x0400)
- OP_FLAGS(pc + 2) |= OF_DELAY_OP;
- ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 4, break);
- }
- if ((op & 0xff00) == 0xc300) // TRAPA
- break;
+ ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break);
}
- end_pc = pc;
-
- // clean branch_targets that are not really local,
- // and that land on delay slots
- for (i = 0, tmp = 0; i < branch_target_count; i++) {
- pc = branch_target_pc[i];
- if (base_pc <= pc && pc <= end_pc && !(OP_FLAGS(pc) & OF_DELAY_OP))
- branch_target_pc[tmp++] = branch_target_pc[i];
+ if (branch_target_count > 0) {
+ memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
+ memset(branch_target_blkid, 0, sizeof(branch_target_blkid[0]) * branch_target_count);
}
- branch_target_count = tmp;
- memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
- memset(branch_target_blkid, 0, sizeof(branch_target_blkid[0]) * branch_target_count);
// -------------------------------------------------
// 2nd pass: actual compilation
op = FETCH_OP(pc);
- i = find_in_array(branch_target_pc, branch_target_count, pc);
- if (i >= 0)
+ if ((OP_FLAGS(pc) & OF_TARGET) || pc == base_pc)
{
+ i = find_in_array(branch_target_pc, branch_target_count, pc);
if (pc != base_pc)
{
/* make "subblock" - just a mid-block entry */
rcache_flush();
do_host_disasm(tcache_id);
+ dbg(2, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
+ tcache_id, branch_target_blkid[i], pc, tcache_ptr);
+
subblock = dr_add_block(pc, sh2->is_slave, &branch_target_blkid[i]);
if (subblock == NULL)
return NULL;
- dbg(1, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
- tcache_id, branch_target_blkid[i], pc, tcache_ptr);
-
// since we made a block entry, link any other blocks that jump to current pc
dr_link_blocks(tcache_ptr, pc, tcache_id);
}
- branch_target_ptr[i] = tcache_ptr;
+ if (i >= 0)
+ branch_target_ptr[i] = tcache_ptr;
// must update PC
emit_move_r_imm32(SHR_PC, pc);
emith_cmp_r_imm(sr, 0);
emith_jump_cond(DCOND_LE, sh2_drc_exit);
do_host_disasm(tcache_id);
+ rcache_unlock_all();
}
-#if (DRC_DEBUG & 3)
- insns_compiled++;
#if (DRC_DEBUG & 2)
+ insns_compiled++;
+#endif
+#if (DRC_DEBUG & 4)
DasmSH2(sh2dasm_buff, pc, op);
printf("%08x %04x %s\n", pc, op, sh2dasm_buff);
#endif
+#ifdef DRC_CMP
+ //if (out_pc != 0 && out_pc != (u32)-1)
+ // emit_move_r_imm32(SHR_PC, out_pc);
+ //else
+ if (!drcf.delayed_op) {
+ emit_move_r_imm32(SHR_PC, pc);
+ sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ FLUSH_CYCLES(sr);
+ // rcache_clean(); // FIXME
+ rcache_flush();
+ emit_do_static_regs(1, 0);
+ emith_pass_arg_r(0, CONTEXT_REG);
+ emith_call(do_sh2_cmp);
+ }
#endif
pc += 2;
EMITH_JMP_END(DCOND_EQ);
rcache_free_tmp(tmp);
- cycles += 3;
+ cycles += 2;
goto end_op;
}
goto default_;
emith_clear_msb(tmp2, tmp3, 16);
emith_mul(tmp, tmp, tmp2);
rcache_free_tmp(tmp2);
-// FIXME: causes timing issues in Doom?
-// cycles++;
goto end_op;
}
goto default_;
tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
emith_mul_u64(tmp3, tmp4, tmp, tmp2);
+ cycles++;
goto end_op;
case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
emith_mul_s64(tmp3, tmp4, tmp, tmp2);
+ cycles++;
goto end_op;
}
goto default_;
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
if (drcf.delayed_op)
DELAY_SAVE_T(sr);
+#ifndef DRC_CMP
if (FETCH_OP(pc) == 0x8bfd) { // BF #-2
if (gconst_get(GET_Rn(), &tmp)) {
// XXX: limit burned cycles
emith_sh2_dtbf_loop();
goto end_op;
}
+#endif
tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
emith_bic_r_imm(sr, T);
emith_subf_r_imm(tmp, 1);
break;
case 0x03: // STC.L SR,@–Rn 0100nnnn00000011
tmp = SHR_SR;
+ cycles++;
break;
case 0x13: // STC.L GBR,@–Rn 0100nnnn00010011
tmp = SHR_GBR;
+ cycles++;
break;
case 0x23: // STC.L VBR,@–Rn 0100nnnn00100011
tmp = SHR_VBR;
+ cycles++;
break;
default:
goto default_;
break;
case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
tmp = SHR_SR;
+ cycles += 2;
break;
case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
tmp = SHR_GBR;
+ cycles += 2;
break;
case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
tmp = SHR_VBR;
+ cycles += 2;
break;
default:
goto default_;
case 0x0d00: // BT/S label 10001101dddddddd
case 0x0f00: // BF/S label 10001111dddddddd
DELAYED_OP;
- cycles--;
// fallthrough
case 0x0900: // BT label 10001001dddddddd
case 0x0b00: // BF label 10001011dddddddd
pending_branch_cond = (op & 0x0200) ? DCOND_EQ : DCOND_NE;
i = ((signed int)(op << 24) >> 23);
pending_branch_pc = pc + i + 2;
- cycles += 2;
goto end_op;
}
goto default_;
// MOV.W @(disp,PC),Rn 1001nnnndddddddd
tmp = pc + (op & 0xff) * 2 + 2;
#if PROPAGATE_CONSTANTS
- if (tmp < end_pc + MAX_LITERAL_OFFSET) {
+ if (tmp < end_pc + MAX_LITERAL_OFFSET && literal_addr_count < MAX_LITERALS) {
+ ADD_TO_ARRAY(literal_addr, literal_addr_count, tmp,);
gconst_new(GET_Rn(), (u32)(int)(signed short)FETCH_OP(tmp));
- if (last_inlined_literal < tmp)
- last_inlined_literal = tmp;
}
else
#endif
// MOV.L @(disp,PC),Rn 1101nnnndddddddd
tmp = (pc + (op & 0xff) * 4 + 2) & ~3;
#if PROPAGATE_CONSTANTS
- if (tmp < end_pc + MAX_LITERAL_OFFSET) {
+ if (tmp < end_pc + MAX_LITERAL_OFFSET && literal_addr_count < MAX_LITERALS) {
+ ADD_TO_ARRAY(literal_addr, literal_addr_count, tmp,);
gconst_new(GET_Rn(), FETCH32(tmp));
- if (last_inlined_literal < tmp)
- last_inlined_literal = tmp;
}
else
#endif
default_:
elprintf(EL_ANOMALY, "%csh2 drc: unhandled op %04x @ %08x",
sh2->is_slave ? 's' : 'm', op, pc - 2);
-#ifdef DRC_DEBUG_INTERP
- emit_move_r_imm32(SHR_PC, pc - 2);
- rcache_flush();
- emith_pass_arg_r(0, CONTEXT_REG);
- emith_pass_arg_imm(1, op);
- emith_call(sh2_do_op);
-#endif
break;
}
if (pending_branch_cond != -1 && drcf.delayed_op != 2)
{
u32 target_pc = pending_branch_pc;
+ int ctaken = drcf.delayed_op ? 1 : 2;
void *target;
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
- // handle cycles
FLUSH_CYCLES(sr);
- rcache_clean();
if (drcf.use_saved_t)
emith_tst_r_imm(sr, T_save);
else
emith_tst_r_imm(sr, T);
+ // handle cycles
+ emith_sub_r_imm_c(pending_branch_cond, sr, ctaken<<12);
+ rcache_clean();
+
#if LINK_BRANCHES
if (find_in_array(branch_target_pc, branch_target_count, target_pc) >= 0) {
// local branch
branch_patch_count++;
if (branch_patch_count == MAX_LOCAL_BRANCHES) {
- printf("warning: too many local branches\n");
+ dbg(1, "warning: too many local branches");
break;
}
}
t = find_in_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
target = branch_target_ptr[t];
if (target == NULL) {
- // flush pc and go back to dispatcher (should no longer happen)
- printf("stray branch to %08x %p\n", branch_patch_pc[i], tcache_ptr);
+ // flush pc and go back to dispatcher (this should no longer happen)
+ dbg(1, "stray branch to %08x %p", branch_patch_pc[i], tcache_ptr);
target = tcache_ptr;
emit_move_r_imm32(SHR_PC, branch_patch_pc[i]);
rcache_flush();
}
end_pc = pc;
- if (last_inlined_literal > end_pc)
- end_pc = last_inlined_literal + 4;
// mark memory blocks as containing compiled code
// override any overlay blocks as they become unreachable anyway
for (pc = base_pc + 2; pc < end_pc; pc += 2)
drc_ram_blk[(pc >> shift) & mask] = blkid_main << 1;
- // mark subblocks too
+ // mark subblocks
for (i = 0; i < branch_target_count; i++)
if (branch_target_blkid[i] != 0)
drc_ram_blk[(branch_target_pc[i] >> shift) & mask] =
- branch_target_blkid[i] << 1;
+ (branch_target_blkid[i] << 1) | 1;
+
+ // mark literals
+ for (i = 0; i < literal_addr_count; i++) {
+ tmp = literal_addr[i];
+ drc_ram_blk[(tmp >> shift) & mask] = blkid_main << 1;
+ if (!(tmp & 3)) // assume long
+ drc_ram_blk[((tmp + 2) >> shift) & mask] = blkid_main << 1;
+ }
}
tcache_ptrs[tcache_id] = tcache_ptr;
host_instructions_updated(block_entry, tcache_ptr);
do_host_disasm(tcache_id);
- dbg(1, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
+ dbg(2, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
tcache_id, block_counts[tcache_id],
tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
insns_compiled, host_insn_count, (double)host_insn_count / insns_compiled);
if ((sh2->pc & 0xc6000000) == 0x02000000) // ROM
- dbg(1, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
+ dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
/*
printf("~~~\n");
tcache_dsm_ptrs[tcache_id] = block_entry;
printf("~~~\n");
*/
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
fflush(stdout);
#endif
emith_call(sh2_translate);
emit_block_entry();
// XXX: can't translate, fail
- emith_call(exit);
+ emith_call(dr_failure);
// sh2_drc_test_irq(void)
// assumes it's called from main function (may jump to dispatcher)
MAKE_WRITE_WRAPPER(sh2_drc_write16);
MAKE_WRITE_WRAPPER(sh2_drc_write16_slot);
MAKE_WRITE_WRAPPER(sh2_drc_write32);
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
host_dasm_new_symbol(sh2_drc_read8);
host_dasm_new_symbol(sh2_drc_read16);
host_dasm_new_symbol(sh2_drc_read32);
#endif
rcache_invalidate();
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
host_dasm_new_symbol(sh2_drc_entry);
host_dasm_new_symbol(sh2_drc_dispatcher);
host_dasm_new_symbol(sh2_drc_exit);
+// NOTE(review): kills one block/subblock entry after self-modifying code is
+// detected. Instead of the old plain jump-to-dispatcher patch, the entry's
+// host code is overwritten with "PC = bd->addr; jump to dispatcher", so
+// anything still linked to this entry re-enters at a known SH2 PC.
static void *sh2_smc_rm_block_entry(block_desc *bd, int tcache_id)
{
+  void *tmp;
+
// XXX: kill links somehow?
-  dbg(1, "  killing entry %08x, blkid %d", bd->addr, bd - block_tables[tcache_id]);
-  bd->addr = 0;
+  dbg(2, "  killing entry %08x, blkid %d", bd->addr, bd - block_tables[tcache_id]);
+  // already-dead entry: nothing to patch, just report and bail out
+  if (bd->addr == 0 || bd->tcache_ptr == NULL) {
+    dbg(1, "  killing dead block!? %08x", bd->addr);
+    return bd->tcache_ptr;
+  }
+
// since we never reuse space of dead blocks,
// insert jump to dispatcher for blocks that are linked to this point
-  emith_jump_at(bd->tcache_ptr, sh2_drc_dispatcher);
+  //emith_jump_at(bd->tcache_ptr, sh2_drc_dispatcher);
+
+  // attempt to handle self-modifying blocks by exiting at nearest known PC
+  // (emit the exit stub in place: temporarily retarget tcache_ptr at the
+  // dead entry, generate the stub, then restore it)
+  tmp = tcache_ptr;
+  tcache_ptr = bd->tcache_ptr;
+  emit_move_r_imm32(SHR_PC, bd->addr);
+  rcache_flush();
+  emith_jump(sh2_drc_dispatcher);
+  tcache_ptr = tmp;
+
+  bd->addr = 0;
  return bd->tcache_ptr;
}
//int bl_count = block_link_counts[tcache_id];
block_desc *btab = block_tables[tcache_id];
u16 *p = drc_ram_blk + ((a & mask) >> shift);
- u16 *pe = drc_ram_blk + (mask >> shift);
+ u16 *pmax = drc_ram_blk + (mask >> shift);
void *tcache_min, *tcache_max;
- int main_id, prev_id = 0;
+ int zeros;
+ u16 *pt;
+
+  // Figure out which block is the main one, as subblocks also have the flag
+  // set. This relies on a subblock having a single entry. It's possible that
+  // an innocent block might be hit, but that's not such a big deal.
+ if ((p[0] >> 1) != (p[1] >> 1)) {
+ for (; p > drc_ram_blk; p--)
+ if (p[-1] == 0 || (p[-1] >> 1) == (*p >> 1))
+ break;
+ }
+ pt = p;
- while (p > drc_ram_blk && (*p & 1) == 0)
- p--;
+ for (; p > drc_ram_blk; p--)
+ if ((*p & 1))
+ break;
- if (!(*p & 1))
- printf("smc rm: missing block start for %08x?\n", a);
- main_id = *p >> 1;
- tcache_min = tcache_max = sh2_smc_rm_block_entry(&btab[main_id], tcache_id);
+ if (!(*p & 1)) {
+ dbg(1, "smc rm: missing block start for %08x?", a);
+ p = pt;
+ }
- for (*p++ = 0; p <= pe && *p != 0 && !(*p & 1); p++) {
+ if (*p == 0)
+ return;
+
+ tcache_min = tcache_max = sh2_smc_rm_block_entry(&btab[*p >> 1], tcache_id);
+ *p = 0;
+
+ for (p++, zeros = 0; p < pmax && zeros < MAX_LITERAL_OFFSET / 2; p++) {
int id = *p >> 1;
- if (id != main_id && id != prev_id)
- tcache_max = sh2_smc_rm_block_entry(&btab[*p >> 1], tcache_id);
+ if (id == 0) {
+ // there can be holes because games sometimes keep variables
+ // directly in literal pool and we don't inline them to avoid recompile
+ // (Star Wars Arcade)
+ zeros++;
+ continue;
+ }
+ if (*p & 1) {
+ if (id == (p[1] >> 1))
+ // hit other block
+ break;
+ tcache_max = sh2_smc_rm_block_entry(&btab[id], tcache_id);
+ }
*p = 0;
- prev_id = id;
}
- host_instructions_updated(tcache_min, (void *)((char *)tcache_max + 4));
+ host_instructions_updated(tcache_min, (void *)((char *)tcache_max + 4*4 + 4));
}
void sh2_drc_wcheck_ram(unsigned int a, int val, int cpuid)
{
- dbg(1, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
+ dbg(2, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
sh2_smc_rm_block(a, Pico32xMem->drcblk_ram, 0, SH2_DRCBLK_RAM_SHIFT, 0x3ffff);
}
void sh2_drc_wcheck_da(unsigned int a, int val, int cpuid)
{
- dbg(1, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
+ dbg(2, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
sh2_smc_rm_block(a, Pico32xMem->drcblk_da[cpuid],
1 + cpuid, SH2_DRCBLK_DA_SHIFT, 0xfff);
}
-void sh2_execute(SH2 *sh2c, int cycles)
+int sh2_execute(SH2 *sh2c, int cycles)
{
int ret_cycles;
- sh2 = sh2c; // XXX
- sh2c->cycles_aim += cycles;
- cycles = sh2c->cycles_aim - sh2c->cycles_done;
+ sh2c->cycles_timeslice = cycles;
// cycles are kept in SHR_SR unused bits (upper 20)
- // bit19 contains T saved for delay slot
+ // bit11 contains T saved for delay slot
// others are usual SH2 flags
sh2c->sr &= 0x3f3;
sh2c->sr |= cycles << 12;
// TODO: irq cycles
ret_cycles = (signed int)sh2c->sr >> 12;
if (ret_cycles > 0)
- printf("warning: drc returned with cycles: %d\n", ret_cycles);
+ dbg(1, "warning: drc returned with cycles: %d", ret_cycles);
- sh2c->cycles_done += cycles - ret_cycles;
+ return sh2c->cycles_timeslice - ret_cycles;
}
-#if (DRC_DEBUG & 1)
+#if (DRC_DEBUG & 2)
void block_stats(void)
{
int c, b, i, total = 0;
// tmp
PicoOpt |= POPT_DIS_VDP_FIFO;
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
for (i = 0; i < ARRAY_SIZE(block_tables); i++)
tcache_dsm_ptrs[i] = tcache_bases[i];
// disasm the utils
block_stats();
for (i = 0; i < TCACHE_BUFFERS; i++) {
-#if (DRC_DEBUG & 2)
+#if (DRC_DEBUG & 4)
printf("~~~ tcache %d\n", i);
tcache_dsm_ptrs[i] = tcache_bases[i];
tcache_ptr = tcache_ptrs[i];
hash_table = NULL;
}
}
+
+#endif /* DRC_SH2 */
+
+// Map an SH2 PC to a host base pointer such that base[pc/2] (as u16 *)
+// fetches the opcode at pc — see FETCH_OP/FETCH32. Handles BIOS, on-chip
+// data array, SDRAM and ROM regions. Returns (void *)-1 for unmapped
+// regions, because NULL can be a legitimate computed base after the
+// "(char *)ret - (pc & ~mask)" adjustment below.
+static void *dr_get_pc_base(u32 pc, int is_slave)
+{
+  void *ret = NULL;
+  u32 mask = 0;
+
+  if ((pc & ~0x7ff) == 0) {
+    // BIOS
+    ret = is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
+    mask = 0x7ff;
+  }
+  else if ((pc & 0xfffff000) == 0xc0000000) {
+    // data array
+    ret = Pico32xMem->data_array[is_slave];
+    mask = 0xfff;
+  }
+  else if ((pc & 0xc6000000) == 0x06000000) {
+    // SDRAM
+    ret = Pico32xMem->sdram;
+    mask = 0x03ffff;
+  }
+  else if ((pc & 0xc6000000) == 0x02000000) {
+    // ROM
+    ret = Pico.rom;
+    mask = 0x3fffff;
+  }
+
+  if (ret == NULL)
+    return (void *)-1; // NULL is valid value
+
+  // bias the base so that indexing by pc/2 lands inside the region
+  return (char *)ret - (pc & ~mask);
+}
+
+// 1st-pass scanner for sh2_translate(): walks instructions from base_pc for
+// at most BLOCK_CYCLE_LIMIT-1 opcodes, filling op_flags[] (indexed via
+// OP_FLAGS) with OF_DELAY_OP on delay-slot insns and OF_TARGET on branch
+// targets that land inside the scanned window. Scanning stops at the first
+// block-ending instruction (BRA/BSR, BRAF/BSRF/RTS/RTE, JMP/JSR, SLEEP,
+// TRAPA); *end_pc receives the PC where the scan stopped.
+void scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc)
+{
+  u16 *dr_pc_base;
+  u32 pc, target, op;
+  int cycles;
+
+  memset(op_flags, 0, BLOCK_CYCLE_LIMIT);
+
+  // NOTE(review): the (void *)-1 failure value of dr_get_pc_base is not
+  // checked here — presumably callers guarantee a mapped base_pc; confirm.
+  dr_pc_base = dr_get_pc_base(base_pc, is_slave);
+
+  for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT-1; cycles++, pc += 2) {
+    op = FETCH_OP(pc);
+    if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
+      // sign-extend the 12-bit displacement, in units of 2 bytes
+      signed int offs = ((signed int)(op << 20) >> 19);
+      pc += 2;
+      OP_FLAGS(pc) |= OF_DELAY_OP;
+      target = pc + offs + 2;
+      if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
+        OP_FLAGS(target) |= OF_TARGET;
+      break;
+    }
+    if ((op & 0xf000) == 0) {
+      op &= 0xff;
+      if (op == 0x1b) // SLEEP
+        break;
+      // BRAF, BSRF, RTS, RTE
+      if (op == 0x23 || op == 0x03 || op == 0x0b || op == 0x2b) {
+        pc += 2;
+        OP_FLAGS(pc) |= OF_DELAY_OP;
+        break;
+      }
+      continue;
+    }
+    if ((op & 0xf0df) == 0x400b) { // JMP, JSR
+      pc += 2;
+      OP_FLAGS(pc) |= OF_DELAY_OP;
+      break;
+    }
+    if ((op & 0xf900) == 0x8900) { // BT(S), BF(S)
+      // sign-extend the 8-bit displacement, in units of 2 bytes
+      signed int offs = ((signed int)(op << 24) >> 23);
+      if (op & 0x0400)
+        OP_FLAGS(pc + 2) |= OF_DELAY_OP;
+      target = pc + offs + 4;
+      if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
+        OP_FLAGS(target) |= OF_TARGET;
+    }
+    if ((op & 0xff00) == 0xc300) // TRAPA
+      break;
+  }
+  *end_pc = pc;
+}
+
+// vim:shiftwidth=2:ts=2:expandtab