/*
* vim:shiftwidth=2:expandtab
+ *
+ * notes:
+ * - tcache, block descriptor, link buffer overflows result in sh2_translate()
+ * failure, followed by full tcache invalidation for that region
+ * - jumps between blocks are tracked for SMC handling (in block_links[]),
+ * except jumps between different tcaches
+ *
+ * implemented:
+ * - static register allocation
+ * - remaining register caching and tracking in temporaries
+ * - block-local branch linking
+ * - block linking (except between tcaches)
+ *
+ * TODO:
+ * - proper SMC handling
+ * - constant propagation
+ * - stack caching?
+ * - bug fixing
*/
#include <stddef.h>
#include <stdio.h>
#else
#define do_host_disasm(x)
#endif
+
#if (DRC_DEBUG & 4)
// debug hook called on every block entry: logs which CPU (master/slave),
// guest PC, host block pointer and remaining cycles, then passes the
// block pointer straight through so it can sit on the dispatch path.
static void REGPARM(3) *sh2_drc_announce_entry(void *block, SH2 *sh2, u32 sr)
{
  if (block == NULL)
    return NULL;

  dbg(4, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
    sh2->pc, block, (signed int)sr >> 12);
  return block;
}
#endif
// } debug
#define BLOCK_CYCLE_LIMIT 100
#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
#define TCACHE_BUFFERS 3
// we have 3 translation cache buffers, split from one drc/cmn buffer.
// BIOS shares tcache with data array because it's only used for init
// and can be discarded early
// XXX: need to tune sizes
static const int tcache_sizes[TCACHE_BUFFERS] = {
  DRC_TCACHE_SIZE * 6 / 8, // ROM, DRAM
  DRC_TCACHE_SIZE / 8, // BIOS, data array in master sh2
  DRC_TCACHE_SIZE / 8, // ... slave
};
static u8 *tcache_bases[TCACHE_BUFFERS]; // start of each tcache region
static u8 *tcache_ptrs[TCACHE_BUFFERS];  // current emit position in each region
// ptr for code emiters
static u8 *tcache_ptr;
// descriptor of one translated block; looked up by guest PC
// (hash table for ROM, drcblk_* id tables for RAM/data array)
typedef struct block_desc_ {
  u32 addr;                  // SH2 PC address (guest entry point)
  u32 end_addr;              // TODO rm?
  void *tcache_ptr;          // translated block for above PC
  struct block_desc_ *next;  // next block with the same PC hash
#if (DRC_DEBUG & 1)
  int refcount;              // entry count, for block_stats() only
#endif
} block_desc;
+
// record of a cross-block branch: 'jump' is the host-code location of a
// patchable jump that wants to land on the block for guest PC 'target_pc'
typedef struct block_link_ {
  u32 target_pc;
  void *jump;                // patcher entry point (host code)
//  struct block_link_ *next;
} block_link;
+
// per-tcache capacity for block descriptors; the matching block_links[]
// array is allocated with twice this many entries (max 2 exits per block,
// see sh2_drc_init)
static const int block_max_counts[TCACHE_BUFFERS] = {
  4*1024,
  256,
  256,
};
static block_desc *block_tables[TCACHE_BUFFERS]; // allocated in sh2_drc_init()
static block_link *block_links[TCACHE_BUFFERS];  // tracked cross-block branches
static int block_counts[TCACHE_BUFFERS];         // used entries in block_tables
static int block_link_counts[TCACHE_BUFFERS];    // used entries in block_links
+
// host register tracking
enum {
HR_FREE,
{ 3, },
};
-#else
+#elif defined(__i386__)
#include "../drc/emit_x86.c"
static const int reg_map_g2h[] = {
{ xDX, },
};
+#else
+#error unsupported arch
#endif
#define T 0x00000001
#define M 0x00000200
#define T_save 0x00000800
+#define I_SHIFT 4
#define Q_SHIFT 8
#define M_SHIFT 9
-typedef struct block_desc_ {
- u32 addr; // SH2 PC address
- u32 end_addr; // TODO rm?
- void *tcache_ptr; // translated block for above PC
- struct block_desc_ *next; // next block with the same PC hash
-#if (DRC_DEBUG & 1)
- int refcount;
-#endif
-} block_desc;
-
-static const int block_max_counts[3] = {
- 4*1024,
- 256,
- 256,
-};
-static block_desc *block_tables[3];
-static int block_counts[3];
-
// ROM hash table
#define MAX_HASH_ENTRIES 1024
#define HASH_MASK (MAX_HASH_ENTRIES - 1)
#define HASH_FUNC(hash_tab, addr) \
((block_desc **)(hash_tab))[(addr) & HASH_MASK]
-static void REGPARM(2) (*sh2_drc_entry)(const void *block, SH2 *sh2);
-static void (*sh2_drc_exit)(void);
// entry points into generated helper code, emitted by sh2_generate_utils()
static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void (*sh2_drc_dispatcher)(void);
static void (*sh2_drc_exit)(void);
static void (*sh2_drc_test_irq)(void);
// 8/16-bit guest write wrappers; the _slot variants are used for writes
// performed in branch delay slots (see sh2_generate_utils)
static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write8_slot)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
static void REGPARM(2) (*sh2_drc_write16_slot)(u32 a, u32 d);
-// tmp
extern void REGPARM(2) sh2_do_op(SH2 *sh2, int opcode);
-static void REGPARM(1) sh2_test_irq(SH2 *sh2);
static void flush_tcache(int tcid)
{
#endif
}
+// add block links (tracked branches)
+static int dr_add_block_link(u32 target_pc, void *jump, int tcache_id)
+{
+ block_link *bl = block_links[tcache_id];
+ int cnt = block_link_counts[tcache_id];
+
+ if (cnt >= block_max_counts[tcache_id] * 2) {
+ printf("bl overflow for tcache %d\n", tcache_id);
+ return -1;
+ }
+
+ bl[cnt].target_pc = target_pc;
+ bl[cnt].jump = jump;
+ block_link_counts[tcache_id]++;
+
+ return 0;
+}
+
static void *dr_find_block(block_desc *tab, u32 addr)
{
for (tab = tab->next; tab != NULL; tab = tab->next)
int *bcount = &block_counts[tcache_id];
block_desc *bd;
- if (*bcount >= block_max_counts[tcache_id])
+ if (*bcount >= block_max_counts[tcache_id]) {
+ printf("bd overflow for tcache %d\n", tcache_id);
return NULL;
+ }
bd = &block_tables[tcache_id][*bcount];
bd->addr = addr;
return bd;
}
// append 'item' to a fixed-size array, running 'failcode' on overflow
// (the slot is still written; overflow is reported one step early).
// NOTE: deliberately NOT wrapped in do { } while (0) - 'failcode' may be
// a 'break' that must escape the caller's enclosing loop; callers must
// therefore not use this macro as a lone if/else body.
#define ADD_TO_ARRAY(array, count, item, failcode) \
  array[count++] = item; \
  if (count >= ARRAY_SIZE(array)) { \
    printf("warning: " #array " overflow\n"); \
    failcode; \
  }
+
int find_in_array(u32 *array, size_t size, u32 what)
{
size_t i;
// reg cache must be clean before call
// emit a guest memory read of 2^size bytes; the address is expected in
// host arg0. Emits an inline fast path (direct host load) when ROM and
// SDRAM happen to be mapped at their guest addresses, with a fallback
// call to the generic p32x_sh2_read* handler. Returns the host register
// holding the loaded value.
static int emit_memhandler_read(int size)
{
  int arg0, arg1;
  host_arg2reg(arg0, 0);

  // must writeback cycles for poll detection stuff
  if (reg_map_g2h[SHR_SR] != -1)
    emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
  arg1 = rcache_get_tmp_arg(1);
  emith_move_r_r(arg1, CONTEXT_REG);

#if 1
  if (Pico.rom == (void *)0x02000000 && Pico32xMem->sdram == (void *)0x06000000) {
    // mask selects ROM (0x02xxxxxx) and SDRAM (0x06xxxxxx) ranges in one test
    int tmp = rcache_get_tmp();
    // NOTE(review): tmp is not explicitly freed here; presumably the
    // rcache_invalidate() below reclaims it - confirm
    emith_and_r_r_imm(tmp, arg0, 0xfb000000);
    emith_cmp_r_imm(tmp, 0x02000000);
    switch (size) {
    case 0: // 8
      EMITH_SJMP3_START(DCOND_NE);
      emith_eor_r_imm_c(DCOND_EQ, arg0, 1); // byte lanes are swapped on host
      emith_read8_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
      EMITH_SJMP3_MID(DCOND_NE);
      emith_call_cond(DCOND_NE, p32x_sh2_read8);
      EMITH_SJMP3_END();
      break;
    case 1: // 16
      EMITH_SJMP3_START(DCOND_NE);
      emith_read16_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
      EMITH_SJMP3_MID(DCOND_NE);
      emith_call_cond(DCOND_NE, p32x_sh2_read16);
      EMITH_SJMP3_END();
      break;
    case 2: // 32
      EMITH_SJMP3_START(DCOND_NE);
      emith_read_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
      emith_ror_c(DCOND_EQ, arg0, arg0, 16); // fix halfword order
      EMITH_SJMP3_MID(DCOND_NE);
      emith_call_cond(DCOND_NE, p32x_sh2_read32);
      EMITH_SJMP3_END();
      break;
    }
  }
  else
#endif
  {
    // slow path only: always go through the memhandlers
    switch (size) {
    case 0: // 8
      emith_call(p32x_sh2_read8);
      break;
    case 1: // 16
      emith_call(p32x_sh2_read16);
      break;
    case 2: // 32
      emith_call(p32x_sh2_read32);
      break;
    }
  }
  rcache_invalidate();
  // assuming arg0 and retval reg matches
  return rcache_get_tmp_arg(0);
}
// emit a guest memory write of 2^size bytes; address in arg0, data in arg1.
// 8/16-bit writes go through the sh2_drc_write* wrappers (which detect
// write-caused interrupts); 'delay' selects the delay-slot variant, and
// for the non-delay case the current guest PC is made visible first.
// 32-bit writes call the raw memhandler directly.
static void emit_memhandler_write(int size, u32 pc, int delay)
{
  int ctxr;
  host_arg2reg(ctxr, 2);
  switch (size) {
  case 0: // 8
    // XXX: consider inlining sh2_drc_write8
    if (delay) {
      emith_call(sh2_drc_write8_slot);
    } else {
      emit_move_r_imm32(SHR_PC, pc);
      rcache_clean();
      emith_call(sh2_drc_write8);
    }
    break;
  case 1: // 16
    if (delay) {
      emith_call(sh2_drc_write16_slot);
    } else {
      emit_move_r_imm32(SHR_PC, pc);
      rcache_clean();
      emith_call(sh2_drc_write16);
    }
    break;
  case 2: // 32
    // 32-bit writes bypass the irq-detecting wrappers; pass context directly
    emith_move_r_r(ctxr, CONTEXT_REG);
    emith_call(p32x_sh2_write32);
    break;
  }
return emit_memhandler_read(size);
}
-// tmp_wr -> @(Rx,Ry)
-static void emit_indirect_indexed_write(int tmp_wr, int rx, int ry, int size)
-{
- int a0, t;
- rcache_clean();
- t = rcache_get_tmp_arg(1);
- emith_move_r_r(t, tmp_wr);
- a0 = rcache_get_reg_arg(0, rx);
- t = rcache_get_reg(ry, RC_GR_READ);
- emith_add_r_r(a0, t);
- emit_memhandler_write(size);
-}
-
// read @Rn, @rm
static void emit_indirect_read_double(u32 *rnr, u32 *rmr, int rn, int rm, int size)
{
}
}
// emit the common tail of a block lookup: if the lookup result (in arg0)
// is non-NULL, jump into the translated code, otherwise fall through to
// the next recovery step in the dispatcher
static void emit_block_entry(void)
{
  int arg0, arg1, arg2;
  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
#if (DRC_DEBUG & 4)
  // route the entry through the announce hook (returns block unchanged)
  emith_move_r_r(arg1, CONTEXT_REG);
  emith_move_r_r(arg2, rcache_get_reg(SHR_SR, RC_GR_READ));
  emith_call(sh2_drc_announce_entry);
  rcache_invalidate();
#endif
  emith_tst_r_r(arg0, arg0);
  EMITH_SJMP_START(DCOND_EQ);
  emith_jump_reg_c(DCOND_NE, arg0); // enter the translated block
  EMITH_SJMP_END(DCOND_EQ);
}
- // sh2_drc_exit(void)
- sh2_drc_exit = (void *)tcache_ptr;
- emit_do_static_regs(1, tmp);
- emith_sh2_drc_exit();
// map a guest PC to its translated block (or NULL if not yet translated);
// also reports which tcache the PC belongs to via *tcache_id:
//   0 = ROM/RAM, 1 = master data array/BIOS, 2 = slave data array
static void REGPARM(3) *lookup_block(u32 pc, int is_slave, int *tcache_id)
{
  block_desc *bd = NULL;
  void *block = NULL;
  *tcache_id = 0;

  // we have full block id tables for data_array and RAM
  // BIOS goes to data_array table too
  if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0) {
    int blkid = Pico32xMem->drcblk_da[is_slave][(pc & 0xfff) >> SH2_DRCBLK_DA_SHIFT];
    *tcache_id = 1 + is_slave;
    if (blkid & 1) { // low bit = entry valid; rest is the block index
      bd = &block_tables[*tcache_id][blkid >> 1];
      block = bd->tcache_ptr;
    }
  }
  // RAM
  else if ((pc & 0xc6000000) == 0x06000000) {
    int blkid = Pico32xMem->drcblk_ram[(pc & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT];
    if (blkid & 1) {
      bd = &block_tables[0][blkid >> 1];
      block = bd->tcache_ptr;
    }
  }
  // ROM
  else if ((pc & 0xc6000000) == 0x02000000) {
    bd = HASH_FUNC(hash_table, pc);
    if (bd != NULL) {
      if (bd->addr == pc)
        block = bd->tcache_ptr;
      else
        block = dr_find_block(bd, pc); // walk the hash chain
    }
  }

#if (DRC_DEBUG & 1)
  if (bd != NULL)
    bd->refcount++;
#endif
  return block;
}
+
+void dr_link_blocks(void *target, u32 pc, int tcache_id)
+{
+ block_link *bl = block_links[tcache_id];
+ int cnt = block_link_counts[tcache_id];
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ if (bl[i].target_pc == pc) {
+ dbg(1, "- link from %p", bl[i].jump);
+ emith_jump_patch(bl[i].jump, target);
+ // XXX: sync ARM caches (old jump should be fine)?
+ }
+ }
+}
+
+void *dr_prepare_ext_branch(u32 pc, SH2 *sh2, int tcache_id)
+{
+ int target_tcache_id;
+ void *target;
+ int ret;
+
+ target = lookup_block(pc, sh2->is_slave, &target_tcache_id);
+ if (target_tcache_id == tcache_id) {
+ // allow linking blocks only from local cache
+ ret = dr_add_block_link(pc, tcache_ptr, tcache_id);
+ if (ret < 0)
+ return NULL;
+ }
+ if (target == NULL || target_tcache_id != tcache_id)
+ target = sh2_drc_dispatcher;
+
+ return target;
}
#define DELAYED_OP \
drcf.use_saved_t = 1; \
}
// subtract the locally accumulated cycle count from the guest SR
// (cycle counter lives in SR bits 12+) and reset the local counter.
// Relies on a variable named 'cycles' being in scope at the use site.
// Wrapped in do { } while (0) so the macro acts as a single statement
// and is safe as an unbraced if/else body (the bare 'if' form was a
// dangling-else hazard).
#define FLUSH_CYCLES(sr) \
  do { \
    if (cycles > 0) { \
      emith_sub_r_imm(sr, cycles << 12); \
      cycles = 0; \
    } \
  } while (0)
+
#define CHECK_UNHANDLED_BITS(mask) { \
if ((op & (mask)) != 0) \
goto default_; \
if (GET_Fx() >= n) \
goto default_
-#define MAX_LOCAL_BRANCHES 16
+#define MAX_LOCAL_BRANCHES 32
// op_flags: data from 1st pass
#define OP_FLAGS(pc) op_flags[((pc) - base_pc) / 2]
#define OF_DELAY_OP (1 << 0)
-static void *sh2_translate(SH2 *sh2, int tcache_id)
+static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
{
// XXX: maybe use structs instead?
void *branch_target_ptr[MAX_LOCAL_BRANCHES];
void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
int branch_patch_count = 0;
- int branch_patch_cond = -1;
+ int pending_branch_cond = -1;
+ int pending_branch_pc = 0;
u8 op_flags[BLOCK_CYCLE_LIMIT + 1];
struct {
u32 delayed_op:2;
u32 use_saved_t:1; // delayed op modifies T
} drcf = { 0, };
+ // PC of current, first, last, last_target_blk SH2 insn
+ u32 pc, base_pc, end_pc, out_pc;
void *block_entry;
block_desc *this_block;
- u32 pc, base_pc, end_pc; // PC of current, first, last insn
int blkid_main = 0;
u32 tmp, tmp2;
int cycles;
tcache_ptr = tcache_ptrs[tcache_id];
this_block = dr_add_block(base_pc, tcache_id, &blkid_main);
+ if (this_block == NULL)
+ return NULL;
// predict tcache overflow
tmp = tcache_ptr - tcache_bases[tcache_id];
- if (tmp > tcache_sizes[tcache_id] - MAX_BLOCK_SIZE || this_block == NULL)
+ if (tmp > tcache_sizes[tcache_id] - MAX_BLOCK_SIZE) {
+ printf("tcache %d overflow\n", tcache_id);
return NULL;
+ }
block_entry = tcache_ptr;
dbg(1, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
tcache_id, blkid_main, base_pc, block_entry);
+ dr_link_blocks(tcache_ptr, base_pc, tcache_id);
+
// 1st pass: scan forward for local branches
memset(op_flags, 0, sizeof(op_flags));
for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT; cycles++, pc += 2) {
op = p32x_sh2_read16(pc, sh2);
if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
+ signed int offs = ((signed int)(op << 20) >> 19);
pc += 2;
OP_FLAGS(pc) |= OF_DELAY_OP;
+ ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 2,);
break;
}
if ((op & 0xf000) == 0) {
op &= 0xff;
- if (op == 0x23 || op == 0x03 || op == 0x0b) { // BRAF, BSRF, RTS
+ if (op == 0x1b) // SLEEP
+ break;
+ if (op == 0x23 || op == 0x03 || op == 0x0b || op == 0x2b) { // BRAF, BSRF, RTS, RTE
pc += 2;
OP_FLAGS(pc) |= OF_DELAY_OP;
break;
signed int offs = ((signed int)(op << 24) >> 23);
if (op & 0x0400)
OP_FLAGS(pc + 2) |= OF_DELAY_OP;
- branch_target_pc[branch_target_count++] = pc + offs + 4;
- if (branch_target_count == MAX_LOCAL_BRANCHES) {
- printf("warning: branch target overflow\n");
- // will only spawn additional blocks
- break;
- }
+ ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc + offs + 4, break);
}
+ if ((op & 0xff00) == 0xc300) // TRAPA
+ break;
}
end_pc = pc;
// -------------------------------------------------
// 2nd pass: actual compilation
+ out_pc = 0;
pc = base_pc;
for (cycles = 0; pc <= end_pc || drcf.delayed_op; )
{
int blkid;
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
- emith_sub_r_imm(sr, cycles << 12);
- cycles = 0;
+ FLUSH_CYCLES(sr);
rcache_flush();
do_host_disasm(tcache_id);
*drcblk = (blkid << 1) | 1;
}
- dbg(1, "=== %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
+ dbg(1, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
tcache_id, blkid, pc, tcache_ptr);
+
+ // since we made a block entry, link any other blocks that jump to current pc
+ dr_link_blocks(tcache_ptr, pc, tcache_id);
}
branch_target_ptr[i] = tcache_ptr;
emith_move_r_imm(tmp3, pc + 2);
emith_add_r_r(tmp, tmp3);
}
+ out_pc = (u32)-1;
cycles++;
goto end_op;
case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
- tmp = rcache_get_reg(GET_Rm(), RC_GR_READ);
- emit_indirect_indexed_write(tmp, SHR_R0, GET_Rn(), op & 3);
+ rcache_clean();
+ tmp = rcache_get_reg_arg(1, GET_Rm());
+ tmp2 = rcache_get_reg_arg(0, SHR_R0);
+ tmp3 = rcache_get_reg(GET_Rn(), RC_GR_READ);
+ emith_add_r_r(tmp2, tmp3);
+ emit_memhandler_write(op & 3, pc, drcf.delayed_op);
goto end_op;
case 0x07:
// MUL.L Rm,Rn 0000nnnnmmmm0111
case 0: // RTS 0000000000001011
DELAYED_OP;
emit_move_r_r(SHR_PC, SHR_PR);
+ out_pc = (u32)-1;
cycles++;
break;
case 1: // SLEEP 0000000000011011
- emit_move_r_imm32(SHR_PC, pc - 2);
tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
emith_clear_msb(tmp, tmp, 20); // clear cycles
- drcf.test_irq = 1;
+ out_pc = out_pc - 2;
cycles = 1;
- break;
+ goto end_op;
case 2: // RTE 0000000000101011
DELAYED_OP;
rcache_clean();
tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
emith_add_r_imm(tmp, 4*2);
drcf.test_irq = 1;
+ out_pc = (u32)-1;
cycles += 3;
break;
default:
tmp = rcache_get_reg_arg(0, GET_Rn());
tmp2 = rcache_get_reg_arg(1, GET_Rm());
emith_add_r_imm(tmp, (op & 0x0f) * 4);
- emit_memhandler_write(2);
+ emit_memhandler_write(2, pc, drcf.delayed_op);
goto end_op;
case 0x02:
rcache_clean();
rcache_get_reg_arg(0, GET_Rn());
rcache_get_reg_arg(1, GET_Rm());
- emit_memhandler_write(op & 3);
+ emit_memhandler_write(op & 3, pc, drcf.delayed_op);
goto end_op;
case 0x04: // MOV.B Rm,@–Rn 0010nnnnmmmm0100
case 0x05: // MOV.W Rm,@–Rn 0010nnnnmmmm0101
rcache_clean();
rcache_get_reg_arg(0, GET_Rn());
rcache_get_reg_arg(1, GET_Rm());
- emit_memhandler_write(op & 3);
+ emit_memhandler_write(op & 3, pc, drcf.delayed_op);
goto end_op;
case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
rcache_get_reg_arg(0, GET_Rn());
tmp3 = rcache_get_reg_arg(1, tmp);
if (tmp == SHR_SR)
- emith_clear_msb(tmp3, tmp3, 20); // reserved bits defined by ISA as 0
- emit_memhandler_write(2);
+ emith_clear_msb(tmp3, tmp3, 22); // reserved bits defined by ISA as 0
+ emit_memhandler_write(2, pc, drcf.delayed_op);
goto end_op;
case 0x04:
case 0x05:
if (!(op & 0x20))
emit_move_r_imm32(SHR_PR, pc + 2);
emit_move_r_r(SHR_PC, (op >> 8) & 0x0f);
+ out_pc = (u32)-1;
cycles++;
break;
case 1: // TAS.B @Rn 0100nnnn00011011
emith_move_r_r(tmp2, tmp);
rcache_free_tmp(tmp);
rcache_get_reg_arg(0, GET_Rn());
- emit_memhandler_write(0);
+ emit_memhandler_write(0, pc, drcf.delayed_op);
cycles += 3;
break;
default:
tmp2 = rcache_get_reg_arg(1, SHR_R0);
tmp3 = (op & 0x100) >> 8;
emith_add_r_imm(tmp, (op & 0x0f) << tmp3);
- emit_memhandler_write(tmp3);
+ emit_memhandler_write(tmp3, pc, drcf.delayed_op);
goto end_op;
case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
DELAYED_OP;
cycles--;
// fallthrough
- case 0x0900: // BT label 10001001dddddddd
- case 0x0b00: { // BF label 10001011dddddddd
- // jmp_cond ~ cond when guest doesn't jump
- int jmp_cond = (op & 0x0200) ? DCOND_NE : DCOND_EQ;
- int insn_cond = (op & 0x0200) ? DCOND_EQ : DCOND_NE;
- signed int offs = ((signed int)(op << 24) >> 23);
- sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
- if (find_in_array(branch_target_pc, branch_target_count, pc + offs + 2) >= 0) {
- branch_patch_pc[branch_patch_count] = pc + offs + 2;
- branch_patch_cond = insn_cond;
- goto end_op;
- }
-
- // can't resolve branch, cause end of block
- tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE);
- emith_move_r_imm(tmp, pc + (drcf.delayed_op ? 2 : 0));
- emith_tst_r_imm(sr, T);
- EMITH_SJMP_START(jmp_cond);
- if (!drcf.delayed_op)
- offs += 2;
- if (offs < 0) {
- emith_sub_r_imm_c(insn_cond, tmp, -offs);
- } else
- emith_add_r_imm_c(insn_cond, tmp, offs);
- EMITH_SJMP_END(jmp_cond);
+ case 0x0900: // BT label 10001001dddddddd
+ case 0x0b00: // BF label 10001011dddddddd
+ // will handle conditional branches later
+ pending_branch_cond = (op & 0x0200) ? DCOND_EQ : DCOND_NE;
+ i = ((signed int)(op << 24) >> 23);
+ pending_branch_pc = pc + i + 2;
cycles += 2;
- if (!drcf.delayed_op)
- goto end_block_btf;
goto end_op;
- }}
+ }
goto default_;
/////////////////////////////////////////////
case 0x0a:
// BRA label 1010dddddddddddd
DELAYED_OP;
- do_bra:
+ sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
tmp = ((signed int)(op << 20) >> 19);
- emit_move_r_imm32(SHR_PC, pc + tmp + 2);
+ out_pc = pc + tmp + 2;
+ if (tmp == (u32)-4)
+ emith_clear_msb(sr, sr, 20); // burn cycles
cycles++;
break;
// BSR label 1011dddddddddddd
DELAYED_OP;
emit_move_r_imm32(SHR_PR, pc + 2);
- goto do_bra;
+ tmp = ((signed int)(op << 20) >> 19);
+ out_pc = pc + tmp + 2;
+ cycles++;
+ break;
/////////////////////////////////////////////
case 0x0c:
tmp2 = rcache_get_reg_arg(1, SHR_R0);
tmp3 = (op & 0x300) >> 8;
emith_add_r_imm(tmp, (op & 0xff) << tmp3);
- emit_memhandler_write(tmp3);
+ emit_memhandler_write(tmp3, pc, drcf.delayed_op);
goto end_op;
case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
emith_add_r_imm(tmp, 4);
tmp = rcache_get_reg_arg(1, SHR_SR);
emith_clear_msb(tmp, tmp, 22);
- emit_memhandler_write(2);
+ emit_memhandler_write(2, pc, drcf.delayed_op);
// push PC
rcache_get_reg_arg(0, SHR_SP);
tmp = rcache_get_tmp_arg(1);
emith_move_r_imm(tmp, pc);
- emit_memhandler_write(2);
+ emit_memhandler_write(2, pc, drcf.delayed_op);
// obtain new PC
tmp = rcache_get_reg_arg(0, SHR_VBR);
emith_add_r_imm(tmp, (op & 0xff) * 4);
tmp2 = rcache_get_reg(SHR_PC, RC_GR_WRITE);
emith_move_r_r(tmp2, tmp);
rcache_free_tmp(tmp);
+ out_pc = (u32)-1;
cycles += 7;
- goto end_block_btf;
+ goto end_op;
case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
emit_move_r_imm32(SHR_R0, (pc + (op & 0xff) * 4 + 2) & ~3);
goto end_op;
tmp3 = rcache_get_reg_arg(0, SHR_GBR);
tmp4 = rcache_get_reg(SHR_R0, RC_GR_READ);
emith_add_r_r(tmp3, tmp4);
- emit_memhandler_write(0);
+ emit_memhandler_write(0, pc, drcf.delayed_op);
cycles += 2;
goto end_op;
}
}
end_op:
- // block-local conditional branch handling (with/without delay)
- if (branch_patch_cond != -1 && drcf.delayed_op != 2) {
+ // conditional branch handling (with/without delay)
+ if (pending_branch_cond != -1 && drcf.delayed_op != 2)
+ {
+ u32 target_pc = pending_branch_pc;
+ void *target;
+
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
// handle cycles
- emith_sub_r_imm(sr, cycles << 12);
- cycles = 0;
+ FLUSH_CYCLES(sr);
rcache_clean();
-
if (drcf.use_saved_t)
emith_tst_r_imm(sr, T_save);
else
emith_tst_r_imm(sr, T);
- branch_patch_ptr[branch_patch_count] = tcache_ptr;
- emith_jump_patchable(branch_patch_cond);
- drcf.use_saved_t = 0;
- branch_patch_cond = -1;
- branch_patch_count++;
- drcf.delayed_op = 0; // XXX: delayed_op ends block, so must override
- if (branch_patch_count == MAX_LOCAL_BRANCHES) {
- printf("too many local branches\n");
- break;
+ if (find_in_array(branch_target_pc, branch_target_count, target_pc) >= 0) {
+ // local branch
+ // XXX: jumps back can be linked already
+ branch_patch_pc[branch_patch_count] = target_pc;
+ branch_patch_ptr[branch_patch_count] = tcache_ptr;
+ emith_jump_cond_patchable(pending_branch_cond, tcache_ptr);
+
+ branch_patch_count++;
+ if (branch_patch_count == MAX_LOCAL_BRANCHES) {
+ printf("warning: too many local branches\n");
+ break;
+ }
+ }
+ else {
+ // can't resolve branch locally, make a block exit
+ emit_move_r_imm32(SHR_PC, target_pc);
+ rcache_clean();
+
+ target = dr_prepare_ext_branch(target_pc, sh2, tcache_id);
+ if (target == NULL)
+ return NULL;
+ emith_jump_cond_patchable(pending_branch_cond, target);
}
+
+ drcf.use_saved_t = 0;
+ pending_branch_cond = -1;
}
+
// test irq?
- if (drcf.test_irq && drcf.delayed_op != 2)
- break;
- if (drcf.delayed_op == 1)
- break;
+ // XXX: delay slots..
+ if (drcf.test_irq && drcf.delayed_op != 2) {
+ if (!drcf.delayed_op)
+ emit_move_r_imm32(SHR_PC, pc);
+ sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
+ FLUSH_CYCLES(sr);
+ rcache_flush();
+ emith_call(sh2_drc_test_irq);
+ drcf.test_irq = 0;
+ }
do_host_disasm(tcache_id);
- }
-
- // delayed_op means some kind of branch - PC already handled
- if (!drcf.delayed_op)
- emit_move_r_imm32(SHR_PC, pc);
- if (drcf.test_irq) {
- rcache_flush();
- emith_pass_arg_r(0, CONTEXT_REG);
- emith_call(sh2_test_irq);
+ if (out_pc != 0 && drcf.delayed_op != 2)
+ break;
}
-end_block_btf:
- this_block->end_addr = pc;
-
tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
- emith_sub_r_imm(tmp, cycles << 12);
+ FLUSH_CYCLES(tmp);
rcache_flush();
- emith_jump(sh2_drc_exit);
+
+ if (out_pc == (u32)-1) {
+ // indirect jump -> back to dispatcher
+ emith_jump(sh2_drc_dispatcher);
+ } else {
+ void *target;
+ if (out_pc == 0)
+ out_pc = pc;
+ emit_move_r_imm32(SHR_PC, out_pc);
+ rcache_flush();
+
+ target = dr_prepare_ext_branch(out_pc, sh2, tcache_id);
+ if (target == NULL)
+ return NULL;
+ emith_jump_patchable(target);
+ }
// link local branches
for (i = 0; i < branch_patch_count; i++) {
void *target;
int t;
- //printf("patch %08x %p\n", branch_patch_pc[i], branch_patch_ptr[i]);
t = find_in_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
- if (branch_target_ptr[t] != NULL)
- target = branch_target_ptr[t];
- else {
- // flush pc and go back to dispatcher (for now)
+ target = branch_target_ptr[t];
+ if (target == NULL) {
+ // flush pc and go back to dispatcher (should no longer happen)
printf("stray branch to %08x %p\n", branch_patch_pc[i], tcache_ptr);
target = tcache_ptr;
emit_move_r_imm32(SHR_PC, branch_patch_pc[i]);
rcache_flush();
- emith_jump(sh2_drc_exit);
+ emith_jump(sh2_drc_dispatcher);
}
emith_jump_patch(branch_patch_ptr[i], target);
}
+ this_block->end_addr = pc;
+
// mark memory blocks as containing compiled code
if (tcache_id != 0) {
// data array, BIOS
#endif
return block_entry;
-/*
-unimplemented:
- // last op
- do_host_disasm(tcache_id);
- exit(1);
-*/
}
// generate the static helper routines at the start of the tcache:
// exit/dispatcher/irq-test/entry stubs plus the 8/16-bit write wrappers
// that detect write-caused interrupts. The resulting code addresses are
// stored in the sh2_drc_* function pointers.
static void sh2_generate_utils(void)
{
  int arg0, arg1, arg2, sr, tmp;
  void *sh2_drc_write_end, *sh2_drc_write_slot_end;
  host_arg2reg(arg0, 0);
  host_arg2reg(arg1, 1);
  host_arg2reg(arg2, 2);
  emith_move_r_r(arg0, arg0); // nop

  // sh2_drc_exit(void)
  sh2_drc_exit = (void *)tcache_ptr;
  emit_do_static_regs(1, arg2);
  emith_sh2_drc_exit();

  // sh2_drc_dispatcher(void)
  // while cycles remain: look up (or translate) the block for SHR_PC
  // and enter it; leaves via sh2_drc_exit when cycles run out
  sh2_drc_dispatcher = (void *)tcache_ptr;
  sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  emith_cmp_r_imm(sr, 0);
  emith_jump_cond(DCOND_LT, sh2_drc_exit);
  rcache_invalidate();
  emith_ctx_read(arg0, SHR_PC * 4);
  emith_ctx_read(arg1, offsetof(SH2, is_slave));
  emith_add_r_r_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
  emith_call(lookup_block);
  emit_block_entry();
  // lookup failed, call sh2_translate()
  emith_move_r_r(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
  emith_call(sh2_translate);
  emit_block_entry();
  // sh2_translate() failed, flush cache and retry
  emith_ctx_read(arg0, offsetof(SH2, drc_tmp));
  emith_call(flush_tcache);
  emith_move_r_r(arg0, CONTEXT_REG);
  emith_ctx_read(arg1, offsetof(SH2, drc_tmp));
  emith_call(sh2_translate);
  emit_block_entry();
  // XXX: can't translate, fail
  emith_call(exit);

  // sh2_drc_test_irq(void)
  // assumes it's called from main function (may jump to dispatcher)
  sh2_drc_test_irq = (void *)tcache_ptr;
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_READ);
  emith_lsr(arg0, sr, I_SHIFT);
  emith_and_r_imm(arg0, 0x0f);
  emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
  EMITH_SJMP_START(DCOND_GT);
  emith_ret_c(DCOND_LE); // nope, return
  EMITH_SJMP_END(DCOND_GT);
  // adjust SP
  tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
  emith_sub_r_imm(tmp, 4*2);
  rcache_clean();
  // push SR
  tmp = rcache_get_reg_arg(0, SHR_SP);
  emith_add_r_imm(tmp, 4);
  tmp = rcache_get_reg_arg(1, SHR_SR);
  emith_clear_msb(tmp, tmp, 22);
  emith_move_r_r(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_write32);
  rcache_invalidate();
  // push PC
  rcache_get_reg_arg(0, SHR_SP);
  emith_ctx_read(arg1, SHR_PC * 4);
  emith_move_r_r(arg2, CONTEXT_REG);
  emith_call(p32x_sh2_write32);
  rcache_invalidate();
  // update I, cycles, do callback
  emith_ctx_read(arg1, offsetof(SH2, pending_level));
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  emith_bic_r_imm(sr, I);
  emith_or_r_r_lsl(sr, arg1, I_SHIFT);
  emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
  rcache_flush();
  emith_move_r_r(arg0, CONTEXT_REG);
  emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
  // obtain new PC
  emith_lsl(arg0, arg0, 2);
  emith_ctx_read(arg1, SHR_VBR * 4);
  emith_add_r_r(arg0, arg1);
  emit_memhandler_read(2);
  emith_ctx_write(arg0, SHR_PC * 4);
#ifdef __i386__
  emith_add_r_imm(xSP, 4); // fix stack
#endif
  emith_jump(sh2_drc_dispatcher);
  rcache_invalidate();

  // sh2_drc_entry(SH2 *sh2)
  sh2_drc_entry = (void *)tcache_ptr;
  emith_sh2_drc_entry();
  emith_move_r_r(CONTEXT_REG, arg0); // move ctx, arg0
  emit_do_static_regs(0, arg2);
  emith_call(sh2_drc_test_irq);
  emith_jump(sh2_drc_dispatcher);

  // write-caused irq detection
  sh2_drc_write_end = tcache_ptr;
  emith_tst_r_r(arg0, arg0);
  EMITH_SJMP_START(DCOND_NE);
  emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp)); // return
  EMITH_SJMP_END(DCOND_NE);
  // since PC is up to date, jump to its block instead of returning
  emith_call(sh2_drc_test_irq);
  emith_jump_ctx(offsetof(SH2, drc_tmp));

  // write-caused irq detection for writes in delay slot
  sh2_drc_write_slot_end = tcache_ptr;
  emith_tst_r_r(arg0, arg0);
  EMITH_SJMP_START(DCOND_NE);
  emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp));
  EMITH_SJMP_END(DCOND_NE);
  // just burn cycles to get back to dispatcher after branch is handled
  sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
  emith_ctx_write(sr, offsetof(SH2, irq_cycles));
  emith_clear_msb(sr, sr, 20); // clear cycles
  rcache_flush();
  emith_jump_ctx(offsetof(SH2, drc_tmp));

  // sh2_drc_write8(u32 a, u32 d)
  sh2_drc_write8 = (void *)tcache_ptr;
  emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  emith_ctx_read(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg2, sh2_drc_write_end);

  // sh2_drc_write16(u32 a, u32 d)
  sh2_drc_write16 = (void *)tcache_ptr;
  emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  emith_ctx_read(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg2, sh2_drc_write_end);

  // sh2_drc_write8_slot(u32 a, u32 d)
  sh2_drc_write8_slot = (void *)tcache_ptr;
  emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  emith_ctx_read(arg2, offsetof(SH2, write8_tab));
  emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end);

  // sh2_drc_write16_slot(u32 a, u32 d)
  sh2_drc_write16_slot = (void *)tcache_ptr;
  emith_ret_to_ctx(offsetof(SH2, drc_tmp));
  emith_ctx_read(arg2, offsetof(SH2, write16_tab));
  emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end);

  rcache_invalidate();
#if (DRC_DEBUG & 2)
  host_dasm_new_symbol(sh2_drc_entry);
  host_dasm_new_symbol(sh2_drc_dispatcher);
  host_dasm_new_symbol(sh2_drc_exit);
  host_dasm_new_symbol(sh2_drc_test_irq);
  host_dasm_new_symbol(sh2_drc_write_end);
  host_dasm_new_symbol(sh2_drc_write_slot_end);
  host_dasm_new_symbol(sh2_drc_write8);
  host_dasm_new_symbol(sh2_drc_write8_slot);
  host_dasm_new_symbol(sh2_drc_write16);
  host_dasm_new_symbol(sh2_drc_write16_slot);
#endif
}
static void sh2_smc_rm_block(u16 *drcblk, u16 *p, block_desc *btab, u32 a)
block_desc *bd = btab + id;
// FIXME: skip subblocks; do both directions
+ // FIXME: collect all branches
dbg(1, " killing block %08x", bd->addr);
bd->addr = bd->end_addr = 0;
void sh2_execute(SH2 *sh2c, int cycles)
{
+ int ret_cycles;
sh2 = sh2c; // XXX
sh2c->cycles_aim += cycles;
// others are usual SH2 flags
sh2c->sr &= 0x3f3;
sh2c->sr |= cycles << 12;
- sh2_drc_dispatcher(sh2c);
+ sh2_drc_entry(sh2c);
- sh2c->cycles_done += cycles - ((signed int)sh2c->sr >> 12);
-}
+ // TODO: irq cycles
+ ret_cycles = (signed int)sh2c->sr >> 12;
+ if (ret_cycles > 0)
+ printf("warning: drc returned with cycles: %d\n", ret_cycles);
-static void REGPARM(1) sh2_test_irq(SH2 *sh2)
-{
- if (sh2->pending_level > ((sh2->sr >> 4) & 0x0f))
- {
- if (sh2->pending_irl > sh2->pending_int_irq)
- sh2_do_irq(sh2, sh2->pending_irl, 64 + sh2->pending_irl/2);
- else {
- sh2_do_irq(sh2, sh2->pending_int_irq, sh2->pending_int_vector);
- sh2->pending_int_irq = 0; // auto-clear
- sh2->pending_level = sh2->pending_irl;
- }
- }
+ sh2c->cycles_done += cycles - ret_cycles;
}
#if (DRC_DEBUG & 1)
-static void block_stats(void)
+void block_stats(void)
{
int c, b, i, total = 0;
+ printf("block stats:\n");
for (b = 0; b < ARRAY_SIZE(block_tables); b++)
for (i = 0; i < block_counts[b]; i++)
if (block_tables[b][i].addr != 0)
int sh2_drc_init(SH2 *sh2)
{
- if (block_tables[0] == NULL) {
- int i, cnt;
-
- drc_cmn_init();
+ int i;
- cnt = block_max_counts[0] + block_max_counts[1] + block_max_counts[2];
- block_tables[0] = calloc(cnt, sizeof(*block_tables[0]));
- if (block_tables[0] == NULL)
- return -1;
+ if (block_tables[0] == NULL)
+ {
+ for (i = 0; i < TCACHE_BUFFERS; i++) {
+ block_tables[i] = calloc(block_max_counts[i], sizeof(*block_tables[0]));
+ if (block_tables[i] == NULL)
+ goto fail;
+ // max 2 block links (exits) per block
+ block_links[i] = calloc(block_max_counts[i] * 2, sizeof(*block_links[0]));
+ if (block_links[i] == NULL)
+ goto fail;
+ }
+ memset(block_counts, 0, sizeof(block_counts));
+ memset(block_link_counts, 0, sizeof(block_link_counts));
+ drc_cmn_init();
tcache_ptr = tcache;
sh2_generate_utils();
#ifdef ARM
cache_flush_d_inval_i(tcache, tcache_ptr);
#endif
- memset(block_counts, 0, sizeof(block_counts));
tcache_bases[0] = tcache_ptrs[0] = tcache_ptr;
-
- for (i = 1; i < ARRAY_SIZE(block_tables); i++) {
- block_tables[i] = block_tables[i - 1] + block_max_counts[i - 1];
+ for (i = 1; i < ARRAY_SIZE(tcache_bases); i++)
tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i - 1] + tcache_sizes[i - 1];
- }
// tmp
PicoOpt |= POPT_DIS_VDP_FIFO;
if (hash_table == NULL) {
hash_table = calloc(sizeof(hash_table[0]), MAX_HASH_ENTRIES);
if (hash_table == NULL)
- return -1;
+ goto fail;
}
return 0;
+
+fail:
+ sh2_drc_finish(sh2);
+ return -1;
}
// release DRC resources: per-tcache block descriptor and link tables
// (counterpart of sh2_drc_init); optionally dumps a final disassembly
void sh2_drc_finish(SH2 *sh2)
{
  int i;

  if (block_tables[0] != NULL) {
    block_stats();

    for (i = 0; i < TCACHE_BUFFERS; i++) {
#if (DRC_DEBUG & 2)
      printf("~~~ tcache %d\n", i);
      tcache_dsm_ptrs[i] = tcache_bases[i];
      tcache_ptr = tcache_ptrs[i];
      do_host_disasm(i);
#endif

      // free(NULL) is a no-op, so no guards are needed.
      // (was: 'if (block_links[i] == NULL) free(block_links[i]);' -
      // inverted test that leaked every link table)
      free(block_tables[i]);
      block_tables[i] = NULL;
      free(block_links[i]);
      block_links[i] = NULL;
    }

    drc_cmn_cleanup();
  }