/*
* SH2 recompiler
- * (C) notaz, 2009,2010
+ * (C) notaz, 2009,2010,2013
*
* This work is licensed under the terms of MAME license.
* See COPYING file in the top-level directory.
* notes:
* - tcache, block descriptor, link buffer overflows result in sh2_translate()
* failure, followed by full tcache invalidation for that region
- * - jumps between blocks are tracked for SMC handling (in block_links[]),
+ * - jumps between blocks are tracked for SMC handling (in block_entry->links),
* except jumps between different tcaches
- * - non-main block entries are called subblocks, as they have same tracking
- * structures that main blocks have.
*
* implemented:
* - static register allocation
#define LINK_BRANCHES 1
// limits (per block)
-#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
+#define MAX_BLOCK_SIZE (BLOCK_INSN_LIMIT * 6 * 6)
// max literal offset from the block end
#define MAX_LITERAL_OFFSET 32*2
-#define MAX_LITERALS (BLOCK_CYCLE_LIMIT / 4)
+#define MAX_LITERALS (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES 32
///
#ifdef DRC_SH2
// debug stuff
-// 1 - ?
-// 2 - ?
-// 4 - log asm
+// 1 - warnings/errors
+// 2 - block info/smc
+// 4 - asm
+// 8 - runtime block entry log
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0
// ptr for code emiters
static u8 *tcache_ptr;
-typedef struct block_desc_ {
- u32 addr; // SH2 PC address
- u32 end_addr; // address after last op
+#define MAX_BLOCK_ENTRIES (BLOCK_INSN_LIMIT / 8)
+
+struct block_link {
+ u32 target_pc;
+ void *jump; // insn address
+  struct block_link *next;             // either in block_entry->links or unresolved_links[]
+};
+
+struct block_entry {
+ u32 pc;
void *tcache_ptr; // translated block for above PC
- struct block_desc_ *next; // next block with the same PC hash
+ struct block_entry *next; // next block in hash_table with same pc hash
+ struct block_link *links; // links to this entry
#if (DRC_DEBUG & 2)
- int refcount;
+ struct block_desc *block;
#endif
-} block_desc;
+};
-typedef struct block_link_ {
- u32 target_pc;
- void *jump; // insn address
-// struct block_link_ *next;
-} block_link;
+struct block_desc {
+ u32 addr; // block start SH2 PC address
+ u32 end_addr; // address after last op or literal
+#if (DRC_DEBUG & 2)
+ int refcount;
+#endif
+ int entry_count;
+ struct block_entry entryp[MAX_BLOCK_ENTRIES];
+};
static const int block_max_counts[TCACHE_BUFFERS] = {
4*1024,
256,
256,
};
-static block_desc *block_tables[TCACHE_BUFFERS];
-static block_link *block_links[TCACHE_BUFFERS];
+static struct block_desc *block_tables[TCACHE_BUFFERS];
static int block_counts[TCACHE_BUFFERS];
-static int block_link_counts[TCACHE_BUFFERS];
+
+// we have block_link_pool to avoid using mallocs
+static const int block_link_pool_max_counts[TCACHE_BUFFERS] = {
+ 4*1024,
+ 256,
+ 256,
+};
+static struct block_link *block_link_pool[TCACHE_BUFFERS];
+static int block_link_pool_counts[TCACHE_BUFFERS];
+static struct block_link *unresolved_links[TCACHE_BUFFERS];
// used for invalidation
static const int ram_sizes[TCACHE_BUFFERS] = {
#define ADDR_TO_BLOCK_PAGE 0x100
struct block_list {
- block_desc *block;
+ struct block_desc *block;
struct block_list *next;
};
// each array has len: sizeof(mem) / ADDR_TO_BLOCK_PAGE
static struct block_list **inval_lookup[TCACHE_BUFFERS];
+static const int hash_table_sizes[TCACHE_BUFFERS] = {
+ 0x1000,
+ 0x100,
+ 0x100,
+};
+static struct block_entry **hash_tables[TCACHE_BUFFERS];
+
+#define HASH_FUNC(hash_tab, addr, mask) \
+ (hash_tab)[(((addr) >> 20) ^ ((addr) >> 2)) & (mask)]
+
// host register tracking
enum {
HR_FREE,
#define Q_SHIFT 8
#define M_SHIFT 9
-// ROM hash table
-#define MAX_HASH_ENTRIES 1024
-#define HASH_MASK (MAX_HASH_ENTRIES - 1)
-static void **hash_table;
-
-#define HASH_FUNC(hash_tab, addr) \
- ((block_desc **)(hash_tab))[(addr) & HASH_MASK]
-
static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void (*sh2_drc_dispatcher)(void);
static void (*sh2_drc_exit)(void);
return poffs;
}
-static block_desc *dr_get_bd(u32 pc, int is_slave, int *tcache_id)
+static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
{
- *tcache_id = 0;
-
- // we have full block id tables for data_array and RAM
- // BIOS goes to data_array table too
- if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0) {
- int blkid = Pico32xMem->drcblk_da[is_slave][(pc & 0xfff) >> SH2_DRCBLK_DA_SHIFT];
- *tcache_id = 1 + is_slave;
- if (blkid & 1)
- return &block_tables[*tcache_id][blkid >> 1];
- }
- // RAM
- else if ((pc & 0xc6000000) == 0x06000000) {
- int blkid = Pico32xMem->drcblk_ram[(pc & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT];
- if (blkid & 1)
- return &block_tables[0][blkid >> 1];
- }
- // ROM
- else if ((pc & 0xc6000000) == 0x02000000) {
- block_desc *bd = HASH_FUNC(hash_table, pc);
+ struct block_entry *be;
+ u32 tcid = 0, mask;
- for (; bd != NULL; bd = bd->next)
- if (bd->addr == pc)
- return bd;
- }
+ // data arrays have their own caches
+ if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0)
+ tcid = 1 + is_slave;
+
+ *tcache_id = tcid;
+
+ mask = hash_table_sizes[tcid] - 1;
+ be = HASH_FUNC(hash_tables[tcid], pc, mask);
+ for (; be != NULL; be = be->next)
+ if (be->pc == pc)
+ return be;
return NULL;
}
// ---------------------------------------------------------------
// block management
-static void add_to_block_list(struct block_list **blist, block_desc *block)
+static void add_to_block_list(struct block_list **blist, struct block_desc *block)
{
struct block_list *added = malloc(sizeof(*added));
if (!added) {
*blist = added;
}
-static void rm_from_block_list(struct block_list **blist, block_desc *block)
+static void rm_from_block_list(struct block_list **blist, struct block_desc *block)
{
struct block_list *prev = NULL, *current = *blist;
for (; current != NULL; prev = current, current = current->next) {
block_counts[tcid], block_max_counts[tcid]);
block_counts[tcid] = 0;
- block_link_counts[tcid] = 0;
+ block_link_pool_counts[tcid] = 0;
+ unresolved_links[tcid] = NULL;
+ memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * hash_table_sizes[tcid]);
tcache_ptrs[tcid] = tcache_bases[tcid];
- if (tcid == 0) { // ROM, RAM
- memset(hash_table, 0, sizeof(hash_table[0]) * MAX_HASH_ENTRIES);
- memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
+ if (Pico32xMem != NULL) {
+ if (tcid == 0) // ROM, RAM
+ memset(Pico32xMem->drcblk_ram, 0,
+ sizeof(Pico32xMem->drcblk_ram));
+ else
+ memset(Pico32xMem->drcblk_da[tcid - 1], 0,
+ sizeof(Pico32xMem->drcblk_da[0]));
}
- else
- memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[0]));
#if (DRC_DEBUG & 4)
tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
#endif
rm_block_list(&inval_lookup[tcid][i]);
}
-#if LINK_BRANCHES
-// add block links (tracked branches)
-static int dr_add_block_link(u32 target_pc, void *jump, int tcache_id)
+static void add_to_hashlist(struct block_entry *be, int tcache_id)
{
- block_link *bl = block_links[tcache_id];
- int cnt = block_link_counts[tcache_id];
+ u32 tcmask = hash_table_sizes[tcache_id] - 1;
- if (cnt >= block_max_counts[tcache_id] * 2) {
- dbg(1, "bl overflow for tcache %d\n", tcache_id);
- return -1;
+ be->next = HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);
+ HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask) = be;
+
+#if (DRC_DEBUG & 2)
+ if (be->next != NULL) {
+ printf(" %08x: hash collision with %08x\n",
+ be->pc, be->next->pc);
+ hash_collisions++;
+ }
+#endif
+}
+
+static void rm_from_hashlist(struct block_entry *be, int tcache_id)
+{
+ u32 tcmask = hash_table_sizes[tcache_id] - 1;
+ struct block_entry *cur, *prev;
+
+ cur = HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);
+ if (cur == NULL)
+ goto missing;
+
+ if (be == cur) { // first
+ HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask) = be->next;
+ return;
}
- bl[cnt].target_pc = target_pc;
- bl[cnt].jump = jump;
- block_link_counts[tcache_id]++;
+ for (prev = cur, cur = cur->next; cur != NULL; cur = cur->next) {
+ if (cur == be) {
+ prev->next = cur->next;
+ return;
+ }
+ }
- return 0;
+missing:
+ dbg(1, "rm_from_hashlist: be %p %08x missing?", be, be->pc);
}
-#endif
-static block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id)
+static struct block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id)
{
- block_desc *bd;
+ struct block_entry *be;
+ struct block_desc *bd;
int tcache_id;
int *bcount;
- bd = dr_get_bd(addr, is_slave, &tcache_id);
- if (bd != NULL) {
- dbg(2, "block override for %08x", addr);
- bd->tcache_ptr = tcache_ptr;
- bd->end_addr = end_addr;
- *blk_id = bd - block_tables[tcache_id];
- return bd;
- }
+ // do a lookup to get tcache_id and override check
+ be = dr_get_entry(addr, is_slave, &tcache_id);
+ if (be != NULL)
+ dbg(1, "block override for %08x", addr);
bcount = &block_counts[tcache_id];
if (*bcount >= block_max_counts[tcache_id]) {
dbg(1, "bd overflow for tcache %d", tcache_id);
return NULL;
}
- if (*bcount == 0)
- (*bcount)++; // not using descriptor 0
bd = &block_tables[tcache_id][*bcount];
bd->addr = addr;
bd->end_addr = end_addr;
- bd->tcache_ptr = tcache_ptr;
- *blk_id = *bcount;
- (*bcount)++;
- if ((addr & 0xc6000000) == 0x02000000) { // ROM
- bd->next = HASH_FUNC(hash_table, addr);
- HASH_FUNC(hash_table, addr) = bd;
+ bd->entry_count = 1;
+ bd->entryp[0].pc = addr;
+ bd->entryp[0].tcache_ptr = tcache_ptr;
+ bd->entryp[0].links = NULL;
#if (DRC_DEBUG & 2)
- if (bd->next != NULL) {
- printf(" hash collision with %08x\n", bd->next->addr);
- hash_collisions++;
- }
+ bd->entryp[0].block = bd;
+ bd->refcount = 0;
#endif
- }
+ add_to_hashlist(&bd->entryp[0], tcache_id);
+
+ *blk_id = *bcount;
+ (*bcount)++;
return bd;
}
static void REGPARM(3) *dr_lookup_block(u32 pc, int is_slave, int *tcache_id)
{
- block_desc *bd = NULL;
+ struct block_entry *be = NULL;
void *block = NULL;
- bd = dr_get_bd(pc, is_slave, tcache_id);
- if (bd != NULL)
- block = bd->tcache_ptr;
+ be = dr_get_entry(pc, is_slave, tcache_id);
+ if (be != NULL)
+ block = be->tcache_ptr;
#if (DRC_DEBUG & 2)
- if (bd != NULL)
- bd->refcount++;
+ if (be != NULL)
+ be->block->refcount++;
#endif
return block;
}
exit(1);
}
-static void *dr_prepare_ext_branch(u32 pc, SH2 *sh2, int tcache_id)
+static void *dr_prepare_ext_branch(u32 pc, int is_slave, int tcache_id)
{
#if LINK_BRANCHES
+ struct block_link *bl = block_link_pool[tcache_id];
+ int cnt = block_link_pool_counts[tcache_id];
+ struct block_entry *be = NULL;
int target_tcache_id;
- void *target;
- int ret;
-
- target = dr_lookup_block(pc, sh2->is_slave, &target_tcache_id);
- if (target_tcache_id == tcache_id) {
- // allow linking blocks only from local cache
- ret = dr_add_block_link(pc, tcache_ptr, tcache_id);
- if (ret < 0)
- return NULL;
+ int i;
+
+ be = dr_get_entry(pc, is_slave, &target_tcache_id);
+ if (target_tcache_id != tcache_id)
+ return sh2_drc_dispatcher;
+
+ // if pool has been freed, reuse
+ for (i = cnt - 1; i >= 0; i--)
+ if (bl[i].target_pc != 0)
+ break;
+ cnt = i + 1;
+ if (cnt >= block_link_pool_max_counts[tcache_id]) {
+ dbg(1, "bl overflow for tcache %d\n", tcache_id);
+ return NULL;
}
- if (target == NULL || target_tcache_id != tcache_id)
- target = sh2_drc_dispatcher;
+ bl += cnt;
+ block_link_pool_counts[tcache_id]++;
- return target;
+ bl->target_pc = pc;
+ bl->jump = tcache_ptr;
+
+ if (be != NULL) {
+ dbg(2, "- early link from %p to pc %08x", bl->jump, pc);
+ bl->next = be->links;
+ be->links = bl;
+ return be->tcache_ptr;
+ }
+ else {
+ bl->next = unresolved_links[tcache_id];
+ unresolved_links[tcache_id] = bl;
+ return sh2_drc_dispatcher;
+ }
#else
return sh2_drc_dispatcher;
#endif
}
-static void dr_link_blocks(void *target, u32 pc, int tcache_id)
+static void dr_link_blocks(struct block_entry *be, int tcache_id)
{
#if LINK_BRANCHES
- block_link *bl = block_links[tcache_id];
- int cnt = block_link_counts[tcache_id];
- int i;
-
- for (i = 0; i < cnt; i++) {
- if (bl[i].target_pc == pc) {
- dbg(2, "- link from %p", bl[i].jump);
- emith_jump_patch(bl[i].jump, target);
- // XXX: sync ARM caches (old jump should be fine)?
+ struct block_link *first = unresolved_links[tcache_id];
+ struct block_link *bl, *prev, *tmp;
+ u32 pc = be->pc;
+
+ for (bl = prev = first; bl != NULL; ) {
+ if (bl->target_pc == pc) {
+ dbg(2, "- link from %p to pc %08x", bl->jump, pc);
+ emith_jump_patch(bl->jump, tcache_ptr);
+
+ // move bl from unresolved_links to block_entry
+ tmp = bl->next;
+ bl->next = be->links;
+ be->links = bl;
+
+ if (bl == first)
+ first = prev = bl = tmp;
+ else
+ prev->next = bl = tmp;
+ continue;
}
+ prev = bl;
+ bl = bl->next;
}
+ unresolved_links[tcache_id] = first;
+
+ // could sync arm caches here, but that's unnecessary
#endif
}
// XXX: maybe use structs instead?
u32 branch_target_pc[MAX_LOCAL_BRANCHES];
void *branch_target_ptr[MAX_LOCAL_BRANCHES];
- int branch_target_blkid[MAX_LOCAL_BRANCHES];
int branch_target_count = 0;
void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
int literal_addr_count = 0;
int pending_branch_cond = -1;
int pending_branch_pc = 0;
- u8 op_flags[BLOCK_CYCLE_LIMIT];
+ u8 op_flags[BLOCK_INSN_LIMIT];
struct {
u32 delayed_op:2;
u32 test_irq:1;
// PC of current, first, last, last_target_blk SH2 insn
u32 pc, base_pc, end_pc, out_pc;
- void *block_entry;
- block_desc *this_block;
+ void *block_entry_ptr;
+ struct block_desc *block;
u16 *dr_pc_base;
int blkid_main = 0;
int skip_op = 0;
u32 tmp, tmp2;
int cycles;
+ int i, v;
int op;
- int i;
base_pc = sh2->pc;
// 1st pass: scan forward for local branches
scan_block(base_pc, sh2->is_slave, op_flags, &end_pc);
- this_block = dr_add_block(base_pc, end_pc + MAX_LITERAL_OFFSET, // XXX
+ block = dr_add_block(base_pc, end_pc + MAX_LITERAL_OFFSET, // XXX
sh2->is_slave, &blkid_main);
- if (this_block == NULL)
+ if (block == NULL)
return NULL;
- block_entry = tcache_ptr;
+ block_entry_ptr = tcache_ptr;
dbg(2, "== %csh2 block #%d,%d %08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
- tcache_id, blkid_main, base_pc, end_pc, block_entry);
+ tcache_id, blkid_main, base_pc, end_pc, block_entry_ptr);
- dr_link_blocks(tcache_ptr, base_pc, tcache_id);
+ dr_link_blocks(&block->entryp[0], tcache_id);
// collect branch_targets that don't land on delay slots
- for (pc = base_pc; pc <= end_pc; pc += 2) {
- if (!(OP_FLAGS(pc) & OF_TARGET))
+ for (pc = base_pc; pc < end_pc; pc += 2) {
+ if (!(OP_FLAGS(pc) & OF_BTARGET))
continue;
if (OP_FLAGS(pc) & OF_DELAY_OP) {
- OP_FLAGS(pc) &= ~OF_TARGET;
+ OP_FLAGS(pc) &= ~OF_BTARGET;
continue;
}
ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break);
if (branch_target_count > 0) {
memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
- memset(branch_target_blkid, 0, sizeof(branch_target_blkid[0]) * branch_target_count);
}
// -------------------------------------------------
op = FETCH_OP(pc);
- if ((OP_FLAGS(pc) & OF_TARGET) || pc == base_pc)
+ if ((OP_FLAGS(pc) & OF_BTARGET) || pc == base_pc)
{
i = find_in_array(branch_target_pc, branch_target_count, pc);
if (pc != base_pc)
{
- /* make "subblock" - just a mid-block entry */
- block_desc *subblock;
+ // make block entry
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
FLUSH_CYCLES(sr);
rcache_flush();
do_host_disasm(tcache_id);
- dbg(2, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
- tcache_id, branch_target_blkid[i], pc, tcache_ptr);
+ v = block->entry_count;
+ if (v < ARRAY_SIZE(block->entryp)) {
+ block->entryp[v].pc = pc;
+ block->entryp[v].tcache_ptr = tcache_ptr;
+ block->entryp[v].links = NULL;
+#if (DRC_DEBUG & 2)
+ block->entryp[v].block = block;
+#endif
+ add_to_hashlist(&block->entryp[v], tcache_id);
+ block->entry_count++;
- subblock = dr_add_block(pc, end_pc + MAX_LITERAL_OFFSET, // XXX
- sh2->is_slave, &branch_target_blkid[i]);
- if (subblock == NULL)
- return NULL;
+ dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p", sh2->is_slave ? 's' : 'm',
+ tcache_id, blkid_main, pc, tcache_ptr);
- // since we made a block entry, link any other blocks that jump to current pc
- dr_link_blocks(tcache_ptr, pc, tcache_id);
+ // since we made a block entry, link any other blocks
+ // that jump to current pc
+ dr_link_blocks(&block->entryp[v], tcache_id);
+ }
+ else {
+ dbg(1, "too many entryp for block #%d,%d pc=%08x",
+ tcache_id, blkid_main, pc);
+ }
}
if (i >= 0)
branch_target_ptr[i] = tcache_ptr;
#endif
#if (DRC_DEBUG & 4)
DasmSH2(sh2dasm_buff, pc, op);
- printf("%08x %04x %s\n", pc, op, sh2dasm_buff);
+ printf("%c%08x %04x %s\n", (OP_FLAGS(pc) & OF_BTARGET) ? '*' : ' ',
+ pc, op, sh2dasm_buff);
#endif
#ifdef DRC_CMP
//if (out_pc != 0 && out_pc != (u32)-1)
emit_move_r_imm32(SHR_PC, target_pc);
rcache_clean();
- target = dr_prepare_ext_branch(target_pc, sh2, tcache_id);
+ target = dr_prepare_ext_branch(target_pc, sh2->is_slave, tcache_id);
if (target == NULL)
return NULL;
emith_jump_cond_patchable(pending_branch_cond, target);
emit_move_r_imm32(SHR_PC, out_pc);
rcache_flush();
- target = dr_prepare_ext_branch(out_pc, sh2, tcache_id);
+ target = dr_prepare_ext_branch(out_pc, sh2->is_slave, tcache_id);
if (target == NULL)
return NULL;
emith_jump_patchable(target);
// mark memory blocks as containing compiled code
// override any overlay blocks as they become unreachable anyway
- if (tcache_id != 0 || (this_block->addr & 0xc7fc0000) == 0x06000000)
+ if (tcache_id != 0 || (block->addr & 0xc7fc0000) == 0x06000000)
{
- u16 *p, *drc_ram_blk = NULL;
+ u16 *drc_ram_blk = NULL;
u32 addr, mask = 0, shift = 0;
if (tcache_id != 0) {
shift = SH2_DRCBLK_DA_SHIFT;
mask = 0xfff;
}
- else if ((this_block->addr & 0xc7fc0000) == 0x06000000) {
+ else if ((block->addr & 0xc7fc0000) == 0x06000000) {
// SDRAM
drc_ram_blk = Pico32xMem->drcblk_ram;
shift = SH2_DRCBLK_RAM_SHIFT;
mask = 0x3ffff;
}
- drc_ram_blk[(base_pc & mask) >> shift] = (blkid_main << 1) | 1;
- for (pc = base_pc + 2; pc < end_pc; pc += 2) {
- p = &drc_ram_blk[(pc & mask) >> shift];
- *p = blkid_main << 1;
- }
-
- // mark block entries (used by dr_get_bd())
- for (i = 0; i < branch_target_count; i++)
- if (branch_target_blkid[i] != 0)
- drc_ram_blk[(branch_target_pc[i] & mask) >> shift] =
- (branch_target_blkid[i] << 1) | 1;
+ // mark recompiled insns
+ drc_ram_blk[(base_pc & mask) >> shift] = 1;
+ for (pc = base_pc; pc < end_pc; pc += 2)
+ drc_ram_blk[(pc & mask) >> shift] = 1;
// mark literals
for (i = 0; i < literal_addr_count; i++) {
tmp = literal_addr[i];
- p = &drc_ram_blk[(tmp & mask) >> shift];
- *p = blkid_main << 1;
- if (!(tmp & 3) && shift == 1)
- p[1] = p[0]; // assume long
+ drc_ram_blk[(tmp & mask) >> shift] = 1;
}
// add to invalidation lookup lists
addr = base_pc & ~(ADDR_TO_BLOCK_PAGE - 1);
for (; addr < end_pc + MAX_LITERAL_OFFSET; addr += ADDR_TO_BLOCK_PAGE) {
i = (addr & mask) / ADDR_TO_BLOCK_PAGE;
- add_to_block_list(&inval_lookup[tcache_id][i], this_block);
+ add_to_block_list(&inval_lookup[tcache_id][i], block);
}
}
tcache_ptrs[tcache_id] = tcache_ptr;
- host_instructions_updated(block_entry, tcache_ptr);
+ host_instructions_updated(block_entry_ptr, tcache_ptr);
do_host_disasm(tcache_id);
dbg(2, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
/*
printf("~~~\n");
- tcache_dsm_ptrs[tcache_id] = block_entry;
+ tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
do_host_disasm(tcache_id);
printf("~~~\n");
*/
fflush(stdout);
#endif
- return block_entry;
+ return block_entry_ptr;
}
static void sh2_generate_utils(void)
#endif
}
-static void sh2_smc_rm_block_entry(block_desc *bd, int tcache_id, u32 ram_mask)
+static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 ram_mask)
{
+ struct block_link *bl, *bl_next, *bl_unresolved;
void *tmp;
u32 i, addr;
- // XXX: kill links somehow?
dbg(2, " killing entry %08x-%08x, blkid %d,%d",
bd->addr, bd->end_addr, tcache_id, bd - block_tables[tcache_id]);
- if (bd->addr == 0 || bd->tcache_ptr == NULL) {
+ if (bd->addr == 0 || bd->entry_count == 0) {
dbg(1, " killing dead block!? %08x", bd->addr);
return;
}
rm_from_block_list(&inval_lookup[tcache_id][i], bd);
}
- // since we never reuse space of dead blocks,
- // insert jump to dispatcher for blocks that are linked to this point
- //emith_jump_at(bd->tcache_ptr, sh2_drc_dispatcher);
-
- // attempt to handle self-modifying blocks by exiting at nearest known PC
tmp = tcache_ptr;
- tcache_ptr = bd->tcache_ptr;
- emit_move_r_imm32(SHR_PC, bd->addr);
- rcache_flush();
- emith_jump(sh2_drc_dispatcher);
+ bl_unresolved = unresolved_links[tcache_id];
+
+ // remove from hash table, make incoming links unresolved
+ // XXX: maybe patch branches w/flush instead?
+ for (i = 0; i < bd->entry_count; i++) {
+ rm_from_hashlist(&bd->entryp[i], tcache_id);
+
+ // since we never reuse tcache space of dead blocks,
+ // insert jump to dispatcher for blocks that are linked to this
+ tcache_ptr = bd->entryp[i].tcache_ptr;
+ emit_move_r_imm32(SHR_PC, bd->addr);
+ rcache_flush();
+ emith_jump(sh2_drc_dispatcher);
+
+ host_instructions_updated(bd->entryp[i].tcache_ptr, tcache_ptr);
+
+ for (bl = bd->entryp[i].links; bl != NULL; ) {
+ bl_next = bl->next;
+ bl->next = bl_unresolved;
+ bl_unresolved = bl;
+ bl = bl_next;
+ }
+ }
- host_instructions_updated(bd->tcache_ptr, tcache_ptr);
tcache_ptr = tmp;
+ unresolved_links[tcache_id] = bl_unresolved;
bd->addr = bd->end_addr = 0;
+ bd->entry_count = 0;
}
static void sh2_smc_rm_block(u32 a, u16 *drc_ram_blk, int tcache_id, u32 shift, u32 mask)
{
struct block_list **blist = NULL, *entry;
u32 from = ~0, to = 0;
- block_desc *block;
+ struct block_desc *block;
blist = &inval_lookup[tcache_id][(a & mask) / ADDR_TO_BLOCK_PAGE];
entry = *blist;
total += block_tables[b][i].refcount;
for (c = 0; c < 10; c++) {
- block_desc *blk, *maxb = NULL;
+ struct block_desc *blk, *maxb = NULL;
int max = 0;
for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
for (i = 0; i < block_counts[b]; i++) {
if (block_tables[i] == NULL)
goto fail;
// max 2 block links (exits) per block
- block_links[i] = calloc(block_max_counts[i] * 2, sizeof(*block_links[0]));
- if (block_links[i] == NULL)
+ block_link_pool[i] = calloc(block_link_pool_max_counts[i],
+ sizeof(*block_link_pool[0]));
+ if (block_link_pool[i] == NULL)
goto fail;
inval_lookup[i] = calloc(ram_sizes[i] / ADDR_TO_BLOCK_PAGE,
sizeof(inval_lookup[0]));
if (inval_lookup[i] == NULL)
goto fail;
+
+ hash_tables[i] = calloc(hash_table_sizes[i], sizeof(*hash_tables[0]));
+ if (hash_tables[i] == NULL)
+ goto fail;
}
memset(block_counts, 0, sizeof(block_counts));
- memset(block_link_counts, 0, sizeof(block_link_counts));
+ memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
drc_cmn_init();
tcache_ptr = tcache;
#endif
}
- if (hash_table == NULL) {
- hash_table = calloc(sizeof(hash_table[0]), MAX_HASH_ENTRIES);
- if (hash_table == NULL)
- goto fail;
- }
-
return 0;
fail:
{
int i;
- sh2_drc_flush_all();
+ if (block_tables[0] == NULL)
+ return;
- if (block_tables[0] != NULL) {
- block_stats();
+ sh2_drc_flush_all();
- for (i = 0; i < TCACHE_BUFFERS; i++) {
+ for (i = 0; i < TCACHE_BUFFERS; i++) {
#if (DRC_DEBUG & 4)
- printf("~~~ tcache %d\n", i);
- tcache_dsm_ptrs[i] = tcache_bases[i];
- tcache_ptr = tcache_ptrs[i];
- do_host_disasm(i);
+ printf("~~~ tcache %d\n", i);
+ tcache_dsm_ptrs[i] = tcache_bases[i];
+ tcache_ptr = tcache_ptrs[i];
+ do_host_disasm(i);
#endif
- if (block_tables[i] != NULL)
- free(block_tables[i]);
- block_tables[i] = NULL;
- if (block_links[i] == NULL)
- free(block_links[i]);
- block_links[i] = NULL;
+ if (block_tables[i] != NULL)
+ free(block_tables[i]);
+ block_tables[i] = NULL;
+ if (block_link_pool[i] == NULL)
+ free(block_link_pool[i]);
+ block_link_pool[i] = NULL;
- if (inval_lookup[i] == NULL)
- free(inval_lookup[i]);
- inval_lookup[i] = NULL;
- }
+ if (inval_lookup[i] == NULL)
+ free(inval_lookup[i]);
+ inval_lookup[i] = NULL;
- drc_cmn_cleanup();
+ if (hash_tables[i] != NULL) {
+ free(hash_tables[i]);
+ hash_tables[i] = NULL;
+ }
}
- if (hash_table != NULL) {
- free(hash_table);
- hash_table = NULL;
- }
+ drc_cmn_cleanup();
}
#endif /* DRC_SH2 */
u32 pc, target, op;
int cycles;
- memset(op_flags, 0, BLOCK_CYCLE_LIMIT);
+ memset(op_flags, 0, BLOCK_INSN_LIMIT);
dr_pc_base = dr_get_pc_base(base_pc, is_slave);
- for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT-1; cycles++, pc += 2) {
+ for (cycles = 0, pc = base_pc; cycles < BLOCK_INSN_LIMIT-1; cycles++, pc += 2) {
op = FETCH_OP(pc);
if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
signed int offs = ((signed int)(op << 20) >> 19);
pc += 2;
OP_FLAGS(pc) |= OF_DELAY_OP;
target = pc + offs + 2;
- if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
- OP_FLAGS(target) |= OF_TARGET;
+ if (base_pc <= target && target < base_pc + BLOCK_INSN_LIMIT * 2)
+ OP_FLAGS(target) |= OF_BTARGET;
break;
}
if ((op & 0xf000) == 0) {
if (op & 0x0400)
OP_FLAGS(pc + 2) |= OF_DELAY_OP;
target = pc + offs + 4;
- if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
- OP_FLAGS(target) |= OF_TARGET;
+ if (base_pc <= target && target < base_pc + BLOCK_INSN_LIMIT * 2)
+ OP_FLAGS(target) |= OF_BTARGET;
}
if ((op & 0xff00) == 0xc300) // TRAPA
break;