#define LINK_BRANCHES 1
// limits (per block)
-#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
+#define MAX_BLOCK_SIZE (BLOCK_INSN_LIMIT * 6 * 6)
// max literal offset from the block end
#define MAX_LITERAL_OFFSET 32*2
-#define MAX_LITERALS (BLOCK_CYCLE_LIMIT / 4)
+#define MAX_LITERALS (BLOCK_INSN_LIMIT / 4)
#define MAX_LOCAL_BRANCHES 32
///
#ifdef DRC_SH2
// debug stuff
-// 1 - ?
-// 2 - ?
-// 4 - log asm
+// 1 - warnings/errors
+// 2 - block info/smc
+// 4 - asm
+// 8 - runtime entries
// {
#ifndef DRC_DEBUG
#define DRC_DEBUG 0
// and can be discarded early
// XXX: need to tune sizes
static const int tcache_sizes[TCACHE_BUFFERS] = {
- DRC_TCACHE_SIZE * 6 / 8, // ROM, DRAM
+ DRC_TCACHE_SIZE * 6 / 8, // ROM (rarely used), DRAM
DRC_TCACHE_SIZE / 8, // BIOS, data array in master sh2
DRC_TCACHE_SIZE / 8, // ... slave
};
// ptr for code emiters
static u8 *tcache_ptr;
-typedef struct block_desc_ {
+struct block_desc {
u32 addr; // SH2 PC address
u32 end_addr; // address after last op
void *tcache_ptr; // translated block for above PC
- struct block_desc_ *next; // next block with the same PC hash
+ struct block_desc *next; // next block with the same PC hash
#if (DRC_DEBUG & 2)
int refcount;
#endif
-} block_desc;
+};
-typedef struct block_link_ {
+struct block_link {
u32 target_pc;
void *jump; // insn address
// struct block_link_ *next;
-} block_link;
+};
static const int block_max_counts[TCACHE_BUFFERS] = {
4*1024,
256,
256,
};
-static block_desc *block_tables[TCACHE_BUFFERS];
-static block_link *block_links[TCACHE_BUFFERS];
+static struct block_desc *block_tables[TCACHE_BUFFERS];
+static struct block_link *block_links[TCACHE_BUFFERS];
static int block_counts[TCACHE_BUFFERS];
static int block_link_counts[TCACHE_BUFFERS];
-#define BLOCKID_OVERLAP 0xfffe
-#define BLOCKID_MAX block_max_counts[0]
+// used for invalidation
+static const int ram_sizes[TCACHE_BUFFERS] = {
+ 0x40000,
+ 0x1000,
+ 0x1000,
+};
+#define ADDR_TO_BLOCK_PAGE 0x100
+
+struct block_list {
+ struct block_desc *block;
+ struct block_list *next;
+};
+
+// array of pointers to block_lists for RAM and 2 data arrays
+// each array has len: sizeof(mem) / ADDR_TO_BLOCK_PAGE
+static struct block_list **inval_lookup[TCACHE_BUFFERS];
// host register tracking
enum {
// ROM hash table
#define MAX_HASH_ENTRIES 1024
#define HASH_MASK (MAX_HASH_ENTRIES - 1)
-static void **hash_table;
+static struct block_desc **hash_table;
#define HASH_FUNC(hash_tab, addr) \
- ((block_desc **)(hash_tab))[(addr) & HASH_MASK]
+ (hash_tab)[(addr) & HASH_MASK]
static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
static void (*sh2_drc_dispatcher)(void);
return poffs;
}
-static block_desc *dr_get_bd(u32 pc, int is_slave, int *tcache_id)
+static struct block_desc *dr_get_bd(u32 pc, int is_slave, int *tcache_id)
{
*tcache_id = 0;
}
// ROM
else if ((pc & 0xc6000000) == 0x02000000) {
- block_desc *bd = HASH_FUNC(hash_table, pc);
+ struct block_desc *bd = HASH_FUNC(hash_table, pc);
for (; bd != NULL; bd = bd->next)
if (bd->addr == pc)
// ---------------------------------------------------------------
// block management
+// Prepend @block to the singly-linked invalidation list headed at *@blist.
+// Used to register a freshly compiled block in inval_lookup so SMC
+// invalidation can find it by page. On malloc failure the block is simply
+// not tracked (logged as anomaly, not fatal).
+static void add_to_block_list(struct block_list **blist, struct block_desc *block)
+{
+  struct block_list *added = malloc(sizeof(*added));
+  if (!added) {
+    elprintf(EL_ANOMALY, "drc OOM (1)");
+    return;
+  }
+  added->block = block;
+  added->next = *blist;
+  *blist = added;
+}
+
+// Unlink and free the first node of *@blist whose ->block equals @block.
+// Logs (dbg level 1) if @block is not on the list — callers expect every
+// tracked block to be present exactly once per page list.
+static void rm_from_block_list(struct block_list **blist, struct block_desc *block)
+{
+  struct block_list *prev = NULL, *current = *blist;
+  for (; current != NULL; prev = current, current = current->next) {
+    if (current->block == block) {
+      if (prev == NULL)
+        *blist = current->next;
+      else
+        prev->next = current->next;
+      free(current);
+      return;
+    }
+  }
+  dbg(1, "can't rm block %p (%08x-%08x)",
+    block, block->addr, block->end_addr);
+}
+
+// Free every node of the list headed at *@blist and reset the head to NULL.
+// Note: frees only the list nodes, not the block_descs they point to
+// (those live in block_tables[] and are reclaimed by tcache flush).
+static void rm_block_list(struct block_list **blist)
+{
+  struct block_list *tmp, *current = *blist;
+  while (current != NULL) {
+    tmp = current;
+    current = current->next;
+    free(tmp);
+  }
+  *blist = NULL;
+}
+
static void REGPARM(1) flush_tcache(int tcid)
{
+ int i;
+
dbg(1, "tcache #%d flush! (%d/%d, bds %d/%d)", tcid,
tcache_ptrs[tcid] - tcache_bases[tcid], tcache_sizes[tcid],
block_counts[tcid], block_max_counts[tcid]);
#if (DRC_DEBUG & 4)
tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
#endif
+
+ for (i = 0; i < ram_sizes[tcid] / ADDR_TO_BLOCK_PAGE; i++)
+ rm_block_list(&inval_lookup[tcid][i]);
}
#if LINK_BRANCHES
// add block links (tracked branches)
static int dr_add_block_link(u32 target_pc, void *jump, int tcache_id)
{
- block_link *bl = block_links[tcache_id];
+ struct block_link *bl = block_links[tcache_id];
int cnt = block_link_counts[tcache_id];
if (cnt >= block_max_counts[tcache_id] * 2) {
}
#endif
-static block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id)
+static struct block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id)
{
- block_desc *bd;
+ struct block_desc *bd;
int tcache_id;
int *bcount;
static void REGPARM(3) *dr_lookup_block(u32 pc, int is_slave, int *tcache_id)
{
- block_desc *bd = NULL;
+ struct block_desc *bd = NULL;
void *block = NULL;
bd = dr_get_bd(pc, is_slave, tcache_id);
static void dr_link_blocks(void *target, u32 pc, int tcache_id)
{
#if LINK_BRANCHES
- block_link *bl = block_links[tcache_id];
+ struct block_link *bl = block_links[tcache_id];
int cnt = block_link_counts[tcache_id];
int i;
int literal_addr_count = 0;
int pending_branch_cond = -1;
int pending_branch_pc = 0;
- u8 op_flags[BLOCK_CYCLE_LIMIT];
+ u8 op_flags[BLOCK_INSN_LIMIT];
struct {
u32 delayed_op:2;
u32 test_irq:1;
// PC of current, first, last, last_target_blk SH2 insn
u32 pc, base_pc, end_pc, out_pc;
void *block_entry;
- block_desc *this_block;
+ struct block_desc *this_block;
u16 *dr_pc_base;
int blkid_main = 0;
int skip_op = 0;
return NULL;
block_entry = tcache_ptr;
- dbg(2, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
- tcache_id, blkid_main, base_pc, block_entry);
+ dbg(2, "== %csh2 block #%d,%d %08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
+ tcache_id, blkid_main, base_pc, end_pc, block_entry);
dr_link_blocks(tcache_ptr, base_pc, tcache_id);
// collect branch_targets that don't land on delay slots
- for (pc = base_pc; pc <= end_pc; pc += 2) {
- if (!(OP_FLAGS(pc) & OF_TARGET))
+ for (pc = base_pc; pc < end_pc; pc += 2) {
+ if (!(OP_FLAGS(pc) & OF_BTARGET))
continue;
if (OP_FLAGS(pc) & OF_DELAY_OP) {
- OP_FLAGS(pc) &= ~OF_TARGET;
+ OP_FLAGS(pc) &= ~OF_BTARGET;
continue;
}
ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break);
op = FETCH_OP(pc);
- if ((OP_FLAGS(pc) & OF_TARGET) || pc == base_pc)
+ if ((OP_FLAGS(pc) & OF_BTARGET) || pc == base_pc)
{
i = find_in_array(branch_target_pc, branch_target_count, pc);
if (pc != base_pc)
{
/* make "subblock" - just a mid-block entry */
- block_desc *subblock;
+ struct block_desc *subblock;
sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
FLUSH_CYCLES(sr);
#endif
#if (DRC_DEBUG & 4)
DasmSH2(sh2dasm_buff, pc, op);
- printf("%08x %04x %s\n", pc, op, sh2dasm_buff);
+ printf("%c%08x %04x %s\n", (OP_FLAGS(pc) & OF_BTARGET) ? '*' : ' ',
+ pc, op, sh2dasm_buff);
#endif
#ifdef DRC_CMP
//if (out_pc != 0 && out_pc != (u32)-1)
if (tcache_id != 0 || (this_block->addr & 0xc7fc0000) == 0x06000000)
{
u16 *p, *drc_ram_blk = NULL;
- u32 mask = 0, shift = 0;
+ u32 addr, mask = 0, shift = 0;
if (tcache_id != 0) {
// data array, BIOS
drc_ram_blk = Pico32xMem->drcblk_da[sh2->is_slave];
shift = SH2_DRCBLK_DA_SHIFT;
- mask = 0xfff/2;
+ mask = 0xfff;
}
else if ((this_block->addr & 0xc7fc0000) == 0x06000000) {
// SDRAM
drc_ram_blk = Pico32xMem->drcblk_ram;
shift = SH2_DRCBLK_RAM_SHIFT;
- mask = 0x3ffff/2;
+ mask = 0x3ffff;
}
- drc_ram_blk[(base_pc >> shift) & mask] = (blkid_main << 1) | 1;
+ drc_ram_blk[(base_pc & mask) >> shift] = (blkid_main << 1) | 1;
for (pc = base_pc + 2; pc < end_pc; pc += 2) {
- p = &drc_ram_blk[(pc >> shift) & mask];
- if (*p && *p != (blkid_main << 1))
- *p = BLOCKID_OVERLAP; // block intersection..
- else
- *p = blkid_main << 1;
+ p = &drc_ram_blk[(pc & mask) >> shift];
+ *p = blkid_main << 1;
}
// mark block entries (used by dr_get_bd())
for (i = 0; i < branch_target_count; i++)
if (branch_target_blkid[i] != 0)
- drc_ram_blk[(branch_target_pc[i] >> shift) & mask] =
+ drc_ram_blk[(branch_target_pc[i] & mask) >> shift] =
(branch_target_blkid[i] << 1) | 1;
// mark literals
for (i = 0; i < literal_addr_count; i++) {
tmp = literal_addr[i];
- p = &drc_ram_blk[(tmp >> shift) & mask];
- if (*p && *p != (blkid_main << 1))
- *p = BLOCKID_OVERLAP;
- else
- *p = blkid_main << 1;
+ p = &drc_ram_blk[(tmp & mask) >> shift];
+ *p = blkid_main << 1;
if (!(tmp & 3) && shift == 1)
p[1] = p[0]; // assume long
}
+
+ // add to invalidation lookup lists
+ addr = base_pc & ~(ADDR_TO_BLOCK_PAGE - 1);
+ for (; addr < end_pc + MAX_LITERAL_OFFSET; addr += ADDR_TO_BLOCK_PAGE) {
+ i = (addr & mask) / ADDR_TO_BLOCK_PAGE;
+ add_to_block_list(&inval_lookup[tcache_id][i], this_block);
+ }
}
tcache_ptrs[tcache_id] = tcache_ptr;
do_host_disasm(tcache_id);
dbg(2, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
- tcache_id, block_counts[tcache_id],
+ tcache_id, blkid_main,
tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
- insns_compiled, host_insn_count, (double)host_insn_count / insns_compiled);
+ insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);
if ((sh2->pc & 0xc6000000) == 0x02000000) // ROM
dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
/*
#endif
}
-static void sh2_smc_rm_block_entry(block_desc *bd, int tcache_id)
+static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 ram_mask)
{
void *tmp;
+ u32 i, addr;
// XXX: kill links somehow?
dbg(2, " killing entry %08x-%08x, blkid %d,%d",
return;
}
+ // remove from inval_lookup
+ addr = bd->addr & ~(ADDR_TO_BLOCK_PAGE - 1);
+ for (; addr < bd->end_addr; addr += ADDR_TO_BLOCK_PAGE) {
+ i = (addr & ram_mask) / ADDR_TO_BLOCK_PAGE;
+ rm_from_block_list(&inval_lookup[tcache_id][i], bd);
+ }
+
// since we never reuse space of dead blocks,
// insert jump to dispatcher for blocks that are linked to this point
//emith_jump_at(bd->tcache_ptr, sh2_drc_dispatcher);
static void sh2_smc_rm_block(u32 a, u16 *drc_ram_blk, int tcache_id, u32 shift, u32 mask)
{
-  block_desc *btab = block_tables[tcache_id];
-  u16 *p = drc_ram_blk + ((a & mask) >> shift);
-  u16 *pmax = drc_ram_blk + (mask >> shift);
-  u32 id = ~0, end_addr;
-  int max_zeros = MAX_LITERAL_OFFSET >> shift;
-  int i, zeros;
-
-  if (*p == 0 || (*p >> 1) >= BLOCKID_MAX) {
-    u32 from = ~0, to = 0;
-    dbg(1, "slow-remove blocks at @%08x", a);
-    for (i = 0; i < block_counts[tcache_id]; i++) {
-      if (btab[i].addr <= a && a < btab[i].end_addr) {
-        if (btab[i].addr < from)
-          from = btab[i].addr;
-        if (btab[i].end_addr > to)
-          to = btab[i].end_addr;
-        sh2_smc_rm_block_entry(&btab[i], tcache_id);
-      }
-    }
-    if (from < to) {
-      p = drc_ram_blk + ((from & mask) >> shift);
-      memset(p, 0, (to - from) >> (shift - 1));
-    }
-    return;
-  }
-
-  // use end_addr to distinguish the same block
-  end_addr = btab[*p >> 1].end_addr;
-
-  // go up to the start
-  for (zeros = 0; p > drc_ram_blk && zeros < max_zeros; p--) {
-    // there can be holes because games sometimes keep variables
-    // directly in literal pool and we don't inline them
-    // to avoid recompile (Star Wars Arcade)
-    if (p[-1] == 0) {
-      zeros++;
+  struct block_list **blist = NULL, *entry;
+  u32 from = ~0, to = 0;
+  struct block_desc *block;
+
+  // SMC write hit at @a: walk this page's invalidation list and remove
+  // every block whose [addr, end_addr) range covers @a, tracking the
+  // overall [from, to) span of removed code for the drcblk wipe below.
+  blist = &inval_lookup[tcache_id][(a & mask) / ADDR_TO_BLOCK_PAGE];
+  entry = *blist;
+  while (entry != NULL) {
+    block = entry->block;
+    if (block->addr <= a && a < block->end_addr) {
+      if (block->addr < from)
+        from = block->addr;
+      if (block->end_addr > to)
+        to = block->end_addr;
+
+      sh2_smc_rm_block_entry(block, tcache_id, mask);
+
+      // entry lost, restart search
+      entry = *blist;
      continue;
    }
-    zeros = 0;
-    if ((p[-1] >> 1) >= BLOCKID_MAX)
-      break;
-    if (btab[p[-1] >> 1].end_addr != end_addr)
-      break;
+    entry = entry->next;
  }
-  if (!(*p & 1))
-    dbg(1, "smc rm: missing block start for %08x?", a);
-
-  // now go down and kill everything
-  for (zeros = 0; p < pmax && zeros < max_zeros; p++) {
-    if (*p == 0) {
-      zeros++;
-      continue;
-    }
-    zeros = 0;
-    if ((*p >> 1) >= BLOCKID_MAX)
-      break;
-    if ((*p >> 1) == id) {
-      *p = 0;
-      continue;
-    }
-    id = *p >> 1;
-    if (btab[id].end_addr != end_addr)
-      break;
-    *p = 0;
-    sh2_smc_rm_block_entry(&btab[id], tcache_id);
+  // clear entry points
+  if (from < to) {
+    u16 *p = drc_ram_blk + ((from & mask) >> shift);
+    memset(p, 0, (to - from) >> (shift - 1));
  }
}
total += block_tables[b][i].refcount;
for (c = 0; c < 10; c++) {
- block_desc *blk, *maxb = NULL;
+ struct block_desc *blk, *maxb = NULL;
int max = 0;
for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
for (i = 0; i < block_counts[b]; i++) {
block_links[i] = calloc(block_max_counts[i] * 2, sizeof(*block_links[0]));
if (block_links[i] == NULL)
goto fail;
+
+ inval_lookup[i] = calloc(ram_sizes[i] / ADDR_TO_BLOCK_PAGE,
+ sizeof(inval_lookup[0]));
+ if (inval_lookup[i] == NULL)
+ goto fail;
}
memset(block_counts, 0, sizeof(block_counts));
memset(block_link_counts, 0, sizeof(block_link_counts));
{
int i;
+ sh2_drc_flush_all();
+
if (block_tables[0] != NULL) {
block_stats();
if (block_links[i] == NULL)
free(block_links[i]);
block_links[i] = NULL;
+
+ if (inval_lookup[i] == NULL)
+ free(inval_lookup[i]);
+ inval_lookup[i] = NULL;
}
drc_cmn_cleanup();
u32 pc, target, op;
int cycles;
- memset(op_flags, 0, BLOCK_CYCLE_LIMIT);
+ memset(op_flags, 0, BLOCK_INSN_LIMIT);
dr_pc_base = dr_get_pc_base(base_pc, is_slave);
- for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT-1; cycles++, pc += 2) {
+ for (cycles = 0, pc = base_pc; cycles < BLOCK_INSN_LIMIT-1; cycles++, pc += 2) {
op = FETCH_OP(pc);
if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
signed int offs = ((signed int)(op << 20) >> 19);
pc += 2;
OP_FLAGS(pc) |= OF_DELAY_OP;
target = pc + offs + 2;
- if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
- OP_FLAGS(target) |= OF_TARGET;
+ if (base_pc <= target && target < base_pc + BLOCK_INSN_LIMIT * 2)
+ OP_FLAGS(target) |= OF_BTARGET;
break;
}
if ((op & 0xf000) == 0) {
if (op & 0x0400)
OP_FLAGS(pc + 2) |= OF_DELAY_OP;
target = pc + offs + 4;
- if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
- OP_FLAGS(target) |= OF_TARGET;
+ if (base_pc <= target && target < base_pc + BLOCK_INSN_LIMIT * 2)
+ OP_FLAGS(target) |= OF_BTARGET;
}
if ((op & 0xff00) == 0xc300) // TRAPA
break;