X-Git-Url: https://notaz.gp2x.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=cpu%2Fsh2%2Fcompiler.c;h=c57c4b5a6e8d832a1f64234b2f96005931de1c72;hb=8cc76b48865096cff1024d1d7cfb4123bf7daba7;hp=644714e81f641f01ce97d10f102a2030c362a94e;hpb=569420b0d64d27db5f55de50b26d70f130c37cf7;p=picodrive.git diff --git a/cpu/sh2/compiler.c b/cpu/sh2/compiler.c index 644714e..c57c4b5 100644 --- a/cpu/sh2/compiler.c +++ b/cpu/sh2/compiler.c @@ -1,6 +1,6 @@ /* * SH2 recompiler - * (C) notaz, 2009,2010 + * (C) notaz, 2009,2010,2013 * * This work is licensed under the terms of MAME license. * See COPYING file in the top-level directory. @@ -8,10 +8,8 @@ * notes: * - tcache, block descriptor, link buffer overflows result in sh2_translate() * failure, followed by full tcache invalidation for that region - * - jumps between blocks are tracked for SMC handling (in block_links[]), + * - jumps between blocks are tracked for SMC handling (in block_entry->links), * except jumps between different tcaches - * - non-main block entries are called subblocks, as they have same tracking - * structures that main blocks have. * * implemented: * - static register allocation @@ -41,11 +39,11 @@ #define LINK_BRANCHES 1 // limits (per block) -#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6) +#define MAX_BLOCK_SIZE (BLOCK_INSN_LIMIT * 6 * 6) // max literal offset from the block end #define MAX_LITERAL_OFFSET 32*2 -#define MAX_LITERALS (BLOCK_CYCLE_LIMIT / 4) +#define MAX_LITERALS (BLOCK_INSN_LIMIT / 4) #define MAX_LOCAL_BRANCHES 32 /// @@ -58,9 +56,10 @@ #ifdef DRC_SH2 // debug stuff -// 1 - ? -// 2 - ? -// 4 - log asm +// 1 - warnings/errors +// 2 - block info/smc +// 4 - asm +// 8 - runtime block entry log // { #ifndef DRC_DEBUG #define DRC_DEBUG 0 @@ -112,7 +111,7 @@ static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr) // and can be discarded early // XXX: need to tune sizes static const int tcache_sizes[TCACHE_BUFFERS] = { - DRC_TCACHE_SIZE * 6 / 8, // ROM, DRAM + DRC_TCACHE_SIZE * 6 / 8, // ROM (rarely used), DRAM DRC_TCACHE_SIZE / 8, // BIOS, data array in master sh2 DRC_TCACHE_SIZE / 8, // ... 
slave }; @@ -123,34 +122,78 @@ static u8 *tcache_ptrs[TCACHE_BUFFERS]; // ptr for code emiters static u8 *tcache_ptr; -typedef struct block_desc_ { - u32 addr; // SH2 PC address - u32 end_addr; // address after last op +#define MAX_BLOCK_ENTRIES (BLOCK_INSN_LIMIT / 8) + +struct block_link { + u32 target_pc; + void *jump; // insn address + struct block_link *next; // either in block_entry->links or +}; + +struct block_entry { + u32 pc; void *tcache_ptr; // translated block for above PC - struct block_desc_ *next; // next block with the same PC hash + struct block_entry *next; // next block in hash_table with same pc hash + struct block_link *links; // links to this entry #if (DRC_DEBUG & 2) - int refcount; + struct block_desc *block; #endif -} block_desc; +}; -typedef struct block_link_ { - u32 target_pc; - void *jump; // insn address -// struct block_link_ *next; -} block_link; +struct block_desc { + u32 addr; // block start SH2 PC address + u32 end_addr; // address after last op or literal +#if (DRC_DEBUG & 2) + int refcount; +#endif + int entry_count; + struct block_entry entryp[MAX_BLOCK_ENTRIES]; +}; static const int block_max_counts[TCACHE_BUFFERS] = { 4*1024, 256, 256, }; -static block_desc *block_tables[TCACHE_BUFFERS]; -static block_link *block_links[TCACHE_BUFFERS]; +static struct block_desc *block_tables[TCACHE_BUFFERS]; static int block_counts[TCACHE_BUFFERS]; -static int block_link_counts[TCACHE_BUFFERS]; -#define BLOCKID_OVERLAP 0xfffe -#define BLOCKID_MAX block_max_counts[0] +// we have block_link_pool to avoid using mallocs +static const int block_link_pool_max_counts[TCACHE_BUFFERS] = { + 4*1024, + 256, + 256, +}; +static struct block_link *block_link_pool[TCACHE_BUFFERS]; +static int block_link_pool_counts[TCACHE_BUFFERS]; +static struct block_link *unresolved_links[TCACHE_BUFFERS]; + +// used for invalidation +static const int ram_sizes[TCACHE_BUFFERS] = { + 0x40000, + 0x1000, + 0x1000, +}; +#define ADDR_TO_BLOCK_PAGE 0x100 + +struct block_list { + struct block_desc *block; + struct block_list *next; +}; + +// array of pointers to block_lists for RAM and 2 data arrays +// each array has len: sizeof(mem) / ADDR_TO_BLOCK_PAGE +static struct block_list **inval_lookup[TCACHE_BUFFERS]; + +static const int hash_table_sizes[TCACHE_BUFFERS] = { + 0x1000, + 0x100, + 0x100, +}; +static struct block_entry **hash_tables[TCACHE_BUFFERS]; + +#define HASH_FUNC(hash_tab, addr, mask) \ + (hash_tab)[(((addr) >> 20) ^ ((addr) >> 2)) & (mask)] // host register tracking enum { @@ -231,14 +274,6 @@ static temp_reg_t reg_temp[] = { #define Q_SHIFT 8 #define M_SHIFT 9 -// ROM hash table -#define MAX_HASH_ENTRIES 1024 -#define HASH_MASK (MAX_HASH_ENTRIES - 1) -static void **hash_table; - -#define HASH_FUNC(hash_tab, addr) \ - ((block_desc **)(hash_tab))[(addr) & HASH_MASK] - static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2); static void (*sh2_drc_dispatcher)(void); static void (*sh2_drc_exit)(void); @@ -248,9 +283,7 @@ static u32 REGPARM(2) (*sh2_drc_read8)(u32 a, SH2 *sh2); static u32 REGPARM(2) (*sh2_drc_read16)(u32 a, SH2 *sh2); static u32 REGPARM(2) (*sh2_drc_read32)(u32 a, SH2 *sh2); static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d); -static void REGPARM(2) (*sh2_drc_write8_slot)(u32 a, u32 d); static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d); -static void REGPARM(2) (*sh2_drc_write16_slot)(u32 a, u32 d); static int REGPARM(3) (*sh2_drc_write32)(u32 a, u32 d, SH2 *sh2); // address space stuff @@ -282,32 +315,22 @@ static int dr_ctx_get_mem_ptr(u32 a, u32 *mask) return poffs; } 
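/* Editor's note: illustrative sketch, not part of the patch.  It shows how
 * the inval_lookup page lists declared above are intended to be filled:
 * every ADDR_TO_BLOCK_PAGE-sized page touched by a block gets a block_list
 * node pointing back at it, so a write anywhere in that page can quickly
 * find the block for invalidation.  The helper name and its ram_mask
 * parameter are hypothetical; add_to_block_list() is defined further down
 * in this patch. */
static void example_track_block_pages(int tcache_id, struct block_desc *block,
  u32 ram_mask)
{
  u32 addr = block->addr & ~(ADDR_TO_BLOCK_PAGE - 1);

  for (; addr < block->end_addr; addr += ADDR_TO_BLOCK_PAGE) {
    int i = (addr & ram_mask) / ADDR_TO_BLOCK_PAGE;
    add_to_block_list(&inval_lookup[tcache_id][i], block);
  }
}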
-static block_desc *dr_get_bd(u32 pc, int is_slave, int *tcache_id) +static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id) { - *tcache_id = 0; - - // we have full block id tables for data_array and RAM - // BIOS goes to data_array table too - if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0) { - int blkid = Pico32xMem->drcblk_da[is_slave][(pc & 0xfff) >> SH2_DRCBLK_DA_SHIFT]; - *tcache_id = 1 + is_slave; - if (blkid & 1) - return &block_tables[*tcache_id][blkid >> 1]; - } - // RAM - else if ((pc & 0xc6000000) == 0x06000000) { - int blkid = Pico32xMem->drcblk_ram[(pc & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT]; - if (blkid & 1) - return &block_tables[0][blkid >> 1]; - } - // ROM - else if ((pc & 0xc6000000) == 0x02000000) { - block_desc *bd = HASH_FUNC(hash_table, pc); + struct block_entry *be; + u32 tcid = 0, mask; - for (; bd != NULL; bd = bd->next) - if (bd->addr == pc) - return bd; - } + // data arrays have their own caches + if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0) + tcid = 1 + is_slave; + + *tcache_id = tcid; + + mask = hash_table_sizes[tcid] - 1; + be = HASH_FUNC(hash_tables[tcid], pc, mask); + for (; be != NULL; be = be->next) + if (be->pc == pc) + return be; return NULL; } @@ -315,102 +338,166 @@ static block_desc *dr_get_bd(u32 pc, int is_slave, int *tcache_id) // --------------------------------------------------------------- // block management +static void add_to_block_list(struct block_list **blist, struct block_desc *block) +{ + struct block_list *added = malloc(sizeof(*added)); + if (!added) { + elprintf(EL_ANOMALY, "drc OOM (1)"); + return; + } + added->block = block; + added->next = *blist; + *blist = added; +} + +static void rm_from_block_list(struct block_list **blist, struct block_desc *block) +{ + struct block_list *prev = NULL, *current = *blist; + for (; current != NULL; prev = current, current = current->next) { + if (current->block == block) { + if (prev == NULL) + *blist = current->next; + else + prev->next = current->next; + free(current); + return; + } + } + dbg(1, "can't rm block %p (%08x-%08x)", + block, block->addr, block->end_addr); +} + +static void rm_block_list(struct block_list **blist) +{ + struct block_list *tmp, *current = *blist; + while (current != NULL) { + tmp = current; + current = current->next; + free(tmp); + } + *blist = NULL; +} + static void REGPARM(1) flush_tcache(int tcid) { + int i; + dbg(1, "tcache #%d flush! 
(%d/%d, bds %d/%d)", tcid, tcache_ptrs[tcid] - tcache_bases[tcid], tcache_sizes[tcid], block_counts[tcid], block_max_counts[tcid]); block_counts[tcid] = 0; - block_link_counts[tcid] = 0; + block_link_pool_counts[tcid] = 0; + unresolved_links[tcid] = NULL; + memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * hash_table_sizes[tcid]); tcache_ptrs[tcid] = tcache_bases[tcid]; - if (tcid == 0) { // ROM, RAM - memset(hash_table, 0, sizeof(hash_table[0]) * MAX_HASH_ENTRIES); - memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram)); + if (Pico32xMem != NULL) { + if (tcid == 0) // ROM, RAM + memset(Pico32xMem->drcblk_ram, 0, + sizeof(Pico32xMem->drcblk_ram)); + else + memset(Pico32xMem->drcblk_da[tcid - 1], 0, + sizeof(Pico32xMem->drcblk_da[0])); } - else - memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[0])); #if (DRC_DEBUG & 4) tcache_dsm_ptrs[tcid] = tcache_bases[tcid]; #endif + + for (i = 0; i < ram_sizes[tcid] / ADDR_TO_BLOCK_PAGE; i++) + rm_block_list(&inval_lookup[tcid][i]); } -#if LINK_BRANCHES -// add block links (tracked branches) -static int dr_add_block_link(u32 target_pc, void *jump, int tcache_id) +static void add_to_hashlist(struct block_entry *be, int tcache_id) { - block_link *bl = block_links[tcache_id]; - int cnt = block_link_counts[tcache_id]; + u32 tcmask = hash_table_sizes[tcache_id] - 1; - if (cnt >= block_max_counts[tcache_id] * 2) { - dbg(1, "bl overflow for tcache %d\n", tcache_id); - return -1; + be->next = HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask); + HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask) = be; + +#if (DRC_DEBUG & 2) + if (be->next != NULL) { + printf(" %08x: hash collision with %08x\n", + be->pc, be->next->pc); + hash_collisions++; } +#endif +} - bl[cnt].target_pc = target_pc; - bl[cnt].jump = jump; - block_link_counts[tcache_id]++; +static void rm_from_hashlist(struct block_entry *be, int tcache_id) +{ + u32 tcmask = hash_table_sizes[tcache_id] - 1; + struct block_entry *cur, *prev; + + cur = HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask); + if (cur == NULL) + goto missing; + + if (be == cur) { // first + HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask) = be->next; + return; + } - return 0; + for (prev = cur, cur = cur->next; cur != NULL; cur = cur->next) { + if (cur == be) { + prev->next = cur->next; + return; + } + } + +missing: + dbg(1, "rm_from_hashlist: be %p %08x missing?", be, be->pc); } -#endif -static block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id) +static struct block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id) { - block_desc *bd; + struct block_entry *be; + struct block_desc *bd; int tcache_id; int *bcount; - bd = dr_get_bd(addr, is_slave, &tcache_id); - if (bd != NULL) { - dbg(2, "block override for %08x", addr); - bd->tcache_ptr = tcache_ptr; - bd->end_addr = end_addr; - *blk_id = bd - block_tables[tcache_id]; - return bd; - } + // do a lookup to get tcache_id and override check + be = dr_get_entry(addr, is_slave, &tcache_id); + if (be != NULL) + dbg(1, "block override for %08x", addr); bcount = &block_counts[tcache_id]; if (*bcount >= block_max_counts[tcache_id]) { dbg(1, "bd overflow for tcache %d", tcache_id); return NULL; } - if (*bcount == 0) - (*bcount)++; // not using descriptor 0 bd = &block_tables[tcache_id][*bcount]; bd->addr = addr; bd->end_addr = end_addr; - bd->tcache_ptr = tcache_ptr; - *blk_id = *bcount; - (*bcount)++; - if ((addr & 0xc6000000) == 0x02000000) { // ROM - bd->next = HASH_FUNC(hash_table, addr); - 
HASH_FUNC(hash_table, addr) = bd; + bd->entry_count = 1; + bd->entryp[0].pc = addr; + bd->entryp[0].tcache_ptr = tcache_ptr; + bd->entryp[0].links = NULL; #if (DRC_DEBUG & 2) - if (bd->next != NULL) { - printf(" hash collision with %08x\n", bd->next->addr); - hash_collisions++; - } + bd->entryp[0].block = bd; + bd->refcount = 0; #endif - } + add_to_hashlist(&bd->entryp[0], tcache_id); + + *blk_id = *bcount; + (*bcount)++; return bd; } static void REGPARM(3) *dr_lookup_block(u32 pc, int is_slave, int *tcache_id) { - block_desc *bd = NULL; + struct block_entry *be = NULL; void *block = NULL; - bd = dr_get_bd(pc, is_slave, tcache_id); - if (bd != NULL) - block = bd->tcache_ptr; + be = dr_get_entry(pc, is_slave, tcache_id); + if (be != NULL) + block = be->tcache_ptr; #if (DRC_DEBUG & 2) - if (bd != NULL) - bd->refcount++; + if (be != NULL) + be->block->refcount++; #endif return block; } @@ -421,43 +508,79 @@ static void *dr_failure(void) exit(1); } -static void *dr_prepare_ext_branch(u32 pc, SH2 *sh2, int tcache_id) +static void *dr_prepare_ext_branch(u32 pc, int is_slave, int tcache_id) { #if LINK_BRANCHES + struct block_link *bl = block_link_pool[tcache_id]; + int cnt = block_link_pool_counts[tcache_id]; + struct block_entry *be = NULL; int target_tcache_id; - void *target; - int ret; - - target = dr_lookup_block(pc, sh2->is_slave, &target_tcache_id); - if (target_tcache_id == tcache_id) { - // allow linking blocks only from local cache - ret = dr_add_block_link(pc, tcache_ptr, tcache_id); - if (ret < 0) - return NULL; + int i; + + be = dr_get_entry(pc, is_slave, &target_tcache_id); + if (target_tcache_id != tcache_id) + return sh2_drc_dispatcher; + + // if pool has been freed, reuse + for (i = cnt - 1; i >= 0; i--) + if (bl[i].target_pc != 0) + break; + cnt = i + 1; + if (cnt >= block_link_pool_max_counts[tcache_id]) { + dbg(1, "bl overflow for tcache %d\n", tcache_id); + return NULL; } - if (target == NULL || target_tcache_id != tcache_id) - target = sh2_drc_dispatcher; + bl += cnt; + block_link_pool_counts[tcache_id]++; + + bl->target_pc = pc; + bl->jump = tcache_ptr; - return target; + if (be != NULL) { + dbg(2, "- early link from %p to pc %08x", bl->jump, pc); + bl->next = be->links; + be->links = bl; + return be->tcache_ptr; + } + else { + bl->next = unresolved_links[tcache_id]; + unresolved_links[tcache_id] = bl; + return sh2_drc_dispatcher; + } #else return sh2_drc_dispatcher; #endif } -static void dr_link_blocks(void *target, u32 pc, int tcache_id) +static void dr_link_blocks(struct block_entry *be, int tcache_id) { #if LINK_BRANCHES - block_link *bl = block_links[tcache_id]; - int cnt = block_link_counts[tcache_id]; - int i; - - for (i = 0; i < cnt; i++) { - if (bl[i].target_pc == pc) { - dbg(2, "- link from %p", bl[i].jump); - emith_jump_patch(bl[i].jump, target); - // XXX: sync ARM caches (old jump should be fine)? 
+ struct block_link *first = unresolved_links[tcache_id]; + struct block_link *bl, *prev, *tmp; + u32 pc = be->pc; + + for (bl = prev = first; bl != NULL; ) { + if (bl->target_pc == pc) { + dbg(2, "- link from %p to pc %08x", bl->jump, pc); + emith_jump_patch(bl->jump, tcache_ptr); + + // move bl from unresolved_links to block_entry + tmp = bl->next; + bl->next = be->links; + be->links = bl; + + if (bl == first) + first = prev = bl = tmp; + else + prev->next = bl = tmp; + continue; } + prev = bl; + bl = bl->next; } + unresolved_links[tcache_id] = first; + + // could sync arm caches here, but that's unnecessary #endif } @@ -1024,7 +1147,7 @@ static int emit_memhandler_read_rr(sh2_reg_e rd, sh2_reg_e rs, u32 offs, int siz return hr2; } -static void emit_memhandler_write(int size, u32 pc, int delay) +static void emit_memhandler_write(int size, u32 pc) { int ctxr; host_arg2reg(ctxr, 2); @@ -1034,22 +1157,12 @@ static void emit_memhandler_write(int size, u32 pc, int delay) switch (size) { case 0: // 8 // XXX: consider inlining sh2_drc_write8 - if (delay) { - emith_call(sh2_drc_write8_slot); - } else { - emit_move_r_imm32(SHR_PC, pc); - rcache_clean(); - emith_call(sh2_drc_write8); - } + rcache_clean(); + emith_call(sh2_drc_write8); break; case 1: // 16 - if (delay) { - emith_call(sh2_drc_write16_slot); - } else { - emit_move_r_imm32(SHR_PC, pc); - rcache_clean(); - emith_call(sh2_drc_write16); - } + rcache_clean(); + emith_call(sh2_drc_write16); break; case 2: // 32 emith_move_r_r(ctxr, CONTEXT_REG); @@ -1057,9 +1170,9 @@ static void emit_memhandler_write(int size, u32 pc, int delay) break; } + rcache_invalidate(); if (reg_map_g2h[SHR_SR] != -1) emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4); - rcache_invalidate(); } // @(Rx,Ry) @@ -1189,7 +1302,6 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) // XXX: maybe use structs instead? u32 branch_target_pc[MAX_LOCAL_BRANCHES]; void *branch_target_ptr[MAX_LOCAL_BRANCHES]; - int branch_target_blkid[MAX_LOCAL_BRANCHES]; int branch_target_count = 0; void *branch_patch_ptr[MAX_LOCAL_BRANCHES]; u32 branch_patch_pc[MAX_LOCAL_BRANCHES]; @@ -1198,7 +1310,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) int literal_addr_count = 0; int pending_branch_cond = -1; int pending_branch_pc = 0; - u8 op_flags[BLOCK_CYCLE_LIMIT]; + u8 op_flags[BLOCK_INSN_LIMIT]; struct { u32 delayed_op:2; u32 test_irq:1; @@ -1207,15 +1319,15 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) // PC of current, first, last, last_target_blk SH2 insn u32 pc, base_pc, end_pc, out_pc; - void *block_entry; - block_desc *this_block; + void *block_entry_ptr; + struct block_desc *block; u16 *dr_pc_base; int blkid_main = 0; int skip_op = 0; u32 tmp, tmp2; int cycles; + int i, v; int op; - int i; base_pc = sh2->pc; @@ -1239,23 +1351,23 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) // 1st pass: scan forward for local branches scan_block(base_pc, sh2->is_slave, op_flags, &end_pc); - this_block = dr_add_block(base_pc, end_pc + MAX_LITERAL_OFFSET, // XXX + block = dr_add_block(base_pc, end_pc + MAX_LITERAL_OFFSET, // XXX sh2->is_slave, &blkid_main); - if (this_block == NULL) + if (block == NULL) return NULL; - block_entry = tcache_ptr; - dbg(2, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm', - tcache_id, blkid_main, base_pc, block_entry); + block_entry_ptr = tcache_ptr; + dbg(2, "== %csh2 block #%d,%d %08x-%08x -> %p", sh2->is_slave ? 
's' : 'm', + tcache_id, blkid_main, base_pc, end_pc, block_entry_ptr); - dr_link_blocks(tcache_ptr, base_pc, tcache_id); + dr_link_blocks(&block->entryp[0], tcache_id); // collect branch_targets that don't land on delay slots - for (pc = base_pc; pc <= end_pc; pc += 2) { - if (!(OP_FLAGS(pc) & OF_TARGET)) + for (pc = base_pc; pc < end_pc; pc += 2) { + if (!(OP_FLAGS(pc) & OF_BTARGET)) continue; if (OP_FLAGS(pc) & OF_DELAY_OP) { - OP_FLAGS(pc) &= ~OF_TARGET; + OP_FLAGS(pc) &= ~OF_BTARGET; continue; } ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break); @@ -1263,7 +1375,6 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) if (branch_target_count > 0) { memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count); - memset(branch_target_blkid, 0, sizeof(branch_target_blkid[0]) * branch_target_count); } // ------------------------------------------------- @@ -1279,13 +1390,12 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) op = FETCH_OP(pc); - if ((OP_FLAGS(pc) & OF_TARGET) || pc == base_pc) + if ((OP_FLAGS(pc) & OF_BTARGET) || pc == base_pc) { i = find_in_array(branch_target_pc, branch_target_count, pc); if (pc != base_pc) { - /* make "subblock" - just a mid-block entry */ - block_desc *subblock; + // make block entry sr = rcache_get_reg(SHR_SR, RC_GR_RMW); FLUSH_CYCLES(sr); @@ -1296,16 +1406,28 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) rcache_flush(); do_host_disasm(tcache_id); - dbg(2, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm', - tcache_id, branch_target_blkid[i], pc, tcache_ptr); + v = block->entry_count; + if (v < ARRAY_SIZE(block->entryp)) { + block->entryp[v].pc = pc; + block->entryp[v].tcache_ptr = tcache_ptr; + block->entryp[v].links = NULL; +#if (DRC_DEBUG & 2) + block->entryp[v].block = block; +#endif + add_to_hashlist(&block->entryp[v], tcache_id); + block->entry_count++; - subblock = dr_add_block(pc, end_pc + MAX_LITERAL_OFFSET, // XXX - sh2->is_slave, &branch_target_blkid[i]); - if (subblock == NULL) - return NULL; + dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p", sh2->is_slave ? 's' : 'm', + tcache_id, blkid_main, pc, tcache_ptr); - // since we made a block entry, link any other blocks that jump to current pc - dr_link_blocks(tcache_ptr, pc, tcache_id); + // since we made a block entry, link any other blocks + // that jump to current pc + dr_link_blocks(&block->entryp[v], tcache_id); + } + else { + dbg(1, "too many entryp for block #%d,%d pc=%08x", + tcache_id, blkid_main, pc); + } } if (i >= 0) branch_target_ptr[i] = tcache_ptr; @@ -1327,7 +1449,8 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) #endif #if (DRC_DEBUG & 4) DasmSH2(sh2dasm_buff, pc, op); - printf("%08x %04x %s\n", pc, op, sh2dasm_buff); + printf("%c%08x %04x %s\n", (OP_FLAGS(pc) & OF_BTARGET) ? 
'*' : ' ', + pc, op, sh2dasm_buff); #endif #ifdef DRC_CMP //if (out_pc != 0 && out_pc != (u32)-1) @@ -1406,7 +1529,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) tmp2 = rcache_get_reg_arg(0, SHR_R0); tmp3 = rcache_get_reg(GET_Rn(), RC_GR_READ); emith_add_r_r(tmp2, tmp3); - emit_memhandler_write(op & 3, pc, drcf.delayed_op); + emit_memhandler_write(op & 3, pc); goto end_op; case 0x07: // MUL.L Rm,Rn 0000nnnnmmmm0111 @@ -1573,7 +1696,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) tmp2 = rcache_get_reg_arg(1, GET_Rm()); if (op & 0x0f) emith_add_r_imm(tmp, (op & 0x0f) * 4); - emit_memhandler_write(2, pc, drcf.delayed_op); + emit_memhandler_write(2, pc); goto end_op; case 0x02: @@ -1585,7 +1708,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) rcache_clean(); rcache_get_reg_arg(0, GET_Rn()); rcache_get_reg_arg(1, GET_Rm()); - emit_memhandler_write(op & 3, pc, drcf.delayed_op); + emit_memhandler_write(op & 3, pc); goto end_op; case 0x04: // MOV.B Rm,@–Rn 0010nnnnmmmm0100 case 0x05: // MOV.W Rm,@–Rn 0010nnnnmmmm0101 @@ -1595,7 +1718,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) rcache_clean(); rcache_get_reg_arg(0, GET_Rn()); rcache_get_reg_arg(1, GET_Rm()); - emit_memhandler_write(op & 3, pc, drcf.delayed_op); + emit_memhandler_write(op & 3, pc); goto end_op; case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111 sr = rcache_get_reg(SHR_SR, RC_GR_RMW); @@ -1931,7 +2054,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) tmp3 = rcache_get_reg_arg(1, tmp); if (tmp == SHR_SR) emith_clear_msb(tmp3, tmp3, 22); // reserved bits defined by ISA as 0 - emit_memhandler_write(2, pc, drcf.delayed_op); + emit_memhandler_write(2, pc); goto end_op; case 0x04: case 0x05: @@ -2093,8 +2216,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) emith_move_r_r(tmp2, tmp); rcache_free_tmp(tmp); rcache_get_reg_arg(0, GET_Rn()); - emit_memhandler_write(0, pc, drcf.delayed_op); - cycles += 3; + emit_memhandler_write(0, pc); break; default: goto default_; @@ -2259,7 +2381,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) tmp3 = (op & 0x100) >> 8; if (op & 0x0f) emith_add_r_imm(tmp, (op & 0x0f) << tmp3); - emit_memhandler_write(tmp3, pc, drcf.delayed_op); + emit_memhandler_write(tmp3, pc); goto end_op; case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd @@ -2348,7 +2470,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) tmp2 = rcache_get_reg_arg(1, SHR_R0); tmp3 = (op & 0x300) >> 8; emith_add_r_imm(tmp, (op & 0xff) << tmp3); - emit_memhandler_write(tmp3, pc, drcf.delayed_op); + emit_memhandler_write(tmp3, pc); goto end_op; case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd @@ -2364,12 +2486,12 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) emith_add_r_imm(tmp, 4); tmp = rcache_get_reg_arg(1, SHR_SR); emith_clear_msb(tmp, tmp, 22); - emit_memhandler_write(2, pc, drcf.delayed_op); + emit_memhandler_write(2, pc); // push PC rcache_get_reg_arg(0, SHR_SP); tmp = rcache_get_tmp_arg(1); emith_move_r_imm(tmp, pc); - emit_memhandler_write(2, pc, drcf.delayed_op); + emit_memhandler_write(2, pc); // obtain new PC emit_memhandler_read_rr(SHR_PC, SHR_VBR, (op & 0xff) * 4, 2); out_pc = (u32)-1; @@ -2428,8 +2550,7 @@ static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id) tmp3 = rcache_get_reg_arg(0, SHR_GBR); tmp4 = rcache_get_reg(SHR_R0, RC_GR_READ); 
emith_add_r_r(tmp3, tmp4); - emit_memhandler_write(0, pc, drcf.delayed_op); - cycles += 2; + emit_memhandler_write(0, pc); goto end_op; } goto default_; @@ -2510,7 +2631,7 @@ end_op: emit_move_r_imm32(SHR_PC, target_pc); rcache_clean(); - target = dr_prepare_ext_branch(target_pc, sh2, tcache_id); + target = dr_prepare_ext_branch(target_pc, sh2->is_slave, tcache_id); if (target == NULL) return NULL; emith_jump_cond_patchable(pending_branch_cond, target); @@ -2552,7 +2673,7 @@ end_op: emit_move_r_imm32(SHR_PC, out_pc); rcache_flush(); - target = dr_prepare_ext_branch(out_pc, sh2, tcache_id); + target = dr_prepare_ext_branch(out_pc, sh2->is_slave, tcache_id); if (target == NULL) return NULL; emith_jump_patchable(target); @@ -2579,66 +2700,57 @@ end_op: // mark memory blocks as containing compiled code // override any overlay blocks as they become unreachable anyway - if (tcache_id != 0 || (this_block->addr & 0xc7fc0000) == 0x06000000) + if (tcache_id != 0 || (block->addr & 0xc7fc0000) == 0x06000000) { - u16 *p, *drc_ram_blk = NULL; - u32 mask = 0, shift = 0; + u16 *drc_ram_blk = NULL; + u32 addr, mask = 0, shift = 0; if (tcache_id != 0) { // data array, BIOS drc_ram_blk = Pico32xMem->drcblk_da[sh2->is_slave]; shift = SH2_DRCBLK_DA_SHIFT; - mask = 0xfff/2; + mask = 0xfff; } - else if ((this_block->addr & 0xc7fc0000) == 0x06000000) { + else if ((block->addr & 0xc7fc0000) == 0x06000000) { // SDRAM drc_ram_blk = Pico32xMem->drcblk_ram; shift = SH2_DRCBLK_RAM_SHIFT; - mask = 0x3ffff/2; + mask = 0x3ffff; } - drc_ram_blk[(base_pc >> shift) & mask] = (blkid_main << 1) | 1; - for (pc = base_pc + 2; pc < end_pc; pc += 2) { - p = &drc_ram_blk[(pc >> shift) & mask]; - if (*p && *p != (blkid_main << 1)) - *p = BLOCKID_OVERLAP; // block intersection.. - else - *p = blkid_main << 1; - } - - // mark block entries (used by dr_get_bd()) - for (i = 0; i < branch_target_count; i++) - if (branch_target_blkid[i] != 0) - drc_ram_blk[(branch_target_pc[i] >> shift) & mask] = - (branch_target_blkid[i] << 1) | 1; + // mark recompiled insns + drc_ram_blk[(base_pc & mask) >> shift] = 1; + for (pc = base_pc; pc < end_pc; pc += 2) + drc_ram_blk[(pc & mask) >> shift] = 1; // mark literals for (i = 0; i < literal_addr_count; i++) { tmp = literal_addr[i]; - p = &drc_ram_blk[(tmp >> shift) & mask]; - if (*p && *p != (blkid_main << 1)) - *p = BLOCKID_OVERLAP; - else - *p = blkid_main << 1; - if (!(tmp & 3) && shift == 1) - p[1] = p[0]; // assume long + drc_ram_blk[(tmp & mask) >> shift] = 1; + } + + // add to invalidation lookup lists + addr = base_pc & ~(ADDR_TO_BLOCK_PAGE - 1); + for (; addr < end_pc + MAX_LITERAL_OFFSET; addr += ADDR_TO_BLOCK_PAGE) { + i = (addr & mask) / ADDR_TO_BLOCK_PAGE; + add_to_block_list(&inval_lookup[tcache_id][i], block); } } tcache_ptrs[tcache_id] = tcache_ptr; - host_instructions_updated(block_entry, tcache_ptr); + host_instructions_updated(block_entry_ptr, tcache_ptr); do_host_disasm(tcache_id); dbg(2, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f", - tcache_id, block_counts[tcache_id], + tcache_id, blkid_main, tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id], - insns_compiled, host_insn_count, (double)host_insn_count / insns_compiled); + insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled); if ((sh2->pc & 0xc6000000) == 0x02000000) // ROM dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]); /* printf("~~~\n"); - tcache_dsm_ptrs[tcache_id] = block_entry; + tcache_dsm_ptrs[tcache_id] = block_entry_ptr; do_host_disasm(tcache_id); 
printf("~~~\n"); */ @@ -2647,13 +2759,12 @@ end_op: fflush(stdout); #endif - return block_entry; + return block_entry_ptr; } static void sh2_generate_utils(void) { int arg0, arg1, arg2, sr, tmp; - void *sh2_drc_write_end, *sh2_drc_write_slot_end; sh2_drc_write32 = p32x_sh2_write32; sh2_drc_read8 = p32x_sh2_read8; @@ -2754,51 +2865,15 @@ static void sh2_generate_utils(void) emith_call(sh2_drc_test_irq); emith_jump(sh2_drc_dispatcher); - // write-caused irq detection - sh2_drc_write_end = tcache_ptr; - emith_tst_r_r(arg0, arg0); - EMITH_SJMP_START(DCOND_NE); - emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp)); // return - EMITH_SJMP_END(DCOND_NE); - emith_call(sh2_drc_test_irq); - emith_jump_ctx(offsetof(SH2, drc_tmp)); - - // write-caused irq detection for writes in delay slot - sh2_drc_write_slot_end = tcache_ptr; - emith_tst_r_r(arg0, arg0); - EMITH_SJMP_START(DCOND_NE); - emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp)); - EMITH_SJMP_END(DCOND_NE); - // just burn cycles to get back to dispatcher after branch is handled - sr = rcache_get_reg(SHR_SR, RC_GR_RMW); - emith_ctx_write(sr, offsetof(SH2, irq_cycles)); - emith_clear_msb(sr, sr, 20); // clear cycles - rcache_flush(); - emith_jump_ctx(offsetof(SH2, drc_tmp)); - // sh2_drc_write8(u32 a, u32 d) sh2_drc_write8 = (void *)tcache_ptr; - emith_ret_to_ctx(offsetof(SH2, drc_tmp)); emith_ctx_read(arg2, offsetof(SH2, write8_tab)); - emith_sh2_wcall(arg0, arg2, sh2_drc_write_end); + emith_sh2_wcall(arg0, arg2); // sh2_drc_write16(u32 a, u32 d) sh2_drc_write16 = (void *)tcache_ptr; - emith_ret_to_ctx(offsetof(SH2, drc_tmp)); - emith_ctx_read(arg2, offsetof(SH2, write16_tab)); - emith_sh2_wcall(arg0, arg2, sh2_drc_write_end); - - // sh2_drc_write8_slot(u32 a, u32 d) - sh2_drc_write8_slot = (void *)tcache_ptr; - emith_ret_to_ctx(offsetof(SH2, drc_tmp)); - emith_ctx_read(arg2, offsetof(SH2, write8_tab)); - emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end); - - // sh2_drc_write16_slot(u32 a, u32 d) - sh2_drc_write16_slot = (void *)tcache_ptr; - emith_ret_to_ctx(offsetof(SH2, drc_tmp)); emith_ctx_read(arg2, offsetof(SH2, write16_tab)); - emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end); + emith_sh2_wcall(arg0, arg2); #ifdef PDB_NET // debug @@ -2832,9 +2907,7 @@ static void sh2_generate_utils(void) MAKE_READ_WRAPPER(sh2_drc_read16); MAKE_READ_WRAPPER(sh2_drc_read32); MAKE_WRITE_WRAPPER(sh2_drc_write8); - MAKE_WRITE_WRAPPER(sh2_drc_write8_slot); MAKE_WRITE_WRAPPER(sh2_drc_write16); - MAKE_WRITE_WRAPPER(sh2_drc_write16_slot); MAKE_WRITE_WRAPPER(sh2_drc_write32); #if (DRC_DEBUG & 4) host_dasm_new_symbol(sh2_drc_read8); @@ -2850,112 +2923,92 @@ static void sh2_generate_utils(void) host_dasm_new_symbol(sh2_drc_dispatcher); host_dasm_new_symbol(sh2_drc_exit); host_dasm_new_symbol(sh2_drc_test_irq); - host_dasm_new_symbol(sh2_drc_write_end); - host_dasm_new_symbol(sh2_drc_write_slot_end); host_dasm_new_symbol(sh2_drc_write8); - host_dasm_new_symbol(sh2_drc_write8_slot); host_dasm_new_symbol(sh2_drc_write16); - host_dasm_new_symbol(sh2_drc_write16_slot); #endif } -static void sh2_smc_rm_block_entry(block_desc *bd, int tcache_id) +static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 ram_mask) { + struct block_link *bl, *bl_next, *bl_unresolved; void *tmp; + u32 i, addr; - // XXX: kill links somehow? 
dbg(2, " killing entry %08x-%08x, blkid %d,%d", bd->addr, bd->end_addr, tcache_id, bd - block_tables[tcache_id]); - if (bd->addr == 0 || bd->tcache_ptr == NULL) { + if (bd->addr == 0 || bd->entry_count == 0) { dbg(1, " killing dead block!? %08x", bd->addr); return; } - // since we never reuse space of dead blocks, - // insert jump to dispatcher for blocks that are linked to this point - //emith_jump_at(bd->tcache_ptr, sh2_drc_dispatcher); + // remove from inval_lookup + addr = bd->addr & ~(ADDR_TO_BLOCK_PAGE - 1); + for (; addr < bd->end_addr; addr += ADDR_TO_BLOCK_PAGE) { + i = (addr & ram_mask) / ADDR_TO_BLOCK_PAGE; + rm_from_block_list(&inval_lookup[tcache_id][i], bd); + } - // attempt to handle self-modifying blocks by exiting at nearest known PC tmp = tcache_ptr; - tcache_ptr = bd->tcache_ptr; - emit_move_r_imm32(SHR_PC, bd->addr); - rcache_flush(); - emith_jump(sh2_drc_dispatcher); + bl_unresolved = unresolved_links[tcache_id]; + + // remove from hash table, make incoming links unresolved + // XXX: maybe patch branches w/flush instead? + for (i = 0; i < bd->entry_count; i++) { + rm_from_hashlist(&bd->entryp[i], tcache_id); + + // since we never reuse tcache space of dead blocks, + // insert jump to dispatcher for blocks that are linked to this + tcache_ptr = bd->entryp[i].tcache_ptr; + emit_move_r_imm32(SHR_PC, bd->addr); + rcache_flush(); + emith_jump(sh2_drc_dispatcher); + + host_instructions_updated(bd->entryp[i].tcache_ptr, tcache_ptr); + + for (bl = bd->entryp[i].links; bl != NULL; ) { + bl_next = bl->next; + bl->next = bl_unresolved; + bl_unresolved = bl; + bl = bl_next; + } + } - host_instructions_updated(bd->tcache_ptr, tcache_ptr); tcache_ptr = tmp; + unresolved_links[tcache_id] = bl_unresolved; bd->addr = bd->end_addr = 0; + bd->entry_count = 0; } static void sh2_smc_rm_block(u32 a, u16 *drc_ram_blk, int tcache_id, u32 shift, u32 mask) { - block_desc *btab = block_tables[tcache_id]; - u16 *p = drc_ram_blk + ((a & mask) >> shift); - u16 *pmax = drc_ram_blk + (mask >> shift); - u32 id = ~0, end_addr; - int max_zeros = MAX_LITERAL_OFFSET >> shift; - int i, zeros; - - if (*p == 0 || (*p >> 1) >= BLOCKID_MAX) { - u32 from = ~0, to = 0; - dbg(1, "slow-remove blocks at @%08x", a); - for (i = 0; i < block_counts[tcache_id]; i++) { - if (btab[i].addr <= a && a < btab[i].end_addr) { - if (btab[i].addr < from) - from = btab[i].addr; - if (btab[i].end_addr > to) - to = btab[i].end_addr; - sh2_smc_rm_block_entry(&btab[i], tcache_id); - } - } - if (from < to) { - p = drc_ram_blk + ((from & mask) >> shift); - memset(p, 0, (to - from) >> (shift - 1)); - } - return; - } - - // use end_addr to distinguish the same block - end_addr = btab[*p >> 1].end_addr; - - // go up to the start - for (zeros = 0; p > drc_ram_blk && zeros < max_zeros; p--) { - // there can be holes because games sometimes keep variables - // directly in literal pool and we don't inline them - // to avoid recompile (Star Wars Arcade) - if (p[-1] == 0) { - zeros++; + struct block_list **blist = NULL, *entry; + u32 from = ~0, to = 0; + struct block_desc *block; + + blist = &inval_lookup[tcache_id][(a & mask) / ADDR_TO_BLOCK_PAGE]; + entry = *blist; + while (entry != NULL) { + block = entry->block; + if (block->addr <= a && a < block->end_addr) { + if (block->addr < from) + from = block->addr; + if (block->end_addr > to) + to = block->end_addr; + + sh2_smc_rm_block_entry(block, tcache_id, mask); + + // entry lost, restart search + entry = *blist; continue; } - zeros = 0; - if ((p[-1] >> 1) >= BLOCKID_MAX) - break; - if 
(btab[p[-1] >> 1].end_addr != end_addr) - break; + entry = entry->next; } - if (!(*p & 1)) - dbg(1, "smc rm: missing block start for %08x?", a); - - // now go down and kill everything - for (zeros = 0; p < pmax && zeros < max_zeros; p++) { - if (*p == 0) { - zeros++; - continue; - } - zeros = 0; - if ((*p >> 1) >= BLOCKID_MAX) - break; - if ((*p >> 1) == id) { - *p = 0; - continue; - } - id = *p >> 1; - if (btab[id].end_addr != end_addr) - break; - *p = 0; - sh2_smc_rm_block_entry(&btab[id], tcache_id); + // clear entry points + if (from < to) { + u16 *p = drc_ram_blk + ((from & mask) >> shift); + memset(p, 0, (to - from) >> (shift - 1)); } } @@ -3005,7 +3058,7 @@ void block_stats(void) total += block_tables[b][i].refcount; for (c = 0; c < 10; c++) { - block_desc *blk, *maxb = NULL; + struct block_desc *blk, *maxb = NULL; int max = 0; for (b = 0; b < ARRAY_SIZE(block_tables); b++) { for (i = 0; i < block_counts[b]; i++) { @@ -3059,12 +3112,22 @@ int sh2_drc_init(SH2 *sh2) if (block_tables[i] == NULL) goto fail; // max 2 block links (exits) per block - block_links[i] = calloc(block_max_counts[i] * 2, sizeof(*block_links[0])); - if (block_links[i] == NULL) + block_link_pool[i] = calloc(block_link_pool_max_counts[i], + sizeof(*block_link_pool[0])); + if (block_link_pool[i] == NULL) + goto fail; + + inval_lookup[i] = calloc(ram_sizes[i] / ADDR_TO_BLOCK_PAGE, + sizeof(inval_lookup[0])); + if (inval_lookup[i] == NULL) + goto fail; + + hash_tables[i] = calloc(hash_table_sizes[i], sizeof(*hash_tables[0])); + if (hash_tables[i] == NULL) goto fail; } memset(block_counts, 0, sizeof(block_counts)); - memset(block_link_counts, 0, sizeof(block_link_counts)); + memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts)); drc_cmn_init(); tcache_ptr = tcache; @@ -3090,12 +3153,6 @@ int sh2_drc_init(SH2 *sh2) #endif } - if (hash_table == NULL) { - hash_table = calloc(sizeof(hash_table[0]), MAX_HASH_ENTRIES); - if (hash_table == NULL) - goto fail; - } - return 0; fail: @@ -3107,32 +3164,37 @@ void sh2_drc_finish(SH2 *sh2) { int i; - if (block_tables[0] != NULL) { - block_stats(); + if (block_tables[0] == NULL) + return; - for (i = 0; i < TCACHE_BUFFERS; i++) { + sh2_drc_flush_all(); + + for (i = 0; i < TCACHE_BUFFERS; i++) { #if (DRC_DEBUG & 4) - printf("~~~ tcache %d\n", i); - tcache_dsm_ptrs[i] = tcache_bases[i]; - tcache_ptr = tcache_ptrs[i]; - do_host_disasm(i); + printf("~~~ tcache %d\n", i); + tcache_dsm_ptrs[i] = tcache_bases[i]; + tcache_ptr = tcache_ptrs[i]; + do_host_disasm(i); #endif - if (block_tables[i] != NULL) - free(block_tables[i]); - block_tables[i] = NULL; - if (block_links[i] == NULL) - free(block_links[i]); - block_links[i] = NULL; - } + if (block_tables[i] != NULL) + free(block_tables[i]); + block_tables[i] = NULL; + if (block_link_pool[i] == NULL) + free(block_link_pool[i]); + block_link_pool[i] = NULL; - drc_cmn_cleanup(); - } + if (inval_lookup[i] == NULL) + free(inval_lookup[i]); + inval_lookup[i] = NULL; - if (hash_table != NULL) { - free(hash_table); - hash_table = NULL; + if (hash_tables[i] != NULL) { + free(hash_tables[i]); + hash_tables[i] = NULL; + } } + + drc_cmn_cleanup(); } #endif /* DRC_SH2 */ @@ -3175,19 +3237,19 @@ void scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc) u32 pc, target, op; int cycles; - memset(op_flags, 0, BLOCK_CYCLE_LIMIT); + memset(op_flags, 0, BLOCK_INSN_LIMIT); dr_pc_base = dr_get_pc_base(base_pc, is_slave); - for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT-1; cycles++, pc += 2) { + for (cycles = 0, pc = base_pc; cycles 
< BLOCK_INSN_LIMIT-1; cycles++, pc += 2) { op = FETCH_OP(pc); if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR signed int offs = ((signed int)(op << 20) >> 19); pc += 2; OP_FLAGS(pc) |= OF_DELAY_OP; target = pc + offs + 2; - if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2) - OP_FLAGS(target) |= OF_TARGET; + if (base_pc <= target && target < base_pc + BLOCK_INSN_LIMIT * 2) + OP_FLAGS(target) |= OF_BTARGET; break; } if ((op & 0xf000) == 0) { @@ -3212,8 +3274,8 @@ void scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc) if (op & 0x0400) OP_FLAGS(pc + 2) |= OF_DELAY_OP; target = pc + offs + 4; - if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2) - OP_FLAGS(target) |= OF_TARGET; + if (base_pc <= target && target < base_pc + BLOCK_INSN_LIMIT * 2) + OP_FLAGS(target) |= OF_BTARGET; } if ((op & 0xff00) == 0xc300) // TRAPA break;
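/* Editor's note: a hypothetical condensation, not part of the patch, of the
 * cross-block linking scheme this commit introduces (dr_prepare_ext_branch()
 * plus dr_link_blocks()), with pool overflow/reuse handling omitted.  A
 * branch to an already-compiled entry is linked immediately; otherwise the
 * link record is parked on unresolved_links[] and patched when the target
 * entry is emitted.  On self-modifying code, sh2_smc_rm_block_entry() pushes
 * an entry's links back onto unresolved_links[] and overwrites the dead
 * entry point with a jump to sh2_drc_dispatcher. */
static void *example_branch_target(u32 pc, int is_slave, int tcache_id)
{
  int target_tcache_id;
  struct block_entry *be = dr_get_entry(pc, is_slave, &target_tcache_id);
  struct block_link *bl;

  // links are only tracked between blocks of the same tcache
  if (target_tcache_id != tcache_id)
    return sh2_drc_dispatcher;

  // grab a link record and remember the branch insn location for patching
  bl = &block_link_pool[tcache_id][block_link_pool_counts[tcache_id]++];
  bl->target_pc = pc;
  bl->jump = tcache_ptr;

  if (be != NULL) {
    // target already compiled: register the link and jump straight to it
    bl->next = be->links;
    be->links = bl;
    return be->tcache_ptr;
  }

  // not compiled yet: park the link; dr_link_blocks() will patch the branch
  // once a block entry for this pc is emitted
  bl->next = unresolved_links[tcache_id];
  unresolved_links[tcache_id] = bl;
  return sh2_drc_dispatcher;
}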