7 #include "../drc/cmn.h"
9 #define BLOCK_CYCLE_LIMIT 100
12 SHR_R0 = 0, SHR_R15 = 15,
13 SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
14 SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
17 typedef struct block_desc_ {
18 u32 addr; // SH2 PC address
19 void *tcache_ptr; // translated block for above PC
20 struct block_desc_ *next; // next block with the same PC hash
23 #define MAX_BLOCK_COUNT 1024
24 static block_desc *block_table;
25 static int block_count;
27 #define MAX_HASH_ENTRIES 1024
28 #define HASH_MASK (MAX_HASH_ENTRIES - 1)
31 #include "mame/sh2dasm.h"
32 #include <platform/linux/host_dasm.h>
33 static void *tcache_dsm_ptr = tcache;
36 static void *tcache_ptr;
38 #include "../drc/emit_x86.c"
40 extern void sh2_drc_entry(SH2 *sh2, void *block);
41 extern void sh2_drc_exit(void);
44 extern void __attribute__((regparm(2))) sh2_do_op(SH2 *sh2, int opcode);
// Look up a translated block by SH2 PC address in a hash-bucket collision
// chain. `tab` is the bucket head; the caller has already compared the head
// entry itself, so the walk starts at tab->next.
// NOTE(review): listing is gapped here — the loop body braces and the
// miss-path return value are not visible; confirm against the full file.
46 static void *dr_find_block(block_desc *tab, u32 addr)
// walk the chain of blocks sharing this hash bucket
48 for (tab = tab->next; tab != NULL; tab = tab->next)
49 if (tab->addr == addr)
// match found: hand back the translated code entry point
53 return tab->tcache_ptr;
// fell off the chain without a match
55 printf("block miss for %08x\n", addr);
// Register a newly translated block: claim the next block_desc slot and
// record the SH2 PC address and its translated-code pointer.
// Returns the new descriptor (return statement not visible in this view).
59 static block_desc *dr_add_block(u32 addr, void *tcache_ptr)
// table full — currently a hard stop rather than a cache flush (see FIXME)
63 if (block_count == MAX_BLOCK_COUNT) {
64 // FIXME: flush cache instead
65 printf("block descriptor overflow\n");
// take the next free slot; block_count increment not visible in this view
69 bd = &block_table[block_count];
71 bd->tcache_ptr = tcache_ptr;
// Map an SH2 PC address to its hash bucket. Expands to an lvalue, so it is
// used both to read a bucket head and to install a new one
// (see HASH_FUNC(...) = this_block in sh2_translate).
77 #define HASH_FUNC(hash_tab, addr) \
78 ((block_desc **)(hash_tab))[(addr) & HASH_MASK]
80 // ---------------------------------------------------------------
// Emit host code that loads a 32-bit immediate into an SH2 register.
// If the SH2 register is statically mapped to a host register
// (reg_map_g2h[dst] != -1) the value goes there directly; otherwise it is
// staged in a temp and stored to the register's slot in the SH2 context.
// NOTE(review): the host_dst != -1 fast path is not visible in this gapped view.
82 static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
84 int host_dst = reg_map_g2h[dst];
89 emith_move_r_imm(tmp, imm);
// spill to the in-memory SH2 context; regs are u32 slots, hence dst * 4
91 emith_ctx_write(tmp, dst * 4);
// Emit host code for an SH2 register-to-register move, picking the cheapest
// form based on which of src/dst are mapped to host registers.
// NOTE(review): gapped view — only the both-mapped path and the fully
// memory-to-memory path (via temp) are visible; mixed cases are elided.
94 static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
96 int host_dst = reg_map_g2h[dst], host_src = reg_map_g2h[src];
// both live in host registers: plain host move
99 if (host_dst != -1 && host_src != -1) {
100 emith_move_r_r(host_dst, host_src);
// neither mapped: bounce through a temp via the SH2 context block
110 emith_ctx_read(tmp, src * 4);
112 emith_ctx_write(tmp, dst * 4);
// Emit host code for BRAF/BSRF-style PC update: PC = Rm + pc, where `pc`
// is the branch-relative base supplied by the caller. Host reg 0 is used
// as scratch for the computation.
// NOTE(review): gapped view — the else branch pairing and the add for the
// ctx-read path are not fully visible.
115 static void emit_braf(sh2_reg_e reg, u32 pc)
117 int host_reg = reg_map_g2h[reg];
118 if (host_reg == -1) {
// Rm not register-mapped: fetch it from the SH2 context into scratch
119 emith_ctx_read(0, reg * 4);
121 emith_move_r_r(0, host_reg);
// scratch += pc, producing the branch target
122 emith_add_r_imm(0, pc);
// store the computed target as the new SH2 PC
124 emith_ctx_write(0, SHR_PC * 4);
// Helper macro for emitting conditionally-executed code (used for BT/BF
// below). Presumably distinguishes "no pending delay slot" (delayed_op < 0)
// from the delay-slot case — TODO confirm, most of the macro body is not
// visible in this gapped view.
127 // FIXME: this is broken, delayed insn shouldn't affect branch
129 if (delayed_op < 0) { \
134 pc -= 2 /* adjust back */
// Translate a 0100xxxx-group opcode. Visible portion only shows the
// fallback: emit a call into the interpreter core (sh2_do_op) with the
// SH2 context and raw opcode as regparm arguments.
137 static int sh2_translate_op4(int op)
143 emith_pass_arg(2, sh2, op);
144 emith_call(sh2_do_op);
// Translate a run of SH2 instructions starting at sh2->pc into host code in
// the translation cache. Registers the new block in block_table and installs
// it as the head of its PC hash bucket; `other_block` is the previous bucket
// head on a hash collision (chained via ->next). Returns the block entry
// point (return statement not visible in this gapped view).
// Only branch/jump opcodes are translated natively here; everything else
// falls back to an emitted call into the interpreter (sh2_do_op).
152 static void *sh2_translate(SH2 *sh2, block_desc *other_block)
154 void *block_entry = tcache_ptr;
155 block_desc *this_block;
156 unsigned int pc = sh2->pc;
// delayed_op < 0 means "no branch delay slot pending"
157 int op, delayed_op = -1;
// register this block and chain any colliding block behind it
161 this_block = dr_add_block(pc, block_entry);
162 if (other_block != NULL) {
163 printf("hash collision between %08x and %08x\n", pc, other_block->addr);
164 this_block->next = other_block;
// new block becomes the bucket head for fast lookup next time
166 HASH_FUNC(sh2->pc_hashtab, pc) = this_block;
169 printf("== %csh2 block #%d %08x %p\n", sh2->is_slave ? 's' : 'm',
170 block_count, pc, block_entry);
// translate until the block's cycle budget is exhausted (or a branch ends it)
173 while (cycles < BLOCK_CYCLE_LIMIT)
// fetch the next 16-bit SH2 opcode from guest memory
179 op = p32x_sh2_read16(pc, sh2->is_slave);
// debug: disassemble each guest instruction as it is translated
184 DasmSH2(buff, pc, op);
185 printf("%08x %04x %s\n", pc, op, buff);
// dispatch on the top nibble of the opcode
193 switch ((op >> 12) & 0x0f)
196 // RTS 0000000000001011
// return: PC = PR
199 emit_move_r_r(SHR_PC, SHR_PR);
203 // RTE 0000000000101011
// RTE restores SR too, so it goes through the interpreter; PC is fixed
// up first (pc - 4: presumably rewind past insn + delay slot — confirm)
207 //emit_move_r_r(SHR_PC, SHR_PR);
208 emit_move_r_imm32(SHR_PC, pc - 4);
209 emith_pass_arg(2, sh2, op);
210 emith_call(sh2_do_op);
213 // BRAF Rm 0000mmmm00100011
// PC = Rm + pc (register-relative branch)
217 emit_braf((op >> 8) & 0x0f, pc);
220 // BSRF Rm 0000mmmm00000011
// save return address, then PC = Rm + pc
223 emit_move_r_imm32(SHR_PR, pc);
224 emit_braf((op >> 8) & 0x0f, pc);
231 // JMP @Rm 0100mmmm00101011
232 if ((op & 0xff) == 0x2b) {
// absolute jump: PC = Rm
234 emit_move_r_r(SHR_PC, (op >> 8) & 0x0f);
238 // JSR @Rm 0100mmmm00001011
239 if ((op & 0xff) == 0x0b) {
// save return address, then PC = Rm
241 emit_move_r_imm32(SHR_PR, pc);
242 emit_move_r_r(SHR_PC, (op >> 8) & 0x0f);
// conditional branches, selected by bits 8-11
250 switch (op & 0x0f00) {
251 // BT/S label 10001101dddddddd
253 // BF/S label 10001111dddddddd
259 // BT label 10001001dddddddd
261 // BF label 10001011dddddddd
// default fall-through target (branch not taken)
264 emit_move_r_imm32(SHR_PC, pc);
// sign-extend the 8-bit displacement and scale by 2 (<<24 then >>23)
266 tmp = ((signed int)(op << 24) >> 23);
// bit 9 distinguishes BF (0x0200 set) from BT
267 EMIT_CONDITIONAL(emit_move_r_imm32(SHR_PC, pc + tmp + adj), (op & 0x0200) ? 1 : 0);
274 // BRA label 1010dddddddddddd
// sign-extend the 12-bit displacement and scale by 2 (<<20 then >>19)
277 tmp = ((signed int)(op << 20) >> 19);
278 emit_move_r_imm32(SHR_PC, pc + tmp);
283 // BSR label 1011dddddddddddd
// save return address; the target emit is not visible in this view
285 emit_move_r_imm32(SHR_PR, pc);
// default case: untranslated opcode — sync PC and call the interpreter
290 emit_move_r_imm32(SHR_PC, pc - 2);
291 emith_pass_arg(2, sh2, op);
292 emith_call(sh2_do_op);
// debug: disassemble the host code emitted for this guest instruction
297 host_dasm(tcache_dsm_ptr, (char *)tcache_ptr - (char *)tcache_dsm_ptr);
298 tcache_dsm_ptr = tcache_ptr;
// hard stop on translation-cache exhaustion (no flush/retry yet)
303 if ((char *)tcache_ptr - (char *)tcache > DRC_TCACHE_SIZE) {
304 printf("tcache overflow!\n");
// block epilogue: subtract consumed cycles from the counter kept in the
// upper bits of SR (<< 12; see sh2_execute), then jump to the exit stub
309 if (reg_map_g2h[SHR_SR] == -1) {
310 emith_ctx_sub(cycles << 12, SHR_SR * 4);
312 emith_sub_r_imm(reg_map_g2h[SHR_SR], cycles << 12);
313 emith_jump(sh2_drc_exit);
// debug: disassemble the epilogue code as well
316 host_dasm(tcache_dsm_ptr, (char *)tcache_ptr - (char *)tcache_dsm_ptr);
317 tcache_dsm_ptr = tcache_ptr;
324 host_dasm(tcache_dsm_ptr, (char *)tcache_ptr - (char *)tcache_dsm_ptr);
325 tcache_dsm_ptr = tcache_ptr;
// Main DRC loop: while the cycle counter stashed in SR's upper bits is
// positive, look up (or translate) the block for the current PC and enter it.
// noinline keeps this frame stable around the asm entry/exit trampolines.
330 void __attribute__((noinline)) sh2_drc_dispatcher(SH2 *sh2)
// cycles remaining live in SR bits 12+ (see sh2_execute)
332 while (((signed int)sh2->sr >> 12) > 0)
334 block_desc *bd = HASH_FUNC(sh2->pc_hashtab, sh2->pc);
// fast path: bucket head matches current PC
338 if (bd->addr == sh2->pc)
339 block = bd->tcache_ptr;
// otherwise walk the collision chain
341 block = dr_find_block(bd, sh2->pc);
// still nothing: translate a fresh block (bd passed as collision head)
345 block = sh2_translate(sh2, bd);
348 printf("= %csh2 enter %08x %p\n", sh2->is_slave ? 's' : 'm', sh2->pc, block);
// asm trampoline: load mapped regs from the context and jump into the block
350 sh2_drc_entry(sh2, block);
// Public entry: run this SH2 core for (approximately) `cycles` cycles.
// Keeps a running aim/done pair so over- or under-runs of previous slices
// carry over into the next call.
354 void sh2_execute(SH2 *sh2, int cycles)
356 sh2->cycles_aim += cycles;
357 cycles = sh2->cycles_aim - sh2->cycles_done;
359 // cycles are kept in SHR_SR unused bits (upper 20)
361 sh2->sr |= cycles << 12;
362 sh2_drc_dispatcher(sh2);
// whatever is left in SR>>12 (may be negative on overrun) was not consumed
364 sh2->cycles_done += cycles - ((signed int)sh2->sr >> 12);
368 static int cmn_init_done;
// One-time setup shared by all SH2 cores: allocate the (zeroed) block
// descriptor table. Error-return value for the NULL case is not visible
// in this gapped view.
370 static int common_init(void)
373 block_table = calloc(MAX_BLOCK_COUNT, sizeof(*block_table));
374 if (block_table == NULL)
383 int sh2_drc_init(SH2 *sh2)
385 if (!cmn_init_done) {
386 int ret = common_init();
391 assert(sh2->pc_hashtab == NULL);
392 sh2->pc_hashtab = calloc(sizeof(sh2->pc_hashtab[0]), MAX_HASH_ENTRIES);
393 if (sh2->pc_hashtab == NULL)