drc: debug improvements
[picodrive.git] / cpu / sh2 / compiler.c
1/*
2 * SH2 recompiler
3 * (C) notaz, 2009,2010,2013
4 *
5 * This work is licensed under the terms of MAME license.
6 * See COPYING file in the top-level directory.
7 *
8 * notes:
9 * - tcache, block descriptor, link buffer overflows result in sh2_translate()
10 * failure, followed by full tcache invalidation for that region
11 * - jumps between blocks are tracked for SMC handling (in block_entry->links),
12 * except jumps between different tcaches
13 *
14 * implemented:
15 * - static register allocation
16 * - remaining register caching and tracking in temporaries
17 * - block-local branch linking
18 * - block linking (except between tcaches)
19 * - some constant propagation
20 *
21 * TODO:
22 * - better constant propagation
23 * - stack caching?
24 * - bug fixing
25 */
26#include <stddef.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <assert.h>
30
31#include "../../pico/pico_int.h"
32#include "sh2.h"
33#include "compiler.h"
34#include "../drc/cmn.h"
35#include "../debug.h"
36
37// features
38#define PROPAGATE_CONSTANTS 1
39#define LINK_BRANCHES 1
40
41// limits (per block)
42#define MAX_BLOCK_SIZE (BLOCK_INSN_LIMIT * 6 * 6)
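// (rough worst-case estimate of host code bytes per translated block; it is
// only used for the tcache overflow prediction near the top of sh2_translate())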
43
44// max literal offset from the block end
45#define MAX_LITERAL_OFFSET 32*2
46#define MAX_LITERALS (BLOCK_INSN_LIMIT / 4)
47#define MAX_LOCAL_BRANCHES 32
48
49// debug stuff
50// 1 - warnings/errors
51// 2 - block info/smc
52// 4 - asm
53// 8 - runtime block entry log
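// the levels are bit flags and can be combined, e.g. a build with
// -DDRC_DEBUG=3 logs both warnings/errors and block info/smc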
54// {
55#ifndef DRC_DEBUG
56#define DRC_DEBUG 0
57#endif
58
59#if DRC_DEBUG
60#define dbg(l,...) { \
61 if ((l) & DRC_DEBUG) \
62 elprintf(EL_STATUS, ##__VA_ARGS__); \
63}
64#include "mame/sh2dasm.h"
65#include <platform/libpicofe/linux/host_dasm.h>
66static int insns_compiled, hash_collisions, host_insn_count;
67#define COUNT_OP \
68 host_insn_count++
69#else // !DRC_DEBUG
70#define COUNT_OP
71#define dbg(...)
72#endif
73
74///
75#define FETCH_OP(pc) \
76 dr_pc_base[(pc) / 2]
77
78#define FETCH32(a) \
79 ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
80
81#define CHECK_UNHANDLED_BITS(mask, label) { \
82 if ((op & (mask)) != 0) \
83 goto label; \
84}
85
86#define GET_Fx() \
87 ((op >> 4) & 0x0f)
88
89#define GET_Rm GET_Fx
90
91#define GET_Rn() \
92 ((op >> 8) & 0x0f)
93
94#define BITMASK1(v0) (1 << (v0))
95#define BITMASK2(v0,v1) ((1 << (v0)) | (1 << (v1)))
96#define BITMASK3(v0,v1,v2) (BITMASK2(v0,v1) | (1 << (v2)))
97#define BITMASK4(v0,v1,v2,v3) (BITMASK3(v0,v1,v2) | (1 << (v3)))
98#define BITMASK5(v0,v1,v2,v3,v4) (BITMASK4(v0,v1,v2,v3) | (1 << (v4)))
99
100#define SHR_T SHR_SR // might make them separate someday
101
102static struct op_data {
103 u8 op;
104 u8 cycles;
105 u8 size; // 0, 1, 2 - byte, word, long
106 s8 rm; // branch or load/store data reg
107 u32 source; // bitmask of src regs
108 u32 dest; // bitmask of dest regs
109 u32 imm; // immediate/io address/branch target
110 // (for literal - address, not value)
111} ops[BLOCK_INSN_LIMIT];
112
113enum op_types {
114 OP_UNHANDLED = 0,
115 OP_BRANCH,
116 OP_BRANCH_CT, // conditional, branch if T set
117 OP_BRANCH_CF, // conditional, branch if T clear
118 OP_BRANCH_R, // indirect
119 OP_BRANCH_RF, // indirect far (PC + Rm)
120 OP_SETCLRT, // T flag set/clear
121 OP_MOVE, // register move
122 OP_LOAD_POOL, // literal pool load
123 OP_SLEEP,
124 OP_RTE,
125};
126
127#ifdef DRC_SH2
128
129#if (DRC_DEBUG & 4)
130static u8 *tcache_dsm_ptrs[3];
131static char sh2dasm_buff[64];
132#define do_host_disasm(tcid) \
133 host_dasm(tcache_dsm_ptrs[tcid], tcache_ptr - tcache_dsm_ptrs[tcid]); \
134 tcache_dsm_ptrs[tcid] = tcache_ptr
135#else
136#define do_host_disasm(x)
137#endif
138
139#if (DRC_DEBUG & 8) || defined(PDB)
140static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
141{
142 if (block != NULL) {
143 dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
144 sh2->pc, block, (signed int)sr >> 12);
145 pdb_step(sh2, sh2->pc);
146 }
147 return block;
148}
149#endif
150// } debug
151
152#define TCACHE_BUFFERS 3
153
154// we have 3 translation cache buffers, split from one drc/cmn buffer.
155// BIOS shares tcache with data array because it's only used for init
156// and can be discarded early
157// XXX: need to tune sizes
158static const int tcache_sizes[TCACHE_BUFFERS] = {
159 DRC_TCACHE_SIZE * 6 / 8, // ROM (rarely used), DRAM
160 DRC_TCACHE_SIZE / 8, // BIOS, data array in master sh2
161 DRC_TCACHE_SIZE / 8, // ... slave
162};
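// a minimal sketch of how the regions are presumably carved out of the shared
// drc/cmn buffer at init time (the init code is outside this excerpt):
//   tcache_bases[0] = tcache_ptrs[0] = <drc/cmn buffer start>;
//   for (i = 1; i < TCACHE_BUFFERS; i++)
//     tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i-1] + tcache_sizes[i-1];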
163
164static u8 *tcache_bases[TCACHE_BUFFERS];
165static u8 *tcache_ptrs[TCACHE_BUFFERS];
166
167// ptr for code emitters
168static u8 *tcache_ptr;
169
170#define MAX_BLOCK_ENTRIES (BLOCK_INSN_LIMIT / 8)
171
172struct block_link {
173 u32 target_pc;
174 void *jump; // insn address
175 struct block_link *next; // either in block_entry->links or unresolved_links
176};
177
178struct block_entry {
179 u32 pc;
180 void *tcache_ptr; // translated block for above PC
181 struct block_entry *next; // next block in hash_table with same pc hash
182 struct block_link *links; // links to this entry
183#if (DRC_DEBUG & 2)
184 struct block_desc *block;
185#endif
186};
187
188struct block_desc {
189 u32 addr; // block start SH2 PC address
190 u32 end_addr; // address after last op or literal
191#if (DRC_DEBUG & 2)
192 int refcount;
193#endif
194 int entry_count;
195 struct block_entry entryp[MAX_BLOCK_ENTRIES];
196};
197
198static const int block_max_counts[TCACHE_BUFFERS] = {
199 4*1024,
200 256,
201 256,
202};
203static struct block_desc *block_tables[TCACHE_BUFFERS];
204static int block_counts[TCACHE_BUFFERS];
205
206// we have block_link_pool to avoid using mallocs
207static const int block_link_pool_max_counts[TCACHE_BUFFERS] = {
208 4*1024,
209 256,
210 256,
211};
212static struct block_link *block_link_pool[TCACHE_BUFFERS];
213static int block_link_pool_counts[TCACHE_BUFFERS];
214static struct block_link *unresolved_links[TCACHE_BUFFERS];
215
216// used for invalidation
217static const int ram_sizes[TCACHE_BUFFERS] = {
218 0x40000,
219 0x1000,
220 0x1000,
221};
222#define ADDR_TO_BLOCK_PAGE 0x100
223
224struct block_list {
225 struct block_desc *block;
226 struct block_list *next;
227};
228
229// array of pointers to block_lists for RAM and 2 data arrays
230// each array has len: sizeof(mem) / ADDR_TO_BLOCK_PAGE
231static struct block_list **inval_lookup[TCACHE_BUFFERS];
232
233static const int hash_table_sizes[TCACHE_BUFFERS] = {
234 0x1000,
235 0x100,
236 0x100,
237};
238static struct block_entry **hash_tables[TCACHE_BUFFERS];
239
240#define HASH_FUNC(hash_tab, addr, mask) \
241 (hash_tab)[(((addr) >> 20) ^ ((addr) >> 2)) & (mask)]
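// worked example (illustrative values): for pc 0x06001234 in tcache 0
// (mask 0xfff), the bucket index is ((0x060 ^ 0x0180048d) & 0xfff) = 0x4ed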
242
243// host register tracking
244enum {
245 HR_FREE,
246 HR_CACHED, // 'val' has sh2_reg_e
247// HR_CONST, // 'val' has a constant
248 HR_TEMP, // reg used for temp storage
249};
250
251enum {
252 HRF_DIRTY = 1 << 0, // reg has "dirty" value to be written to ctx
253 HRF_LOCKED = 1 << 1, // HR_CACHED can't be evicted
254};
255
256typedef struct {
257 u32 hreg:5; // "host" reg
258 u32 greg:5; // "guest" reg
259 u32 type:3;
260 u32 flags:3;
261 u32 stamp:16; // kind of a timestamp
262} temp_reg_t;
263
264// note: reg_temp[] must have at least as many registers as the
265// handlers use in the worst case (currently 4)
266#ifdef __arm__
267#include "../drc/emit_arm.c"
268
269static const int reg_map_g2h[] = {
270 4, 5, 6, 7,
271 8, -1, -1, -1,
272 -1, -1, -1, -1,
273 -1, -1, -1, 9, // r12 .. sp
274 -1, -1, -1, 10, // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
275 -1, -1, -1, -1, // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
276};
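// i.e. guest R0-R4 are kept statically in host r4-r8, SP (R15) in r9 and
// SR in r10; guest regs mapped to -1 go through reg_temp[]/context memory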
277
278static temp_reg_t reg_temp[] = {
279 { 0, },
280 { 1, },
281 { 12, },
282 { 14, },
283 { 2, },
284 { 3, },
285};
286
287#elif defined(__i386__)
288#include "../drc/emit_x86.c"
289
290static const int reg_map_g2h[] = {
291 xSI,-1, -1, -1,
292 -1, -1, -1, -1,
293 -1, -1, -1, -1,
294 -1, -1, -1, -1,
295 -1, -1, -1, xDI,
296 -1, -1, -1, -1,
297};
298
299// ax, cx, dx are usually temporaries by convention
300static temp_reg_t reg_temp[] = {
301 { xAX, },
302 { xBX, },
303 { xCX, },
304 { xDX, },
305};
306
307#else
308#error unsupported arch
309#endif
310
311#define T 0x00000001
312#define S 0x00000002
313#define I 0x000000f0
314#define Q 0x00000100
315#define M 0x00000200
316#define T_save 0x00000800
317
318#define I_SHIFT 4
319#define Q_SHIFT 8
320#define M_SHIFT 9
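// note: the cached SR also holds the remaining cycle count in its upper bits,
// see the (signed int)sr >> 12 in sh2_drc_log_entry above and the
// FLUSH_CYCLES() macro (cycles << 12) further below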
321
322static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
323static void (*sh2_drc_dispatcher)(void);
324static void (*sh2_drc_exit)(void);
325static void (*sh2_drc_test_irq)(void);
326
327static u32 REGPARM(2) (*sh2_drc_read8)(u32 a, SH2 *sh2);
328static u32 REGPARM(2) (*sh2_drc_read16)(u32 a, SH2 *sh2);
329static u32 REGPARM(2) (*sh2_drc_read32)(u32 a, SH2 *sh2);
330static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
331static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
332static int REGPARM(3) (*sh2_drc_write32)(u32 a, u32 d, SH2 *sh2);
333
334// address space stuff
335static int dr_ctx_get_mem_ptr(u32 a, u32 *mask)
336{
337 int poffs = -1;
338
339 if ((a & ~0x7ff) == 0) {
340 // BIOS
341 poffs = offsetof(SH2, p_bios);
342 *mask = 0x7ff;
343 }
344 else if ((a & 0xfffff000) == 0xc0000000) {
345 // data array
346 poffs = offsetof(SH2, p_da);
347 *mask = 0xfff;
348 }
349 else if ((a & 0xc6000000) == 0x06000000) {
350 // SDRAM
351 poffs = offsetof(SH2, p_sdram);
352 *mask = 0x03ffff;
353 }
354 else if ((a & 0xc6000000) == 0x02000000) {
355 // ROM
356 poffs = offsetof(SH2, p_rom);
357 *mask = 0x3fffff;
358 }
359
360 return poffs;
361}
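// e.g. a = 0x06000200 (SDRAM) yields offsetof(SH2, p_sdram) with *mask set to
// 0x03ffff; the caller reads that pointer from the context and adds the masked
// address to it, as emit_get_rbase_and_offs() does below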
362
363static struct block_entry *dr_get_entry(u32 pc, int is_slave, int *tcache_id)
364{
365 struct block_entry *be;
366 u32 tcid = 0, mask;
367
368 // data arrays have their own caches
369 if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0)
370 tcid = 1 + is_slave;
371
372 *tcache_id = tcid;
373
374 mask = hash_table_sizes[tcid] - 1;
375 be = HASH_FUNC(hash_tables[tcid], pc, mask);
376 for (; be != NULL; be = be->next)
377 if (be->pc == pc)
378 return be;
379
380 return NULL;
381}
382
383// ---------------------------------------------------------------
384
385// block management
386static void add_to_block_list(struct block_list **blist, struct block_desc *block)
387{
388 struct block_list *added = malloc(sizeof(*added));
389 if (!added) {
390 elprintf(EL_ANOMALY, "drc OOM (1)");
391 return;
392 }
393 added->block = block;
394 added->next = *blist;
395 *blist = added;
396}
397
398static void rm_from_block_list(struct block_list **blist, struct block_desc *block)
399{
400 struct block_list *prev = NULL, *current = *blist;
401 for (; current != NULL; prev = current, current = current->next) {
402 if (current->block == block) {
403 if (prev == NULL)
404 *blist = current->next;
405 else
406 prev->next = current->next;
407 free(current);
408 return;
409 }
410 }
411 dbg(1, "can't rm block %p (%08x-%08x)",
412 block, block->addr, block->end_addr);
413}
414
415static void rm_block_list(struct block_list **blist)
416{
417 struct block_list *tmp, *current = *blist;
418 while (current != NULL) {
419 tmp = current;
420 current = current->next;
421 free(tmp);
422 }
423 *blist = NULL;
424}
425
426static void REGPARM(1) flush_tcache(int tcid)
427{
428 int i;
429
430 dbg(1, "tcache #%d flush! (%d/%d, bds %d/%d)", tcid,
431 tcache_ptrs[tcid] - tcache_bases[tcid], tcache_sizes[tcid],
432 block_counts[tcid], block_max_counts[tcid]);
433
434 block_counts[tcid] = 0;
435 block_link_pool_counts[tcid] = 0;
436 unresolved_links[tcid] = NULL;
437 memset(hash_tables[tcid], 0, sizeof(*hash_tables[0]) * hash_table_sizes[tcid]);
438 tcache_ptrs[tcid] = tcache_bases[tcid];
439 if (Pico32xMem != NULL) {
440 if (tcid == 0) // ROM, RAM
441 memset(Pico32xMem->drcblk_ram, 0,
442 sizeof(Pico32xMem->drcblk_ram));
443 else
444 memset(Pico32xMem->drcblk_da[tcid - 1], 0,
445 sizeof(Pico32xMem->drcblk_da[0]));
446 }
447#if (DRC_DEBUG & 4)
448 tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
449#endif
450
451 for (i = 0; i < ram_sizes[tcid] / ADDR_TO_BLOCK_PAGE; i++)
452 rm_block_list(&inval_lookup[tcid][i]);
453}
454
455static void add_to_hashlist(struct block_entry *be, int tcache_id)
456{
457 u32 tcmask = hash_table_sizes[tcache_id] - 1;
458
459 be->next = HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);
460 HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask) = be;
461
462#if (DRC_DEBUG & 2)
463 if (be->next != NULL) {
464 printf(" %08x: hash collision with %08x\n",
465 be->pc, be->next->pc);
466 hash_collisions++;
467 }
468#endif
469}
470
471static void rm_from_hashlist(struct block_entry *be, int tcache_id)
472{
473 u32 tcmask = hash_table_sizes[tcache_id] - 1;
474 struct block_entry *cur, *prev;
475
476 cur = HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask);
477 if (cur == NULL)
478 goto missing;
479
480 if (be == cur) { // first
481 HASH_FUNC(hash_tables[tcache_id], be->pc, tcmask) = be->next;
482 return;
483 }
484
485 for (prev = cur, cur = cur->next; cur != NULL; cur = cur->next) {
486 if (cur == be) {
487 prev->next = cur->next;
488 return;
489 }
490 }
491
492missing:
493 dbg(1, "rm_from_hashlist: be %p %08x missing?", be, be->pc);
494}
495
496static struct block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id)
497{
498 struct block_entry *be;
499 struct block_desc *bd;
500 int tcache_id;
501 int *bcount;
502
503 // do a lookup to get tcache_id and check for an override
504 be = dr_get_entry(addr, is_slave, &tcache_id);
505 if (be != NULL)
506 dbg(1, "block override for %08x", addr);
507
508 bcount = &block_counts[tcache_id];
509 if (*bcount >= block_max_counts[tcache_id]) {
510 dbg(1, "bd overflow for tcache %d", tcache_id);
511 return NULL;
512 }
513
514 bd = &block_tables[tcache_id][*bcount];
515 bd->addr = addr;
516 bd->end_addr = end_addr;
517
518 bd->entry_count = 1;
519 bd->entryp[0].pc = addr;
520 bd->entryp[0].tcache_ptr = tcache_ptr;
521 bd->entryp[0].links = NULL;
522#if (DRC_DEBUG & 2)
523 bd->entryp[0].block = bd;
524 bd->refcount = 0;
525#endif
526 add_to_hashlist(&bd->entryp[0], tcache_id);
527
528 *blk_id = *bcount;
529 (*bcount)++;
530
531 return bd;
532}
533
534static void REGPARM(3) *dr_lookup_block(u32 pc, int is_slave, int *tcache_id)
535{
536 struct block_entry *be = NULL;
537 void *block = NULL;
538
539 be = dr_get_entry(pc, is_slave, tcache_id);
540 if (be != NULL)
541 block = be->tcache_ptr;
542
543#if (DRC_DEBUG & 2)
544 if (be != NULL)
545 be->block->refcount++;
546#endif
547 return block;
548}
549
550static void *dr_failure(void)
551{
552 lprintf("recompilation failed\n");
553 exit(1);
554}
555
556static void *dr_prepare_ext_branch(u32 pc, int is_slave, int tcache_id)
557{
558#if LINK_BRANCHES
559 struct block_link *bl = block_link_pool[tcache_id];
560 int cnt = block_link_pool_counts[tcache_id];
561 struct block_entry *be = NULL;
562 int target_tcache_id;
563 int i;
564
565 be = dr_get_entry(pc, is_slave, &target_tcache_id);
566 if (target_tcache_id != tcache_id)
567 return sh2_drc_dispatcher;
568
569 // reuse entries that were freed from the end of the pool
570 for (i = cnt - 1; i >= 0; i--)
571 if (bl[i].target_pc != 0)
572 break;
573 cnt = i + 1;
574 if (cnt >= block_link_pool_max_counts[tcache_id]) {
575 dbg(1, "bl overflow for tcache %d", tcache_id);
576 return NULL;
577 }
578 bl += cnt;
579 block_link_pool_counts[tcache_id]++;
580
581 bl->target_pc = pc;
582 bl->jump = tcache_ptr;
583
584 if (be != NULL) {
585 dbg(2, "- early link from %p to pc %08x", bl->jump, pc);
586 bl->next = be->links;
587 be->links = bl;
588 return be->tcache_ptr;
589 }
590 else {
591 bl->next = unresolved_links[tcache_id];
592 unresolved_links[tcache_id] = bl;
593 return sh2_drc_dispatcher;
594 }
595#else
596 return sh2_drc_dispatcher;
597#endif
598}
599
600static void dr_link_blocks(struct block_entry *be, int tcache_id)
601{
602#if LINK_BRANCHES
603 struct block_link *first = unresolved_links[tcache_id];
604 struct block_link *bl, *prev, *tmp;
605 u32 pc = be->pc;
606
607 for (bl = prev = first; bl != NULL; ) {
608 if (bl->target_pc == pc) {
609 dbg(2, "- link from %p to pc %08x", bl->jump, pc);
610 emith_jump_patch(bl->jump, tcache_ptr);
611
612 // move bl from unresolved_links to block_entry
613 tmp = bl->next;
614 bl->next = be->links;
615 be->links = bl;
616
617 if (bl == first)
618 first = prev = bl = tmp;
619 else
620 prev->next = bl = tmp;
621 continue;
622 }
623 prev = bl;
624 bl = bl->next;
625 }
626 unresolved_links[tcache_id] = first;
627
628 // could sync arm caches here, but that's unnecessary
629#endif
630}
631
632#define ADD_TO_ARRAY(array, count, item, failcode) \
633 array[count++] = item; \
634 if (count >= ARRAY_SIZE(array)) { \
635 dbg(1, "warning: " #array " overflow"); \
636 failcode; \
637 }
638
639static int find_in_array(u32 *array, size_t size, u32 what)
640{
641 size_t i;
642 for (i = 0; i < size; i++)
643 if (what == array[i])
644 return i;
645
646 return -1;
647}
648
649// ---------------------------------------------------------------
650
651// register cache / constant propagation stuff
652typedef enum {
653 RC_GR_READ,
654 RC_GR_WRITE,
655 RC_GR_RMW,
656} rc_gr_mode;
657
658static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking);
659
660// guest regs with constants
661static u32 dr_gcregs[24];
662// masks of constant and dirty regs
663static u32 dr_gcregs_mask;
664static u32 dr_gcregs_dirty;
665
666#if PROPAGATE_CONSTANTS
667static void gconst_new(sh2_reg_e r, u32 val)
668{
669 int i;
670
671 dr_gcregs_mask |= 1 << r;
672 dr_gcregs_dirty |= 1 << r;
673 dr_gcregs[r] = val;
674
675 // throw away old r that we might have cached
676 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
677 if ((reg_temp[i].type == HR_CACHED) &&
678 reg_temp[i].greg == r) {
679 reg_temp[i].type = HR_FREE;
680 reg_temp[i].flags = 0;
681 }
682 }
683}
684#endif
685
686static int gconst_get(sh2_reg_e r, u32 *val)
687{
688 if (dr_gcregs_mask & (1 << r)) {
689 *val = dr_gcregs[r];
690 return 1;
691 }
692 return 0;
693}
694
695static int gconst_check(sh2_reg_e r)
696{
697 if ((dr_gcregs_mask | dr_gcregs_dirty) & (1 << r))
698 return 1;
699 return 0;
700}
701
702// update hr if dirty, else do nothing
703static int gconst_try_read(int hr, sh2_reg_e r)
704{
705 if (dr_gcregs_dirty & (1 << r)) {
706 emith_move_r_imm(hr, dr_gcregs[r]);
707 dr_gcregs_dirty &= ~(1 << r);
708 return 1;
709 }
710 return 0;
711}
712
713static void gconst_check_evict(sh2_reg_e r)
714{
715 if (dr_gcregs_mask & (1 << r))
716 // no longer cached in reg, make dirty again
717 dr_gcregs_dirty |= 1 << r;
718}
719
720static void gconst_kill(sh2_reg_e r)
721{
722 dr_gcregs_mask &= ~(1 << r);
723 dr_gcregs_dirty &= ~(1 << r);
724}
725
726static void gconst_clean(void)
727{
728 int i;
729
730 for (i = 0; i < ARRAY_SIZE(dr_gcregs); i++)
731 if (dr_gcregs_dirty & (1 << i)) {
732 // using RC_GR_READ here: it will call gconst_try_read,
733 // cache the reg and mark it dirty.
734 rcache_get_reg_(i, RC_GR_READ, 0);
735 }
736}
737
738static void gconst_invalidate(void)
739{
740 dr_gcregs_mask = dr_gcregs_dirty = 0;
741}
742
743static u16 rcache_counter;
744
745static temp_reg_t *rcache_evict(void)
746{
747 // evict reg with oldest stamp
748 int i, oldest = -1;
749 u16 min_stamp = (u16)-1;
750
751 for (i = 0; i < ARRAY_SIZE(reg_temp); i++) {
752 if (reg_temp[i].type == HR_CACHED && !(reg_temp[i].flags & HRF_LOCKED) &&
753 reg_temp[i].stamp <= min_stamp) {
754 min_stamp = reg_temp[i].stamp;
755 oldest = i;
756 }
757 }
758
759 if (oldest == -1) {
760 printf("no registers to evict, aborting\n");
761 exit(1);
762 }
763
764 i = oldest;
765 if (reg_temp[i].type == HR_CACHED) {
766 if (reg_temp[i].flags & HRF_DIRTY)
767 // writeback
768 emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
769 gconst_check_evict(reg_temp[i].greg);
770 }
771
772 reg_temp[i].type = HR_FREE;
773 reg_temp[i].flags = 0;
774 return &reg_temp[i];
775}
776
777static int get_reg_static(sh2_reg_e r, rc_gr_mode mode)
778{
779 int i = reg_map_g2h[r];
780 if (i != -1) {
781 if (mode != RC_GR_WRITE)
782 gconst_try_read(i, r);
783 }
784 return i;
785}
786
787// note: must not be called when doing conditional code
788static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking)
789{
790 temp_reg_t *tr;
791 int i, ret;
792
793 // maybe statically mapped?
794 ret = get_reg_static(r, mode);
795 if (ret != -1)
796 goto end;
797
798 rcache_counter++;
799
800 // maybe already cached?
801 // if so, prefer it over gconst (they must be in sync)
802 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
803 if (reg_temp[i].type == HR_CACHED && reg_temp[i].greg == r) {
804 reg_temp[i].stamp = rcache_counter;
805 if (mode != RC_GR_READ)
806 reg_temp[i].flags |= HRF_DIRTY;
807 ret = reg_temp[i].hreg;
808 goto end;
809 }
810 }
811
812 // use any free reg
813 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
814 if (reg_temp[i].type == HR_FREE) {
815 tr = &reg_temp[i];
816 goto do_alloc;
817 }
818 }
819
820 tr = rcache_evict();
821
822do_alloc:
823 tr->type = HR_CACHED;
824 if (do_locking)
825 tr->flags |= HRF_LOCKED;
826 if (mode != RC_GR_READ)
827 tr->flags |= HRF_DIRTY;
828 tr->greg = r;
829 tr->stamp = rcache_counter;
830 ret = tr->hreg;
831
832 if (mode != RC_GR_WRITE) {
833 if (gconst_check(r)) {
834 if (gconst_try_read(ret, r))
835 tr->flags |= HRF_DIRTY;
836 }
837 else
838 emith_ctx_read(tr->hreg, r * 4);
839 }
840
841end:
842 if (mode != RC_GR_READ)
843 gconst_kill(r);
844
845 return ret;
846}
847
848static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode)
849{
850 return rcache_get_reg_(r, mode, 1);
851}
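// typical usage pattern in the opcode handlers below (illustrative):
//   tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
//   emith_add_r_imm(tmp, 4);
// the host reg is marked dirty and written back later by rcache_clean()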
852
853static int rcache_get_tmp(void)
854{
855 temp_reg_t *tr;
856 int i;
857
858 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
859 if (reg_temp[i].type == HR_FREE) {
860 tr = &reg_temp[i];
861 goto do_alloc;
862 }
863
864 tr = rcache_evict();
865
866do_alloc:
867 tr->type = HR_TEMP;
868 return tr->hreg;
869}
870
871static int rcache_get_arg_id(int arg)
872{
873 int i, r = 0;
874 host_arg2reg(r, arg);
875
876 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
877 if (reg_temp[i].hreg == r)
878 break;
879
880 if (i == ARRAY_SIZE(reg_temp)) // can't happen
881 exit(1);
882
883 if (reg_temp[i].type == HR_CACHED) {
884 // writeback
885 if (reg_temp[i].flags & HRF_DIRTY)
886 emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
887 gconst_check_evict(reg_temp[i].greg);
888 }
889 else if (reg_temp[i].type == HR_TEMP) {
890 printf("arg %d reg %d already used, aborting\n", arg, r);
891 exit(1);
892 }
893
894 reg_temp[i].type = HR_FREE;
895 reg_temp[i].flags = 0;
896
897 return i;
898}
899
900// get a reg to be used as function arg
901static int rcache_get_tmp_arg(int arg)
902{
903 int id = rcache_get_arg_id(arg);
904 reg_temp[id].type = HR_TEMP;
905
906 return reg_temp[id].hreg;
907}
908
909// same as above, but also caches the guest reg; RC_GR_READ only.
910static int rcache_get_reg_arg(int arg, sh2_reg_e r)
911{
912 int i, srcr, dstr, dstid;
913 int dirty = 0, src_dirty = 0;
914
915 dstid = rcache_get_arg_id(arg);
916 dstr = reg_temp[dstid].hreg;
917
918 // maybe already statically mapped?
919 srcr = get_reg_static(r, RC_GR_READ);
920 if (srcr != -1)
921 goto do_cache;
922
923 // maybe already cached?
924 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
925 if ((reg_temp[i].type == HR_CACHED) &&
926 reg_temp[i].greg == r)
927 {
928 srcr = reg_temp[i].hreg;
929 if (reg_temp[i].flags & HRF_DIRTY)
930 src_dirty = 1;
931 goto do_cache;
932 }
933 }
934
935 // must read
936 srcr = dstr;
937 if (gconst_check(r)) {
938 if (gconst_try_read(srcr, r))
939 dirty = 1;
940 }
941 else
942 emith_ctx_read(srcr, r * 4);
943
944do_cache:
945 if (dstr != srcr)
946 emith_move_r_r(dstr, srcr);
947#if 1
948 else
949 dirty |= src_dirty;
950
951 if (dirty)
952 // must clean, callers might want to modify the arg before call
953 emith_ctx_write(dstr, r * 4);
954#else
955 if (dirty)
956 reg_temp[dstid].flags |= HRF_DIRTY;
957#endif
958
959 reg_temp[dstid].stamp = ++rcache_counter;
960 reg_temp[dstid].type = HR_CACHED;
961 reg_temp[dstid].greg = r;
962 reg_temp[dstid].flags |= HRF_LOCKED;
963 return dstr;
964}
965
966static void rcache_free_tmp(int hr)
967{
968 int i;
969 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
970 if (reg_temp[i].hreg == hr)
971 break;
972
973 if (i == ARRAY_SIZE(reg_temp) || reg_temp[i].type != HR_TEMP) {
974 printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, i < ARRAY_SIZE(reg_temp) ? (int)reg_temp[i].type : -1); // avoid OOB read when hr wasn't found
975 return;
976 }
977
978 reg_temp[i].type = HR_FREE;
979 reg_temp[i].flags = 0;
980}
981
982static void rcache_unlock(int hr)
983{
984 int i;
985 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
986 if (reg_temp[i].type == HR_CACHED && reg_temp[i].hreg == hr)
987 reg_temp[i].flags &= ~HRF_LOCKED;
988}
989
990static void rcache_unlock_all(void)
991{
992 int i;
993 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
994 reg_temp[i].flags &= ~HRF_LOCKED;
995}
996
997static inline u32 rcache_used_hreg_mask(void)
998{
999 u32 mask = 0;
1000 int i;
1001
1002 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
1003 if (reg_temp[i].type != HR_FREE)
1004 mask |= 1 << reg_temp[i].hreg;
1005
1006 return mask;
1007}
1008
1009static void rcache_clean(void)
1010{
1011 int i;
1012 gconst_clean();
1013
1014 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
1015 if (reg_temp[i].type == HR_CACHED && (reg_temp[i].flags & HRF_DIRTY)) {
1016 // writeback
1017 emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
1018 reg_temp[i].flags &= ~HRF_DIRTY;
1019 }
1020}
1021
1022static void rcache_invalidate(void)
1023{
1024 int i;
1025 for (i = 0; i < ARRAY_SIZE(reg_temp); i++) {
1026 reg_temp[i].type = HR_FREE;
1027 reg_temp[i].flags = 0;
1028 }
1029 rcache_counter = 0;
1030
1031 gconst_invalidate();
1032}
1033
1034static void rcache_flush(void)
1035{
1036 rcache_clean();
1037 rcache_invalidate();
1038}
1039
1040// ---------------------------------------------------------------
1041
1042static int emit_get_rbase_and_offs(u32 a, u32 *offs)
1043{
1044 u32 mask = 0;
1045 int poffs;
1046 int hr;
1047
1048 poffs = dr_ctx_get_mem_ptr(a, &mask);
1049 if (poffs == -1)
1050 return -1;
1051
1052 // XXX: could use some related reg
1053 hr = rcache_get_tmp();
1054 emith_ctx_read(hr, poffs);
1055 emith_add_r_imm(hr, a & mask & ~0xff);
1056 *offs = a & 0xff; // XXX: ARM oriented..
1057 return hr;
1058}
1059
1060static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
1061{
1062#if PROPAGATE_CONSTANTS
1063 gconst_new(dst, imm);
1064#else
1065 int hr = rcache_get_reg(dst, RC_GR_WRITE);
1066 emith_move_r_imm(hr, imm);
1067#endif
1068}
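// with PROPAGATE_CONSTANTS the immediate is only recorded in dr_gcregs[]; it
// reaches a host register lazily, via gconst_try_read() on the next read of
// dst or via gconst_clean()/rcache_clean()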
1069
1070static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
1071{
1072 int hr_d = rcache_get_reg(dst, RC_GR_WRITE);
1073 int hr_s = rcache_get_reg(src, RC_GR_READ);
1074
1075 emith_move_r_r(hr_d, hr_s);
1076}
1077
1078// T must be clear, and comparison done just before this
1079static void emit_or_t_if_eq(int srr)
1080{
1081 EMITH_SJMP_START(DCOND_NE);
1082 emith_or_r_imm_c(DCOND_EQ, srr, T);
1083 EMITH_SJMP_END(DCOND_NE);
1084}
1085
1086// arguments must be ready
1087// reg cache must be clean before call
1088static int emit_memhandler_read_(int size, int ram_check)
1089{
1090 int arg0, arg1;
1091 host_arg2reg(arg0, 0);
1092
1093 rcache_clean();
1094
1095 // must writeback cycles for poll detection stuff
1096 // FIXME: rm
1097 if (reg_map_g2h[SHR_SR] != -1)
1098 emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
1099
1100 arg1 = rcache_get_tmp_arg(1);
1101 emith_move_r_r(arg1, CONTEXT_REG);
1102
1103#ifndef PDB_NET
1104 if (ram_check && Pico.rom == (void *)0x02000000 && Pico32xMem->sdram == (void *)0x06000000) {
1105 int tmp = rcache_get_tmp();
1106 emith_and_r_r_imm(tmp, arg0, 0xfb000000);
1107 emith_cmp_r_imm(tmp, 0x02000000);
1108 switch (size) {
1109 case 0: // 8
1110 EMITH_SJMP3_START(DCOND_NE);
1111 emith_eor_r_imm_c(DCOND_EQ, arg0, 1);
1112 emith_read8_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
1113 EMITH_SJMP3_MID(DCOND_NE);
1114 emith_call_cond(DCOND_NE, sh2_drc_read8);
1115 EMITH_SJMP3_END();
1116 break;
1117 case 1: // 16
1118 EMITH_SJMP3_START(DCOND_NE);
1119 emith_read16_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
1120 EMITH_SJMP3_MID(DCOND_NE);
1121 emith_call_cond(DCOND_NE, sh2_drc_read16);
1122 EMITH_SJMP3_END();
1123 break;
1124 case 2: // 32
1125 EMITH_SJMP3_START(DCOND_NE);
1126 emith_read_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
1127 emith_ror_c(DCOND_EQ, arg0, arg0, 16);
1128 EMITH_SJMP3_MID(DCOND_NE);
1129 emith_call_cond(DCOND_NE, sh2_drc_read32);
1130 EMITH_SJMP3_END();
1131 break;
1132 }
1133 }
1134 else
1135#endif
1136 {
1137 switch (size) {
1138 case 0: // 8
1139 emith_call(sh2_drc_read8);
1140 break;
1141 case 1: // 16
1142 emith_call(sh2_drc_read16);
1143 break;
1144 case 2: // 32
1145 emith_call(sh2_drc_read32);
1146 break;
1147 }
1148 }
1149 rcache_invalidate();
1150
1151 if (reg_map_g2h[SHR_SR] != -1)
1152 emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
1153
1154 // assuming arg0 and the retval reg match
1155 return rcache_get_tmp_arg(0);
1156}
1157
1158static int emit_memhandler_read(int size)
1159{
1160 return emit_memhandler_read_(size, 1);
1161}
1162
1163static int emit_memhandler_read_rr(sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
1164{
1165 int hr, hr2, ram_check = 1;
1166 u32 val, offs2;
1167
1168 if (gconst_get(rs, &val)) {
1169 hr = emit_get_rbase_and_offs(val + offs, &offs2);
1170 if (hr != -1) {
1171 hr2 = rcache_get_reg(rd, RC_GR_WRITE);
1172 switch (size) {
1173 case 0: // 8
1174 emith_read8_r_r_offs(hr2, hr, offs2 ^ 1);
1175 emith_sext(hr2, hr2, 8);
1176 break;
1177 case 1: // 16
1178 emith_read16_r_r_offs(hr2, hr, offs2);
1179 emith_sext(hr2, hr2, 16);
1180 break;
1181 case 2: // 32
1182 emith_read_r_r_offs(hr2, hr, offs2);
1183 emith_ror(hr2, hr2, 16);
1184 break;
1185 }
1186 rcache_free_tmp(hr);
1187 return hr2;
1188 }
1189
1190 ram_check = 0;
1191 }
1192
1193 hr = rcache_get_reg_arg(0, rs);
1194 if (offs != 0)
1195 emith_add_r_imm(hr, offs);
1196 hr = emit_memhandler_read_(size, ram_check);
1197 hr2 = rcache_get_reg(rd, RC_GR_WRITE);
1198 if (size != 2) {
1199 emith_sext(hr2, hr, (size == 1) ? 16 : 8);
1200 } else
1201 emith_move_r_r(hr2, hr);
1202 rcache_free_tmp(hr);
1203
1204 return hr2;
1205}
1206
1207static void emit_memhandler_write(int size, u32 pc)
1208{
1209 int ctxr;
1210 host_arg2reg(ctxr, 2);
1211 if (reg_map_g2h[SHR_SR] != -1)
1212 emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
1213
1214 switch (size) {
1215 case 0: // 8
1216 // XXX: consider inlining sh2_drc_write8
1217 rcache_clean();
1218 emith_call(sh2_drc_write8);
1219 break;
1220 case 1: // 16
1221 rcache_clean();
1222 emith_call(sh2_drc_write16);
1223 break;
1224 case 2: // 32
1225 emith_move_r_r(ctxr, CONTEXT_REG);
1226 emith_call(sh2_drc_write32);
1227 break;
1228 }
1229
1230 rcache_invalidate();
1231 if (reg_map_g2h[SHR_SR] != -1)
1232 emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
1233}
1234
1235// @(Rx,Ry)
1236static int emit_indirect_indexed_read(int rx, int ry, int size)
1237{
1238 int a0, t;
1239 a0 = rcache_get_reg_arg(0, rx);
1240 t = rcache_get_reg(ry, RC_GR_READ);
1241 emith_add_r_r(a0, t);
1242 return emit_memhandler_read(size);
1243}
1244
1245// read @Rn, @Rm
1246static void emit_indirect_read_double(u32 *rnr, u32 *rmr, int rn, int rm, int size)
1247{
1248 int tmp;
1249
1250 rcache_get_reg_arg(0, rn);
1251 tmp = emit_memhandler_read(size);
1252 emith_ctx_write(tmp, offsetof(SH2, drc_tmp));
1253 rcache_free_tmp(tmp);
1254 tmp = rcache_get_reg(rn, RC_GR_RMW);
1255 emith_add_r_imm(tmp, 1 << size);
1256 rcache_unlock(tmp);
1257
1258 rcache_get_reg_arg(0, rm);
1259 *rmr = emit_memhandler_read(size);
1260 *rnr = rcache_get_tmp();
1261 emith_ctx_read(*rnr, offsetof(SH2, drc_tmp));
1262 tmp = rcache_get_reg(rm, RC_GR_RMW);
1263 emith_add_r_imm(tmp, 1 << size);
1264 rcache_unlock(tmp);
1265}
1266
1267static void emit_do_static_regs(int is_write, int tmpr)
1268{
1269 int i, r, count;
1270
1271 for (i = 0; i < ARRAY_SIZE(reg_map_g2h); i++) {
1272 r = reg_map_g2h[i];
1273 if (r == -1)
1274 continue;
1275
1276 for (count = 1; i < ARRAY_SIZE(reg_map_g2h) - 1; i++, r++) {
1277 if (reg_map_g2h[i + 1] != r + 1)
1278 break;
1279 count++;
1280 }
1281
1282 if (count > 1) {
1283 // i, r point to last item
1284 if (is_write)
1285 emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
1286 else
1287 emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
1288 } else {
1289 if (is_write)
1290 emith_ctx_write(r, i * 4);
1291 else
1292 emith_ctx_read(r, i * 4);
1293 }
1294 }
1295}
1296
1297static void emit_block_entry(void)
1298{
1299 int arg0;
1300
1301 host_arg2reg(arg0, 0);
1302
1303#if (DRC_DEBUG & 8) || defined(PDB)
1304 int arg1, arg2;
1305 host_arg2reg(arg1, 1);
1306 host_arg2reg(arg2, 2);
1307
1308 emit_do_static_regs(1, arg2);
1309 emith_move_r_r(arg1, CONTEXT_REG);
1310 emith_move_r_r(arg2, rcache_get_reg(SHR_SR, RC_GR_READ));
1311 emith_call(sh2_drc_log_entry);
1312 rcache_invalidate();
1313#endif
1314 emith_tst_r_r(arg0, arg0);
1315 EMITH_SJMP_START(DCOND_EQ);
1316 emith_jump_reg_c(DCOND_NE, arg0);
1317 EMITH_SJMP_END(DCOND_EQ);
1318}
1319
1320#define DELAY_SAVE_T(sr) { \
1321 emith_bic_r_imm(sr, T_save); \
1322 emith_tst_r_imm(sr, T); \
1323 EMITH_SJMP_START(DCOND_EQ); \
1324 emith_or_r_imm_c(DCOND_NE, sr, T_save); \
1325 EMITH_SJMP_END(DCOND_EQ); \
1326}
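// used when a delay slot insn would clobber the T bit that the branch before
// it still needs (see the delay_dep_fw handling in sh2_translate below)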
1327
1328#define FLUSH_CYCLES(sr) \
1329 if (cycles > 0) { \
1330 emith_sub_r_imm(sr, cycles << 12); \
1331 cycles = 0; \
1332 }
1333
1334static void *dr_get_pc_base(u32 pc, int is_slave);
1335
1336static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
1337{
1338 u32 branch_target_pc[MAX_LOCAL_BRANCHES];
1339 void *branch_target_ptr[MAX_LOCAL_BRANCHES];
1340 int branch_target_count = 0;
1341 void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
1342 u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
1343 int branch_patch_count = 0;
1344 u32 literal_addr[MAX_LITERALS];
1345 int literal_addr_count = 0;
1346 u8 op_flags[BLOCK_INSN_LIMIT];
1347 struct {
1348 u32 test_irq:1;
1349 u32 pending_branch_direct:1;
1350 u32 pending_branch_indirect:1;
1351 } drcf = { 0, };
1352
1353 // PC of current, first, last SH2 insn
1354 u32 pc, base_pc, end_pc;
1355 u32 end_literals;
1356 void *block_entry_ptr;
1357 struct block_desc *block;
1358 u16 *dr_pc_base;
1359 struct op_data *opd;
1360 int blkid_main = 0;
1361 int skip_op = 0;
1362 u32 tmp, tmp2;
1363 int cycles;
1364 int i, v;
1365 int op;
1366
1367 base_pc = sh2->pc;
1368
1369 // get base/validate PC
1370 dr_pc_base = dr_get_pc_base(base_pc, sh2->is_slave);
1371 if (dr_pc_base == (void *)-1) {
1372 printf("invalid PC, aborting: %08x\n", base_pc);
1373 // FIXME: be less destructive
1374 exit(1);
1375 }
1376
1377 tcache_ptr = tcache_ptrs[tcache_id];
1378
1379 // predict tcache overflow
1380 tmp = tcache_ptr - tcache_bases[tcache_id];
1381 if (tmp > tcache_sizes[tcache_id] - MAX_BLOCK_SIZE) {
1382 dbg(1, "tcache %d overflow", tcache_id);
1383 return NULL;
1384 }
1385
1386 // initial passes to disassemble and analyze the block
1387 scan_block(base_pc, sh2->is_slave, op_flags, &end_pc, &end_literals);
1388
1389 block = dr_add_block(base_pc, end_literals, sh2->is_slave, &blkid_main);
1390 if (block == NULL)
1391 return NULL;
1392
1393 block_entry_ptr = tcache_ptr;
1394 dbg(2, "== %csh2 block #%d,%d %08x-%08x -> %p", sh2->is_slave ? 's' : 'm',
1395 tcache_id, blkid_main, base_pc, end_pc, block_entry_ptr);
1396
1397 dr_link_blocks(&block->entryp[0], tcache_id);
1398
1399 // collect branch_targets that don't land on delay slots
1400 for (pc = base_pc, i = 0; pc < end_pc; i++, pc += 2) {
1401 if (!(op_flags[i] & OF_BTARGET))
1402 continue;
1403 if (op_flags[i] & OF_DELAY_OP) {
1404 op_flags[i] &= ~OF_BTARGET;
1405 continue;
1406 }
1407 ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break);
1408 }
1409
1410 if (branch_target_count > 0) {
1411 memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
1412 }
1413
1414 // -------------------------------------------------
1415 // 3rd pass: actual compilation
1416 pc = base_pc;
1417 cycles = 0;
1418 for (i = 0; pc < end_pc; i++)
1419 {
1420 u32 delay_dep_fw = 0, delay_dep_bk = 0;
1421 u32 tmp3, tmp4, sr;
1422
1423 opd = &ops[i];
1424 op = FETCH_OP(pc);
1425
1426#if (DRC_DEBUG & 2)
1427 insns_compiled++;
1428#endif
1429#if (DRC_DEBUG & 4)
1430 DasmSH2(sh2dasm_buff, pc, op);
1431 printf("%c%08x %04x %s\n", (op_flags[i] & OF_BTARGET) ? '*' : ' ',
1432 pc, op, sh2dasm_buff);
1433#endif
1434
1435 if ((op_flags[i] & OF_BTARGET) || pc == base_pc)
1436 {
1437 if (pc != base_pc)
1438 {
1439 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1440 FLUSH_CYCLES(sr);
1441 rcache_flush();
1442
1443 // make block entry
1444 v = block->entry_count;
1445 if (v < ARRAY_SIZE(block->entryp)) {
1446 block->entryp[v].pc = pc;
1447 block->entryp[v].tcache_ptr = tcache_ptr;
1448 block->entryp[v].links = NULL;
1449#if (DRC_DEBUG & 2)
1450 block->entryp[v].block = block;
1451#endif
1452 add_to_hashlist(&block->entryp[v], tcache_id);
1453 block->entry_count++;
1454
1455 dbg(2, "-- %csh2 block #%d,%d entry %08x -> %p",
1456 sh2->is_slave ? 's' : 'm', tcache_id, blkid_main,
1457 pc, tcache_ptr);
1458
1459 // since we made a block entry, link any other blocks
1460 // that jump to current pc
1461 dr_link_blocks(&block->entryp[v], tcache_id);
1462 }
1463 else {
1464 dbg(1, "too many entryp for block #%d,%d pc=%08x",
1465 tcache_id, blkid_main, pc);
1466 }
1467
1468 do_host_disasm(tcache_id);
1469 }
1470
1471 v = find_in_array(branch_target_pc, branch_target_count, pc);
1472 if (v >= 0)
1473 branch_target_ptr[v] = tcache_ptr;
1474
1475 // must update PC
1476 emit_move_r_imm32(SHR_PC, pc);
1477 rcache_clean();
1478
1479 // check cycles
1480 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
1481 emith_cmp_r_imm(sr, 0);
1482 emith_jump_cond(DCOND_LE, sh2_drc_exit);
1483 do_host_disasm(tcache_id);
1484 rcache_unlock_all();
1485 }
1486
1487#ifdef DRC_CMP
1488 if (!(op_flags[i] & OF_DELAY_OP)) {
1489 emit_move_r_imm32(SHR_PC, pc);
1490 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1491 FLUSH_CYCLES(sr);
1492 rcache_clean();
1493
1494 tmp = rcache_used_hreg_mask();
1495 emith_save_caller_regs(tmp);
1496 emit_do_static_regs(1, 0);
1497 emith_pass_arg_r(0, CONTEXT_REG);
1498 emith_call(do_sh2_cmp);
1499 emith_restore_caller_regs(tmp);
1500 }
1501#endif
1502
1503 pc += 2;
1504
1505 if (skip_op > 0) {
1506 skip_op--;
1507 continue;
1508 }
1509
1510 if (op_flags[i] & OF_DELAY_OP)
1511 {
1512 // handle delay slot dependencies
1513 delay_dep_fw = opd->dest & ops[i-1].source;
1514 delay_dep_bk = opd->source & ops[i-1].dest;
1515 if (delay_dep_fw & BITMASK1(SHR_T)) {
1516 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1517 DELAY_SAVE_T(sr);
1518 }
1519 if (delay_dep_fw & ~BITMASK1(SHR_T))
1520 dbg(1, "unhandled delay_dep_fw: %x", delay_dep_fw & ~BITMASK1(SHR_T));
1521 if (delay_dep_bk)
1522 dbg(1, "unhandled delay_dep_bk: %x", delay_dep_bk);
1523 }
1524
1525 switch (opd->op)
1526 {
1527 case OP_BRANCH:
1528 case OP_BRANCH_CT:
1529 case OP_BRANCH_CF:
1530 if (opd->dest & BITMASK1(SHR_PR))
1531 emit_move_r_imm32(SHR_PR, pc + 2);
1532 drcf.pending_branch_direct = 1;
1533 goto end_op;
1534
1535 case OP_BRANCH_R:
1536 if (opd->dest & BITMASK1(SHR_PR))
1537 emit_move_r_imm32(SHR_PR, pc + 2);
1538 emit_move_r_r(SHR_PC, opd->rm);
1539 drcf.pending_branch_indirect = 1;
1540 goto end_op;
1541
1542 case OP_BRANCH_RF:
1543 tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE);
1544 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1545 if (opd->dest & BITMASK1(SHR_PR)) {
1546 tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE);
1547 emith_move_r_imm(tmp3, pc + 2);
1548 emith_add_r_r_r(tmp, tmp2, tmp3);
1549 }
1550 else {
1551 emith_move_r_r(tmp, tmp2);
1552 emith_add_r_imm(tmp, pc + 2);
1553 }
1554 drcf.pending_branch_indirect = 1;
1555 goto end_op;
1556
1557 case OP_SLEEP:
1558 printf("TODO sleep\n");
1559 goto end_op;
1560
1561 case OP_RTE:
1562 // pop PC
1563 emit_memhandler_read_rr(SHR_PC, SHR_SP, 0, 2);
1564 // pop SR
1565 tmp = rcache_get_reg_arg(0, SHR_SP);
1566 emith_add_r_imm(tmp, 4);
1567 tmp = emit_memhandler_read(2);
1568 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1569 emith_write_sr(sr, tmp);
1570 rcache_free_tmp(tmp);
1571 tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
1572 emith_add_r_imm(tmp, 4*2);
1573 drcf.test_irq = 1;
1574 drcf.pending_branch_indirect = 1;
1575 break;
1576 }
1577
1578 switch ((op >> 12) & 0x0f)
1579 {
1580 /////////////////////////////////////////////
1581 case 0x00:
1582 switch (op & 0x0f)
1583 {
1584 case 0x02:
1585 tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1586 switch (GET_Fx())
1587 {
1588 case 0: // STC SR,Rn 0000nnnn00000010
1589 tmp2 = SHR_SR;
1590 break;
1591 case 1: // STC GBR,Rn 0000nnnn00010010
1592 tmp2 = SHR_GBR;
1593 break;
1594 case 2: // STC VBR,Rn 0000nnnn00100010
1595 tmp2 = SHR_VBR;
1596 break;
1597 default:
1598 goto default_;
1599 }
1600 tmp3 = rcache_get_reg(tmp2, RC_GR_READ);
1601 emith_move_r_r(tmp, tmp3);
1602 if (tmp2 == SHR_SR)
1603 emith_clear_msb(tmp, tmp, 22); // reserved bits defined by ISA as 0
1604 goto end_op;
1605 case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
1606 case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
1607 case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
1608 rcache_clean();
1609 tmp = rcache_get_reg_arg(1, GET_Rm());
1610 tmp2 = rcache_get_reg_arg(0, SHR_R0);
1611 tmp3 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1612 emith_add_r_r(tmp2, tmp3);
1613 emit_memhandler_write(op & 3, pc);
1614 goto end_op;
1615 case 0x07:
1616 // MUL.L Rm,Rn 0000nnnnmmmm0111
1617 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
1618 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1619 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1620 emith_mul(tmp3, tmp2, tmp);
1621 goto end_op;
1622 case 0x08:
1623 switch (GET_Fx())
1624 {
1625 case 0: // CLRT 0000000000001000
1626 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1627 emith_bic_r_imm(sr, T);
1628 break;
1629 case 1: // SETT 0000000000011000
1630 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1631 emith_or_r_imm(sr, T);
1632 break;
1633 case 2: // CLRMAC 0000000000101000
1634 emit_move_r_imm32(SHR_MACL, 0);
1635 emit_move_r_imm32(SHR_MACH, 0);
1636 break;
1637 default:
1638 goto default_;
1639 }
1640 goto end_op;
1641 case 0x09:
1642 switch (GET_Fx())
1643 {
1644 case 0: // NOP 0000000000001001
1645 break;
1646 case 1: // DIV0U 0000000000011001
1647 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1648 emith_bic_r_imm(sr, M|Q|T);
1649 break;
1650 case 2: // MOVT Rn 0000nnnn00101001
1651 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
1652 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1653 emith_clear_msb(tmp2, sr, 31);
1654 break;
1655 default:
1656 goto default_;
1657 }
1658 goto end_op;
1659 case 0x0a:
1660 tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1661 switch (GET_Fx())
1662 {
1663 case 0: // STS MACH,Rn 0000nnnn00001010
1664 tmp2 = SHR_MACH;
1665 break;
1666 case 1: // STS MACL,Rn 0000nnnn00011010
1667 tmp2 = SHR_MACL;
1668 break;
1669 case 2: // STS PR,Rn 0000nnnn00101010
1670 tmp2 = SHR_PR;
1671 break;
1672 default:
1673 goto default_;
1674 }
1675 tmp2 = rcache_get_reg(tmp2, RC_GR_READ);
1676 emith_move_r_r(tmp, tmp2);
1677 goto end_op;
1678 case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
1679 case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
1680 case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
1681 tmp = emit_indirect_indexed_read(SHR_R0, GET_Rm(), op & 3);
1682 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1683 if ((op & 3) != 2) {
1684 emith_sext(tmp2, tmp, (op & 1) ? 16 : 8);
1685 } else
1686 emith_move_r_r(tmp2, tmp);
1687 rcache_free_tmp(tmp);
1688 goto end_op;
1689 case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
1690 emit_indirect_read_double(&tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
1691 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW);
1692 /* MS 16 MAC bits unused if saturated */
1693 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
1694 emith_tst_r_imm(sr, S);
1695 EMITH_SJMP_START(DCOND_EQ);
1696 emith_clear_msb_c(DCOND_NE, tmp4, tmp4, 16);
1697 EMITH_SJMP_END(DCOND_EQ);
1698 rcache_unlock(sr);
1699 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW); // might evict SR
1700 emith_mula_s64(tmp3, tmp4, tmp, tmp2);
1701 rcache_free_tmp(tmp2);
1702 sr = rcache_get_reg(SHR_SR, RC_GR_READ); // reget just in case
1703 emith_tst_r_imm(sr, S);
1704
1705 EMITH_JMP_START(DCOND_EQ);
1706 emith_asr(tmp, tmp4, 15);
1707 emith_cmp_r_imm(tmp, -1); // negative overflow (0x80000000..0xffff7fff)
1708 EMITH_SJMP_START(DCOND_GE);
1709 emith_move_r_imm_c(DCOND_LT, tmp4, 0x8000);
1710 emith_move_r_imm_c(DCOND_LT, tmp3, 0x0000);
1711 EMITH_SJMP_END(DCOND_GE);
1712 emith_cmp_r_imm(tmp, 0); // positive overflow (0x00008000..0x7fffffff)
1713 EMITH_SJMP_START(DCOND_LE);
1714 emith_move_r_imm_c(DCOND_GT, tmp4, 0x00007fff);
1715 emith_move_r_imm_c(DCOND_GT, tmp3, 0xffffffff);
1716 EMITH_SJMP_END(DCOND_LE);
1717 EMITH_JMP_END(DCOND_EQ);
1718
1719 rcache_free_tmp(tmp);
1720 goto end_op;
1721 }
1722 goto default_;
1723
1724 /////////////////////////////////////////////
1725 case 0x01:
1726 // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
1727 rcache_clean();
1728 tmp = rcache_get_reg_arg(0, GET_Rn());
1729 tmp2 = rcache_get_reg_arg(1, GET_Rm());
1730 if (op & 0x0f)
1731 emith_add_r_imm(tmp, (op & 0x0f) * 4);
1732 emit_memhandler_write(2, pc);
1733 goto end_op;
1734
1735 case 0x02:
1736 switch (op & 0x0f)
1737 {
1738 case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
1739 case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
1740 case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
1741 rcache_clean();
1742 rcache_get_reg_arg(0, GET_Rn());
1743 rcache_get_reg_arg(1, GET_Rm());
1744 emit_memhandler_write(op & 3, pc);
1745 goto end_op;
1746 case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
1747 case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
1748 case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
1749 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1750 emith_sub_r_imm(tmp, (1 << (op & 3)));
1751 rcache_clean();
1752 rcache_get_reg_arg(0, GET_Rn());
1753 rcache_get_reg_arg(1, GET_Rm());
1754 emit_memhandler_write(op & 3, pc);
1755 goto end_op;
1756 case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
1757 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1758 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1759 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1760 emith_bic_r_imm(sr, M|Q|T);
1761 emith_tst_r_imm(tmp2, (1<<31));
1762 EMITH_SJMP_START(DCOND_EQ);
1763 emith_or_r_imm_c(DCOND_NE, sr, Q);
1764 EMITH_SJMP_END(DCOND_EQ);
1765 emith_tst_r_imm(tmp3, (1<<31));
1766 EMITH_SJMP_START(DCOND_EQ);
1767 emith_or_r_imm_c(DCOND_NE, sr, M);
1768 EMITH_SJMP_END(DCOND_EQ);
1769 emith_teq_r_r(tmp2, tmp3);
1770 EMITH_SJMP_START(DCOND_PL);
1771 emith_or_r_imm_c(DCOND_MI, sr, T);
1772 EMITH_SJMP_END(DCOND_PL);
1773 goto end_op;
1774 case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
1775 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1776 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1777 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1778 emith_bic_r_imm(sr, T);
1779 emith_tst_r_r(tmp2, tmp3);
1780 emit_or_t_if_eq(sr);
1781 goto end_op;
1782 case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
1783 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1784 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1785 emith_and_r_r(tmp, tmp2);
1786 goto end_op;
1787 case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
1788 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1789 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1790 emith_eor_r_r(tmp, tmp2);
1791 goto end_op;
1792 case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
1793 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1794 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1795 emith_or_r_r(tmp, tmp2);
1796 goto end_op;
1797 case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
1798 tmp = rcache_get_tmp();
1799 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1800 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1801 emith_eor_r_r_r(tmp, tmp2, tmp3);
1802 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1803 emith_bic_r_imm(sr, T);
1804 emith_tst_r_imm(tmp, 0x000000ff);
1805 emit_or_t_if_eq(tmp);
1806 emith_tst_r_imm(tmp, 0x0000ff00);
1807 emit_or_t_if_eq(tmp);
1808 emith_tst_r_imm(tmp, 0x00ff0000);
1809 emit_or_t_if_eq(tmp);
1810 emith_tst_r_imm(tmp, 0xff000000);
1811 emit_or_t_if_eq(tmp);
1812 rcache_free_tmp(tmp);
1813 goto end_op;
1814 case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
1815 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1816 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1817 emith_lsr(tmp, tmp, 16);
1818 emith_or_r_r_lsl(tmp, tmp2, 16);
1819 goto end_op;
1820 case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
1821 case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
1822 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1823 tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1824 if (op & 1) {
1825 emith_sext(tmp, tmp2, 16);
1826 } else
1827 emith_clear_msb(tmp, tmp2, 16);
1828 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1829 tmp2 = rcache_get_tmp();
1830 if (op & 1) {
1831 emith_sext(tmp2, tmp3, 16);
1832 } else
1833 emith_clear_msb(tmp2, tmp3, 16);
1834 emith_mul(tmp, tmp, tmp2);
1835 rcache_free_tmp(tmp2);
1836 goto end_op;
1837 }
1838 goto default_;
1839
1840 /////////////////////////////////////////////
1841 case 0x03:
1842 switch (op & 0x0f)
1843 {
1844 case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
1845 case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
1846 case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
1847 case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
1848 case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
1849 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1850 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1851 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1852 emith_bic_r_imm(sr, T);
1853 emith_cmp_r_r(tmp2, tmp3);
1854 switch (op & 0x07)
1855 {
1856 case 0x00: // CMP/EQ
1857 emit_or_t_if_eq(sr);
1858 break;
1859 case 0x02: // CMP/HS
1860 EMITH_SJMP_START(DCOND_LO);
1861 emith_or_r_imm_c(DCOND_HS, sr, T);
1862 EMITH_SJMP_END(DCOND_LO);
1863 break;
1864 case 0x03: // CMP/GE
1865 EMITH_SJMP_START(DCOND_LT);
1866 emith_or_r_imm_c(DCOND_GE, sr, T);
1867 EMITH_SJMP_END(DCOND_LT);
1868 break;
1869 case 0x06: // CMP/HI
1870 EMITH_SJMP_START(DCOND_LS);
1871 emith_or_r_imm_c(DCOND_HI, sr, T);
1872 EMITH_SJMP_END(DCOND_LS);
1873 break;
1874 case 0x07: // CMP/GT
1875 EMITH_SJMP_START(DCOND_LE);
1876 emith_or_r_imm_c(DCOND_GT, sr, T);
1877 EMITH_SJMP_END(DCOND_LE);
1878 break;
1879 }
1880 goto end_op;
1881 case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
1882 // Q1 = carry(Rn = (Rn << 1) | T)
1883 // if Q ^ M
1884 // Q2 = carry(Rn += Rm)
1885 // else
1886 // Q2 = carry(Rn -= Rm)
1887 // Q = M ^ Q1 ^ Q2
1888 // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
1889 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1890 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1891 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1892 emith_tpop_carry(sr, 0);
1893 emith_adcf_r_r(tmp2, tmp2);
1894 emith_tpush_carry(sr, 0); // keep Q1 in T for now
1895 tmp4 = rcache_get_tmp();
1896 emith_and_r_r_imm(tmp4, sr, M);
1897 emith_eor_r_r_lsr(sr, tmp4, M_SHIFT - Q_SHIFT); // Q ^= M
1898 rcache_free_tmp(tmp4);
1899 // add or sub, invert T if carry to get Q1 ^ Q2
1900 // in: (Q ^ M) passed in Q, Q1 in T
1901 emith_sh2_div1_step(tmp2, tmp3, sr);
1902 emith_bic_r_imm(sr, Q);
1903 emith_tst_r_imm(sr, M);
1904 EMITH_SJMP_START(DCOND_EQ);
1905 emith_or_r_imm_c(DCOND_NE, sr, Q); // Q = M
1906 EMITH_SJMP_END(DCOND_EQ);
1907 emith_tst_r_imm(sr, T);
1908 EMITH_SJMP_START(DCOND_EQ);
1909 emith_eor_r_imm_c(DCOND_NE, sr, Q); // Q = M ^ Q1 ^ Q2
1910 EMITH_SJMP_END(DCOND_EQ);
1911 emith_eor_r_imm(sr, T); // T = !(Q1 ^ Q2)
1912 goto end_op;
1913 case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
1914 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
1915 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1916 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1917 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
1918 emith_mul_u64(tmp3, tmp4, tmp, tmp2);
1919 goto end_op;
1920 case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
1921 case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
1922 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1923 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1924 if (op & 4) {
1925 emith_add_r_r(tmp, tmp2);
1926 } else
1927 emith_sub_r_r(tmp, tmp2);
1928 goto end_op;
1929 case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
1930 case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
1931 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1932 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1933 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1934 if (op & 4) { // adc
1935 emith_tpop_carry(sr, 0);
1936 emith_adcf_r_r(tmp, tmp2);
1937 emith_tpush_carry(sr, 0);
1938 } else {
1939 emith_tpop_carry(sr, 1);
1940 emith_sbcf_r_r(tmp, tmp2);
1941 emith_tpush_carry(sr, 1);
1942 }
1943 goto end_op;
1944 case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
1945 case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
1946 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1947 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1948 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1949 emith_bic_r_imm(sr, T);
1950 if (op & 4) {
1951 emith_addf_r_r(tmp, tmp2);
1952 } else
1953 emith_subf_r_r(tmp, tmp2);
1954 EMITH_SJMP_START(DCOND_VC);
1955 emith_or_r_imm_c(DCOND_VS, sr, T);
1956 EMITH_SJMP_END(DCOND_VC);
1957 goto end_op;
1958 case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
1959 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
1960 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1961 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1962 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
1963 emith_mul_s64(tmp3, tmp4, tmp, tmp2);
1964 goto end_op;
1965 }
1966 goto default_;
1967
1968 /////////////////////////////////////////////
1969 case 0x04:
1970 switch (op & 0x0f)
1971 {
1972 case 0x00:
1973 switch (GET_Fx())
1974 {
1975 case 0: // SHLL Rn 0100nnnn00000000
1976 case 2: // SHAL Rn 0100nnnn00100000
1977 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1978 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1979 emith_tpop_carry(sr, 0); // dummy
1980 emith_lslf(tmp, tmp, 1);
1981 emith_tpush_carry(sr, 0);
1982 goto end_op;
1983 case 1: // DT Rn 0100nnnn00010000
1984 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1985#ifndef DRC_CMP
1986 if (FETCH_OP(pc) == 0x8bfd) { // BF #-2
1987 if (gconst_get(GET_Rn(), &tmp)) {
1988 // XXX: limit burned cycles
1989 emit_move_r_imm32(GET_Rn(), 0);
1990 emith_or_r_imm(sr, T);
1991 cycles += tmp * 4 + 1; // +1 syncs with noconst version, not sure why
1992 skip_op = 1;
1993 }
1994 else
1995 emith_sh2_dtbf_loop();
1996 goto end_op;
1997 }
1998#endif
1999 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2000 emith_bic_r_imm(sr, T);
2001 emith_subf_r_imm(tmp, 1);
2002 emit_or_t_if_eq(sr);
2003 goto end_op;
2004 }
2005 goto default_;
2006 case 0x01:
2007 switch (GET_Fx())
2008 {
2009 case 0: // SHLR Rn 0100nnnn00000001
2010 case 2: // SHAR Rn 0100nnnn00100001
2011 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2012 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2013 emith_tpop_carry(sr, 0); // dummy
2014 if (op & 0x20) {
2015 emith_asrf(tmp, tmp, 1);
2016 } else
2017 emith_lsrf(tmp, tmp, 1);
2018 emith_tpush_carry(sr, 0);
2019 goto end_op;
2020 case 1: // CMP/PZ Rn 0100nnnn00010001
2021 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
2022 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2023 emith_bic_r_imm(sr, T);
2024 emith_cmp_r_imm(tmp, 0);
2025 EMITH_SJMP_START(DCOND_LT);
2026 emith_or_r_imm_c(DCOND_GE, sr, T);
2027 EMITH_SJMP_END(DCOND_LT);
2028 goto end_op;
2029 }
2030 goto default_;
2031 case 0x02:
2032 case 0x03:
2033 switch (op & 0x3f)
2034 {
2035 case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
2036 tmp = SHR_MACH;
2037 break;
2038 case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
2039 tmp = SHR_MACL;
2040 break;
2041 case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
2042 tmp = SHR_PR;
2043 break;
2044 case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
2045 tmp = SHR_SR;
2046 break;
2047 case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
2048 tmp = SHR_GBR;
2049 break;
2050 case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
2051 tmp = SHR_VBR;
2052 break;
2053 default:
2054 goto default_;
2055 }
2056 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2057 emith_sub_r_imm(tmp2, 4);
2058 rcache_clean();
2059 rcache_get_reg_arg(0, GET_Rn());
2060 tmp3 = rcache_get_reg_arg(1, tmp);
2061 if (tmp == SHR_SR)
2062 emith_clear_msb(tmp3, tmp3, 22); // reserved bits defined by ISA as 0
2063 emit_memhandler_write(2, pc);
2064 goto end_op;
2065 case 0x04:
2066 case 0x05:
2067 switch (op & 0x3f)
2068 {
2069 case 0x04: // ROTL Rn 0100nnnn00000100
2070 case 0x05: // ROTR Rn 0100nnnn00000101
2071 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2072 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2073 emith_tpop_carry(sr, 0); // dummy
2074 if (op & 1) {
2075 emith_rorf(tmp, tmp, 1);
2076 } else
2077 emith_rolf(tmp, tmp, 1);
2078 emith_tpush_carry(sr, 0);
2079 goto end_op;
2080 case 0x24: // ROTCL Rn 0100nnnn00100100
2081 case 0x25: // ROTCR Rn 0100nnnn00100101
2082 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2083 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2084 emith_tpop_carry(sr, 0);
2085 if (op & 1) {
2086 emith_rorcf(tmp);
2087 } else
2088 emith_rolcf(tmp);
2089 emith_tpush_carry(sr, 0);
2090 goto end_op;
2091 case 0x15: // CMP/PL Rn 0100nnnn00010101
2092 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2093 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2094 emith_bic_r_imm(sr, T);
2095 emith_cmp_r_imm(tmp, 0);
2096 EMITH_SJMP_START(DCOND_LE);
2097 emith_or_r_imm_c(DCOND_GT, sr, T);
2098 EMITH_SJMP_END(DCOND_LE);
2099 goto end_op;
2100 }
2101 goto default_;
2102 case 0x06:
2103 case 0x07:
2104 switch (op & 0x3f)
2105 {
2106 case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
2107 tmp = SHR_MACH;
2108 break;
2109 case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
2110 tmp = SHR_MACL;
2111 break;
2112 case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
2113 tmp = SHR_PR;
2114 break;
2115 case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
2116 tmp = SHR_SR;
2117 break;
2118 case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
2119 tmp = SHR_GBR;
2120 break;
2121 case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
2122 tmp = SHR_VBR;
2123 break;
2124 default:
2125 goto default_;
2126 }
2127 rcache_get_reg_arg(0, GET_Rn());
2128 tmp2 = emit_memhandler_read(2);
2129 if (tmp == SHR_SR) {
2130 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2131 emith_write_sr(sr, tmp2);
2132 drcf.test_irq = 1;
2133 } else {
2134 tmp = rcache_get_reg(tmp, RC_GR_WRITE);
2135 emith_move_r_r(tmp, tmp2);
2136 }
2137 rcache_free_tmp(tmp2);
2138 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2139 emith_add_r_imm(tmp, 4);
2140 goto end_op;
2141 case 0x08:
2142 case 0x09:
2143 switch (GET_Fx())
2144 {
2145 case 0:
2146 // SHLL2 Rn 0100nnnn00001000
2147 // SHLR2 Rn 0100nnnn00001001
2148 tmp = 2;
2149 break;
2150 case 1:
2151 // SHLL8 Rn 0100nnnn00011000
2152 // SHLR8 Rn 0100nnnn00011001
2153 tmp = 8;
2154 break;
2155 case 2:
2156 // SHLL16 Rn 0100nnnn00101000
2157 // SHLR16 Rn 0100nnnn00101001
2158 tmp = 16;
2159 break;
2160 default:
2161 goto default_;
2162 }
2163 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2164 if (op & 1) {
2165 emith_lsr(tmp2, tmp2, tmp);
2166 } else
2167 emith_lsl(tmp2, tmp2, tmp);
2168 goto end_op;
2169 case 0x0a:
2170 switch (GET_Fx())
2171 {
2172 case 0: // LDS Rm,MACH 0100mmmm00001010
2173 tmp2 = SHR_MACH;
2174 break;
2175 case 1: // LDS Rm,MACL 0100mmmm00011010
2176 tmp2 = SHR_MACL;
2177 break;
2178 case 2: // LDS Rm,PR 0100mmmm00101010
2179 tmp2 = SHR_PR;
2180 break;
2181 default:
2182 goto default_;
2183 }
2184 emit_move_r_r(tmp2, GET_Rn());
2185 goto end_op;
2186 case 0x0b:
2187 switch (GET_Fx())
2188 {
2189 case 1: // TAS.B @Rn 0100nnnn00011011
2190 // XXX: is TAS working on 32X?
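     // TAS.B: read the byte at @Rn, set T if it was zero, then write it
     // back with bit 7 set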
2191 rcache_get_reg_arg(0, GET_Rn());
2192 tmp = emit_memhandler_read(0);
2193 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2194 emith_bic_r_imm(sr, T);
2195 emith_cmp_r_imm(tmp, 0);
2196 emit_or_t_if_eq(sr);
2197 rcache_clean();
2198 emith_or_r_imm(tmp, 0x80);
2199 tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
2200 emith_move_r_r(tmp2, tmp);
2201 rcache_free_tmp(tmp);
2202 rcache_get_reg_arg(0, GET_Rn());
2203 emit_memhandler_write(0, pc);
2204 break;
2205 default:
2206 goto default_;
2207 }
2208 goto end_op;
2209 case 0x0e:
2210 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
2211 switch (GET_Fx())
2212 {
2213 case 0: // LDC Rm,SR 0100mmmm00001110
2214 tmp2 = SHR_SR;
2215 break;
2216 case 1: // LDC Rm,GBR 0100mmmm00011110
2217 tmp2 = SHR_GBR;
2218 break;
2219 case 2: // LDC Rm,VBR 0100mmmm00101110
2220 tmp2 = SHR_VBR;
2221 break;
2222 default:
2223 goto default_;
2224 }
2225 if (tmp2 == SHR_SR) {
2226 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2227 emith_write_sr(sr, tmp);
2228 drcf.test_irq = 1;
2229 } else {
2230 tmp2 = rcache_get_reg(tmp2, RC_GR_WRITE);
2231 emith_move_r_r(tmp2, tmp);
2232 }
2233 goto end_op;
2234 case 0x0f:
2235 // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
2236 emit_indirect_read_double(&tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
2237 emith_sext(tmp, tmp, 16);
2238 emith_sext(tmp2, tmp2, 16);
2239 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW);
2240 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW);
2241 emith_mula_s64(tmp3, tmp4, tmp, tmp2);
2242 rcache_free_tmp(tmp2);
2243 // XXX: MACH should be untouched when S is set?
2244 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
2245 emith_tst_r_imm(sr, S);
2246 EMITH_JMP_START(DCOND_EQ);
2247
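     // saturation for S=1: if the sign bit of MACL (replicated by the asr)
     // disagrees with MACH, the result overflowed 32 bits; clamp MACL to
     // 0x80000000 (negative overflow) or 0x7fffffff (positive)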
2248 emith_asr(tmp, tmp3, 31);
2249 emith_eorf_r_r(tmp, tmp4); // tmp = ((signed)macl >> 31) ^ mach
2250 EMITH_JMP_START(DCOND_EQ);
2251 emith_move_r_imm(tmp3, 0x80000000);
2252 emith_tst_r_r(tmp4, tmp4);
2253 EMITH_SJMP_START(DCOND_MI);
2254 emith_sub_r_imm_c(DCOND_PL, tmp3, 1); // positive
2255 EMITH_SJMP_END(DCOND_MI);
2256 EMITH_JMP_END(DCOND_EQ);
2257
2258 EMITH_JMP_END(DCOND_EQ);
2259 rcache_free_tmp(tmp);
2260 goto end_op;
2261 }
2262 goto default_;
2263
2264 /////////////////////////////////////////////
2265 case 0x05:
2266 // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
2267 emit_memhandler_read_rr(GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2);
2268 goto end_op;
2269
2270 /////////////////////////////////////////////
2271 case 0x06:
2272 switch (op & 0x0f)
2273 {
2274 case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
2275 case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
2276 case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
2277 case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
2278 case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
2279 case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
2280 emit_memhandler_read_rr(GET_Rn(), GET_Rm(), 0, op & 3);
2281 if ((op & 7) >= 4 && GET_Rn() != GET_Rm()) {
2282 tmp = rcache_get_reg(GET_Rm(), RC_GR_RMW);
2283 emith_add_r_imm(tmp, (1 << (op & 3)));
2284 }
2285 goto end_op;
2286 case 0x03:
2287 case 0x07 ... 0x0f:
2288 tmp = rcache_get_reg(GET_Rm(), RC_GR_READ);
2289 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
2290 switch (op & 0x0f)
2291 {
2292 case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
2293 emith_move_r_r(tmp2, tmp);
2294 break;
2295 case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
2296 emith_mvn_r_r(tmp2, tmp);
2297 break;
2298 case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
2299 tmp3 = tmp2;
2300 if (tmp == tmp2)
2301 tmp3 = rcache_get_tmp();
2302 tmp4 = rcache_get_tmp();
2303 emith_lsr(tmp3, tmp, 16);
2304 emith_or_r_r_lsl(tmp3, tmp, 24);
2305 emith_and_r_r_imm(tmp4, tmp, 0xff00);
2306 emith_or_r_r_lsl(tmp3, tmp4, 8);
2307 emith_rol(tmp2, tmp3, 16);
2308 rcache_free_tmp(tmp4);
2309 if (tmp == tmp2)
2310 rcache_free_tmp(tmp3);
2311 break;
2312 case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
2313 emith_rol(tmp2, tmp, 16);
2314 break;
2315 case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
2316 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2317 emith_tpop_carry(sr, 1);
2318 emith_negcf_r_r(tmp2, tmp);
2319 emith_tpush_carry(sr, 1);
2320 break;
2321 case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
2322 emith_neg_r_r(tmp2, tmp);
2323 break;
2324 case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
2325 emith_clear_msb(tmp2, tmp, 24);
2326 break;
2327 case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
2328 emith_clear_msb(tmp2, tmp, 16);
2329 break;
2330 case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
2331 emith_sext(tmp2, tmp, 8);
2332 break;
2333 case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
2334 emith_sext(tmp2, tmp, 16);
2335 break;
2336 }
2337 goto end_op;
2338 }
2339 goto default_;
2340
2341 /////////////////////////////////////////////
2342 case 0x07:
2343 // ADD #imm,Rn 0111nnnniiiiiiii
2344 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2345 if (op & 0x80) { // adding negative
2346 emith_sub_r_imm(tmp, -op & 0xff);
2347 } else
2348 emith_add_r_imm(tmp, op & 0xff);
2349 goto end_op;
2350
2351 /////////////////////////////////////////////
2352 case 0x08:
2353 switch (op & 0x0f00)
2354 {
2355 case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
2356 case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
2357 rcache_clean();
2358 tmp = rcache_get_reg_arg(0, GET_Rm());
2359 tmp2 = rcache_get_reg_arg(1, SHR_R0);
2360 tmp3 = (op & 0x100) >> 8;
2361 if (op & 0x0f)
2362 emith_add_r_imm(tmp, (op & 0x0f) << tmp3);
2363 emit_memhandler_write(tmp3, pc);
2364 goto end_op;
2365 case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
2366 case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
2367 tmp = (op & 0x100) >> 8;
2368 emit_memhandler_read_rr(SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
2369 goto end_op;
2370 case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
2371 // XXX: could use cmn
2372 tmp = rcache_get_tmp();
2373 tmp2 = rcache_get_reg(0, RC_GR_READ);
2374 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2375 emith_move_r_imm_s8(tmp, op & 0xff);
2376 emith_bic_r_imm(sr, T);
2377 emith_cmp_r_r(tmp2, tmp);
2378 emit_or_t_if_eq(sr);
2379 rcache_free_tmp(tmp);
2380 goto end_op;
2381 }
2382 goto default_;
2383
2384 /////////////////////////////////////////////
2385 case 0x09:
2386 // MOV.W @(disp,PC),Rn 1001nnnndddddddd
2387 tmp = pc + (op & 0xff) * 2 + 2;
2388#if PROPAGATE_CONSTANTS
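     // literal within reach of this block: record its address so SMC
     // checks cover it, and propagate the fetched word as a constant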
2389 if (tmp < end_pc + MAX_LITERAL_OFFSET && literal_addr_count < MAX_LITERALS) {
2390 ADD_TO_ARRAY(literal_addr, literal_addr_count, tmp,);
2391 gconst_new(GET_Rn(), (u32)(int)(signed short)FETCH_OP(tmp));
2392 }
2393 else
2394#endif
2395 {
2396 tmp2 = rcache_get_tmp_arg(0);
2397 emith_move_r_imm(tmp2, tmp);
2398 tmp2 = emit_memhandler_read(1);
2399 tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
2400 emith_sext(tmp3, tmp2, 16);
2401 rcache_free_tmp(tmp2);
2402 }
2403 goto end_op;
2404
2405 /////////////////////////////////////////////
2406 case 0x0c:
2407 switch (op & 0x0f00)
2408 {
2409 case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
2410 case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
2411 case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
2412 rcache_clean();
2413 tmp = rcache_get_reg_arg(0, SHR_GBR);
2414 tmp2 = rcache_get_reg_arg(1, SHR_R0);
2415 tmp3 = (op & 0x300) >> 8;
2416 emith_add_r_imm(tmp, (op & 0xff) << tmp3);
2417 emit_memhandler_write(tmp3, pc);
2418 goto end_op;
2419 case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
2420 case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
2421 case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
2422 tmp = (op & 0x300) >> 8;
2423 emit_memhandler_read_rr(SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
2424 goto end_op;
2425 case 0x0300: // TRAPA #imm 11000011iiiiiiii
2426 tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
2427 emith_sub_r_imm(tmp, 4*2);
2428 // push SR
2429 tmp = rcache_get_reg_arg(0, SHR_SP);
2430 emith_add_r_imm(tmp, 4);
2431 tmp = rcache_get_reg_arg(1, SHR_SR);
2432 emith_clear_msb(tmp, tmp, 22);
2433 emit_memhandler_write(2, pc);
2434 // push PC
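     // pc already points past the TRAPA opcode here (the decoder reports
     // the current insn as pc - 2), so it is the return address to push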
2435 rcache_get_reg_arg(0, SHR_SP);
2436 tmp = rcache_get_tmp_arg(1);
2437 emith_move_r_imm(tmp, pc);
2438 emit_memhandler_write(2, pc);
2439 // obtain new PC
2440 emit_memhandler_read_rr(SHR_PC, SHR_VBR, (op & 0xff) * 4, 2);
2441 // indirect jump -> back to dispatcher
2442 emith_jump(sh2_drc_dispatcher);
2443 goto end_op;
2444 case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
2445 emit_move_r_imm32(SHR_R0, (pc + (op & 0xff) * 4 + 2) & ~3);
2446 goto end_op;
2447 case 0x0800: // TST #imm,R0 11001000iiiiiiii
2448 tmp = rcache_get_reg(SHR_R0, RC_GR_READ);
2449 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2450 emith_bic_r_imm(sr, T);
2451 emith_tst_r_imm(tmp, op & 0xff);
2452 emit_or_t_if_eq(sr);
2453 goto end_op;
2454 case 0x0900: // AND #imm,R0 11001001iiiiiiii
2455 tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
2456 emith_and_r_imm(tmp, op & 0xff);
2457 goto end_op;
2458 case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
2459 tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
2460 emith_eor_r_imm(tmp, op & 0xff);
2461 goto end_op;
2462 case 0x0b00: // OR #imm,R0 11001011iiiiiiii
2463 tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
2464 emith_or_r_imm(tmp, op & 0xff);
2465 goto end_op;
2466 case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
2467 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2468 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2469 emith_bic_r_imm(sr, T);
2470 emith_tst_r_imm(tmp, op & 0xff);
2471 emit_or_t_if_eq(sr);
2472 rcache_free_tmp(tmp);
2473 goto end_op;
2474 case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
2475 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2476 emith_and_r_imm(tmp, op & 0xff);
2477 goto end_rmw_op;
2478 case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
2479 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2480 emith_eor_r_imm(tmp, op & 0xff);
2481 goto end_rmw_op;
2482 case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
2483 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2484 emith_or_r_imm(tmp, op & 0xff);
2485 end_rmw_op:
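     // common tail of the AND/XOR/OR @(R0,GBR) byte ops: write the
     // modified value back to R0 + GBR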
2486 tmp2 = rcache_get_tmp_arg(1);
2487 emith_move_r_r(tmp2, tmp);
2488 rcache_free_tmp(tmp);
2489 tmp3 = rcache_get_reg_arg(0, SHR_GBR);
2490 tmp4 = rcache_get_reg(SHR_R0, RC_GR_READ);
2491 emith_add_r_r(tmp3, tmp4);
2492 emit_memhandler_write(0, pc);
2493 goto end_op;
2494 }
2495 goto default_;
2496
2497 /////////////////////////////////////////////
2498 case 0x0d:
2499 // MOV.L @(disp,PC),Rn 1101nnnndddddddd
2500 tmp = (pc + (op & 0xff) * 4 + 2) & ~3;
2501#if PROPAGATE_CONSTANTS
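     // same literal handling as the MOV.W @(disp,PC) case, for 32-bit loads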
2502 if (tmp < end_pc + MAX_LITERAL_OFFSET && literal_addr_count < MAX_LITERALS) {
2503 ADD_TO_ARRAY(literal_addr, literal_addr_count, tmp,);
2504 gconst_new(GET_Rn(), FETCH32(tmp));
2505 }
2506 else
2507#endif
2508 {
2509 tmp2 = rcache_get_tmp_arg(0);
2510 emith_move_r_imm(tmp2, tmp);
2511 tmp2 = emit_memhandler_read(2);
2512 tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
2513 emith_move_r_r(tmp3, tmp2);
2514 rcache_free_tmp(tmp2);
2515 }
2516 goto end_op;
2517
2518 /////////////////////////////////////////////
2519 case 0x0e:
2520 // MOV #imm,Rn 1110nnnniiiiiiii
2521 emit_move_r_imm32(GET_Rn(), (u32)(signed int)(signed char)op);
2522 goto end_op;
2523
2524 default:
2525 default_:
2526 elprintf(EL_ANOMALY, "%csh2 drc: unhandled op %04x @ %08x",
2527 sh2->is_slave ? 's' : 'm', op, pc - 2);
2528 break;
2529 }
2530
2531end_op:
2532 rcache_unlock_all();
2533
2534 cycles += opd->cycles;
2535
2536 if (op_flags[i+1] & OF_DELAY_OP) {
2537 do_host_disasm(tcache_id);
2538 continue;
2539 }
2540
2541 // test irq?
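    // drcf.test_irq is set when SR was rewritten (LDC SR, LDC.L @Rm+,SR);
    // sync the cycle counter and let sh2_drc_test_irq take any newly
    // unmasked interrupt, unless a direct branch is pending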
2542 if (drcf.test_irq && !drcf.pending_branch_direct) {
2543 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2544 FLUSH_CYCLES(sr);
2545 rcache_flush();
2546 emith_call(sh2_drc_test_irq);
2547 drcf.test_irq = 0;
2548 }
2549
2550 // branch handling (with/without delay)
2551 if (drcf.pending_branch_direct)
2552 {
2553 struct op_data *opd_b =
2554 (op_flags[i] & OF_DELAY_OP) ? &ops[i-1] : opd;
2555 u32 target_pc = opd_b->imm;
2556 int cond = -1;
2557 void *target = NULL;
2558
2559 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2560 FLUSH_CYCLES(sr);
2561
2562 if (opd_b->op != OP_BRANCH)
2563 cond = (opd_b->op == OP_BRANCH_CF) ? DCOND_EQ : DCOND_NE;
2564 if (cond != -1) {
2565 int ctaken = (op_flags[i] & OF_DELAY_OP) ? 1 : 2;
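      // test T (or its delay-slot copy T_save) and charge the extra
      // cycles of a taken branch conditionally; the cycle counter lives
      // in the upper bits of SR, hence the << 12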
2566
2567 if (delay_dep_fw & BITMASK1(SHR_T))
2568 emith_tst_r_imm(sr, T_save);
2569 else
2570 emith_tst_r_imm(sr, T);
2571
2572 emith_sub_r_imm_c(cond, sr, ctaken<<12);
2573 }
2574 rcache_clean();
2575
2576#if LINK_BRANCHES
2577 if (find_in_array(branch_target_pc, branch_target_count, target_pc) >= 0)
2578 {
2579 // local branch
2580 // XXX: backward jumps could be linked right away
2581 if (branch_patch_count < MAX_LOCAL_BRANCHES) {
2582 target = tcache_ptr;
2583 branch_patch_pc[branch_patch_count] = target_pc;
2584 branch_patch_ptr[branch_patch_count] = target;
2585 branch_patch_count++;
2586 }
2587 else
2588 dbg(1, "warning: too many local branches");
2589 }
2590
2591 if (target == NULL)
2592#endif
2593 {
2594 // can't resolve branch locally, make a block exit
2595 emit_move_r_imm32(SHR_PC, target_pc);
2596 rcache_clean();
2597
2598 target = dr_prepare_ext_branch(target_pc, sh2->is_slave, tcache_id);
2599 if (target == NULL)
2600 return NULL;
2601 }
2602
2603 if (cond != -1)
2604 emith_jump_cond_patchable(cond, target);
2605 else
2606 emith_jump_patchable(target);
2607
2608 drcf.pending_branch_direct = 0;
2609 }
2610 else if (drcf.pending_branch_indirect) {
2611 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2612 FLUSH_CYCLES(sr);
2613 rcache_flush();
2614 emith_jump(sh2_drc_dispatcher);
2615 drcf.pending_branch_indirect = 0;
2616 }
2617
2618 do_host_disasm(tcache_id);
2619 }
2620
2621 tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
2622 FLUSH_CYCLES(tmp);
2623 rcache_flush();
2624
2625 // check the last op
2626 if (op_flags[i-1] & OF_DELAY_OP)
2627 opd = &ops[i-2];
2628 else
2629 opd = &ops[i-1];
2630
2631 if (opd->op != OP_BRANCH && opd->op != OP_BRANCH_R
2632 && opd->op != OP_BRANCH_RF && opd->op != OP_RTE)
2633 {
2634 void *target;
2635
2636 emit_move_r_imm32(SHR_PC, pc);
2637 rcache_flush();
2638
2639 target = dr_prepare_ext_branch(pc, sh2->is_slave, tcache_id);
2640 if (target == NULL)
2641 return NULL;
2642 emith_jump_patchable(target);
2643 }
2644
2645 // link local branches
2646 for (i = 0; i < branch_patch_count; i++) {
2647 void *target;
2648 int t;
2649 t = find_in_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
2650 target = branch_target_ptr[t];
2651 if (target == NULL) {
2652 // flush pc and go back to dispatcher (this should no longer happen)
2653 dbg(1, "stray branch to %08x %p", branch_patch_pc[i], tcache_ptr);
2654 target = tcache_ptr;
2655 emit_move_r_imm32(SHR_PC, branch_patch_pc[i]);
2656 rcache_flush();
2657 emith_jump(sh2_drc_dispatcher);
2658 }
2659 emith_jump_patch(branch_patch_ptr[i], target);
2660 }
2661
2662 // mark memory blocks as containing compiled code
2663 // override any overlay blocks as they become unreachable anyway
2664 if (tcache_id != 0 || (block->addr & 0xc7fc0000) == 0x06000000)
2665 {
2666 u16 *drc_ram_blk = NULL;
2667 u32 addr, mask = 0, shift = 0;
2668
2669 if (tcache_id != 0) {
2670 // data array, BIOS
2671 drc_ram_blk = Pico32xMem->drcblk_da[sh2->is_slave];
2672 shift = SH2_DRCBLK_DA_SHIFT;
2673 mask = 0xfff;
2674 }
2675 else if ((block->addr & 0xc7fc0000) == 0x06000000) {
2676 // SDRAM
2677 drc_ram_blk = Pico32xMem->drcblk_ram;
2678 shift = SH2_DRCBLK_RAM_SHIFT;
2679 mask = 0x3ffff;
2680 }
2681
2682 // mark recompiled insns
2683 drc_ram_blk[(base_pc & mask) >> shift] = 1;
2684 for (pc = base_pc; pc < end_pc; pc += 2)
2685 drc_ram_blk[(pc & mask) >> shift] = 1;
2686
2687 // mark literals
2688 for (i = 0; i < literal_addr_count; i++) {
2689 tmp = literal_addr[i];
2690 drc_ram_blk[(tmp & mask) >> shift] = 1;
2691 }
2692
2693 // add to invalidation lookup lists
2694 addr = base_pc & ~(ADDR_TO_BLOCK_PAGE - 1);
2695 for (; addr < end_literals; addr += ADDR_TO_BLOCK_PAGE) {
2696 i = (addr & mask) / ADDR_TO_BLOCK_PAGE;
2697 add_to_block_list(&inval_lookup[tcache_id][i], block);
2698 }
2699 }
2700
2701 tcache_ptrs[tcache_id] = tcache_ptr;
2702
2703 host_instructions_updated(block_entry_ptr, tcache_ptr);
2704
2705 do_host_disasm(tcache_id);
2706 dbg(2, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
2707 tcache_id, blkid_main,
2708 tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
2709 insns_compiled, host_insn_count, (float)host_insn_count / insns_compiled);
2710 if ((sh2->pc & 0xc6000000) == 0x02000000) // ROM
2711 dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
2712/*
2713 printf("~~~\n");
2714 tcache_dsm_ptrs[tcache_id] = block_entry_ptr;
2715 do_host_disasm(tcache_id);
2716 printf("~~~\n");
2717*/
2718
2719#if (DRC_DEBUG & 4)
2720 fflush(stdout);
2721#endif
2722
2723 return block_entry_ptr;
2724}
2725
2726static void sh2_generate_utils(void)
2727{
2728 int arg0, arg1, arg2, sr, tmp;
2729
2730 sh2_drc_write32 = p32x_sh2_write32;
2731 sh2_drc_read8 = p32x_sh2_read8;
2732 sh2_drc_read16 = p32x_sh2_read16;
2733 sh2_drc_read32 = p32x_sh2_read32;
2734
2735 host_arg2reg(arg0, 0);
2736 host_arg2reg(arg1, 1);
2737 host_arg2reg(arg2, 2);
2738 emith_move_r_r(arg0, arg0); // nop
2739
2740 // sh2_drc_exit(void)
2741 sh2_drc_exit = (void *)tcache_ptr;
2742 emit_do_static_regs(1, arg2);
2743 emith_sh2_drc_exit();
2744
2745 // sh2_drc_dispatcher(void)
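  // exit when the cycle counter kept in SR underflows; otherwise look up
  // the block for the current PC, translating on miss and, as a last
  // resort, flushing the whole tcache and retrying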
2746 sh2_drc_dispatcher = (void *)tcache_ptr;
2747 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
2748 emith_cmp_r_imm(sr, 0);
2749 emith_jump_cond(DCOND_LT, sh2_drc_exit);
2750 rcache_invalidate();
2751 emith_ctx_read(arg0, SHR_PC * 4);
2752 emith_ctx_read(arg1, offsetof(SH2, is_slave));
2753 emith_add_r_r_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
2754 emith_call(dr_lookup_block);
2755 emit_block_entry();
2756 // lookup failed, call sh2_translate()
2757 emith_move_r_r(arg0, CONTEXT_REG);
2758 emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
2759 emith_call(sh2_translate);
2760 emit_block_entry();
2761 // sh2_translate() failed, flush cache and retry
2762 emith_ctx_read(arg0, offsetof(SH2, drc_tmp));
2763 emith_call(flush_tcache);
2764 emith_move_r_r(arg0, CONTEXT_REG);
2765 emith_ctx_read(arg1, offsetof(SH2, drc_tmp));
2766 emith_call(sh2_translate);
2767 emit_block_entry();
2768 // XXX: can't translate, fail
2769 emith_call(dr_failure);
2770
2771 // sh2_drc_test_irq(void)
2772 // assumes it's called from main function (may jump to dispatcher)
2773 sh2_drc_test_irq = (void *)tcache_ptr;
2774 emith_ctx_read(arg1, offsetof(SH2, pending_level));
2775 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
2776 emith_lsr(arg0, sr, I_SHIFT);
2777 emith_and_r_imm(arg0, 0x0f);
2778 emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
2779 EMITH_SJMP_START(DCOND_GT);
2780 emith_ret_c(DCOND_LE); // nope, return
2781 EMITH_SJMP_END(DCOND_GT);
2782 // adjust SP
2783 tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
2784 emith_sub_r_imm(tmp, 4*2);
2785 rcache_clean();
2786 // push SR
2787 tmp = rcache_get_reg_arg(0, SHR_SP);
2788 emith_add_r_imm(tmp, 4);
2789 tmp = rcache_get_reg_arg(1, SHR_SR);
2790 emith_clear_msb(tmp, tmp, 22);
2791 emith_move_r_r(arg2, CONTEXT_REG);
2792 emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
2793 rcache_invalidate();
2794 // push PC
2795 rcache_get_reg_arg(0, SHR_SP);
2796 emith_ctx_read(arg1, SHR_PC * 4);
2797 emith_move_r_r(arg2, CONTEXT_REG);
2798 emith_call(p32x_sh2_write32);
2799 rcache_invalidate();
2800 // update I, cycles, do callback
2801 emith_ctx_read(arg1, offsetof(SH2, pending_level));
2802 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2803 emith_bic_r_imm(sr, I);
2804 emith_or_r_r_lsl(sr, arg1, I_SHIFT);
2805 emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
2806 rcache_flush();
2807 emith_move_r_r(arg0, CONTEXT_REG);
2808 emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
2809 // obtain new PC
2810 emith_lsl(arg0, arg0, 2);
2811 emith_ctx_read(arg1, SHR_VBR * 4);
2812 emith_add_r_r(arg0, arg1);
2813 emit_memhandler_read(2);
2814 emith_ctx_write(arg0, SHR_PC * 4);
2815#ifdef __i386__
2816 emith_add_r_imm(xSP, 4); // fix stack
2817#endif
2818 emith_jump(sh2_drc_dispatcher);
2819 rcache_invalidate();
2820
2821 // sh2_drc_entry(SH2 *sh2)
2822 sh2_drc_entry = (void *)tcache_ptr;
2823 emith_sh2_drc_entry();
2824 emith_move_r_r(CONTEXT_REG, arg0); // move ctx, arg0
2825 emit_do_static_regs(0, arg2);
2826 emith_call(sh2_drc_test_irq);
2827 emith_jump(sh2_drc_dispatcher);
2828
2829 // sh2_drc_write8(u32 a, u32 d)
2830 sh2_drc_write8 = (void *)tcache_ptr;
2831 emith_ctx_read(arg2, offsetof(SH2, write8_tab));
2832 emith_sh2_wcall(arg0, arg2);
2833
2834 // sh2_drc_write16(u32 a, u32 d)
2835 sh2_drc_write16 = (void *)tcache_ptr;
2836 emith_ctx_read(arg2, offsetof(SH2, write16_tab));
2837 emith_sh2_wcall(arg0, arg2);
2838
2839#ifdef PDB_NET
2840 // debug
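 // wrap the handlers so every access also updates pdb_io_csum: values are
 // summed into csum[0] and csum[1] counts accesses (plus carries),
 // presumably so two netplay-debug instances can compare I/O streams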
2841 #define MAKE_READ_WRAPPER(func) { \
2842 void *tmp = (void *)tcache_ptr; \
2843 emith_push_ret(); \
2844 emith_call(func); \
2845 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
2846 emith_addf_r_r(arg2, arg0); \
2847 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
2848 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
2849 emith_adc_r_imm(arg2, 0x01000000); \
2850 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
2851 emith_pop_and_ret(); \
2852 func = tmp; \
2853 }
2854 #define MAKE_WRITE_WRAPPER(func) { \
2855 void *tmp = (void *)tcache_ptr; \
2856 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
2857 emith_addf_r_r(arg2, arg1); \
2858 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
2859 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
2860 emith_adc_r_imm(arg2, 0x01000000); \
2861 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
2862 emith_move_r_r(arg2, CONTEXT_REG); \
2863 emith_jump(func); \
2864 func = tmp; \
2865 }
2866
2867 MAKE_READ_WRAPPER(sh2_drc_read8);
2868 MAKE_READ_WRAPPER(sh2_drc_read16);
2869 MAKE_READ_WRAPPER(sh2_drc_read32);
2870 MAKE_WRITE_WRAPPER(sh2_drc_write8);
2871 MAKE_WRITE_WRAPPER(sh2_drc_write16);
2872 MAKE_WRITE_WRAPPER(sh2_drc_write32);
2873#if (DRC_DEBUG & 4)
2874 host_dasm_new_symbol(sh2_drc_read8);
2875 host_dasm_new_symbol(sh2_drc_read16);
2876 host_dasm_new_symbol(sh2_drc_read32);
2877 host_dasm_new_symbol(sh2_drc_write32);
2878#endif
2879#endif
2880
2881 rcache_invalidate();
2882#if (DRC_DEBUG & 4)
2883 host_dasm_new_symbol(sh2_drc_entry);
2884 host_dasm_new_symbol(sh2_drc_dispatcher);
2885 host_dasm_new_symbol(sh2_drc_exit);
2886 host_dasm_new_symbol(sh2_drc_test_irq);
2887 host_dasm_new_symbol(sh2_drc_write8);
2888 host_dasm_new_symbol(sh2_drc_write16);
2889#endif
2890}
2891
2892static void sh2_smc_rm_block_entry(struct block_desc *bd, int tcache_id, u32 ram_mask)
2893{
2894 struct block_link *bl, *bl_next, *bl_unresolved;
2895 void *tmp;
2896 u32 i, addr;
2897
2898 dbg(2, " killing entry %08x-%08x, blkid %d,%d",
2899 bd->addr, bd->end_addr, tcache_id, bd - block_tables[tcache_id]);
2900 if (bd->addr == 0 || bd->entry_count == 0) {
2901 dbg(1, " killing dead block!? %08x", bd->addr);
2902 return;
2903 }
2904
2905 // remove from inval_lookup
2906 addr = bd->addr & ~(ADDR_TO_BLOCK_PAGE - 1);
2907 for (; addr < bd->end_addr; addr += ADDR_TO_BLOCK_PAGE) {
2908 i = (addr & ram_mask) / ADDR_TO_BLOCK_PAGE;
2909 rm_from_block_list(&inval_lookup[tcache_id][i], bd);
2910 }
2911
2912 tmp = tcache_ptr;
2913 bl_unresolved = unresolved_links[tcache_id];
2914
2915 // remove from hash table, make incoming links unresolved
2916 // XXX: maybe patch branches w/flush instead?
2917 for (i = 0; i < bd->entry_count; i++) {
2918 rm_from_hashlist(&bd->entryp[i], tcache_id);
2919
2920 // since we never reuse tcache space of dead blocks, overwrite each
2921 // dead entry point with a jump to the dispatcher, for blocks still linked to this one
2922 tcache_ptr = bd->entryp[i].tcache_ptr;
2923 emit_move_r_imm32(SHR_PC, bd->addr);
2924 rcache_flush();
2925 emith_jump(sh2_drc_dispatcher);
2926
2927 host_instructions_updated(bd->entryp[i].tcache_ptr, tcache_ptr);
2928
2929 for (bl = bd->entryp[i].links; bl != NULL; ) {
2930 bl_next = bl->next;
2931 bl->next = bl_unresolved;
2932 bl_unresolved = bl;
2933 bl = bl_next;
2934 }
2935 }
2936
2937 tcache_ptr = tmp;
2938 unresolved_links[tcache_id] = bl_unresolved;
2939
2940 bd->addr = bd->end_addr = 0;
2941 bd->entry_count = 0;
2942}
2943
2944static void sh2_smc_rm_block(u32 a, u16 *drc_ram_blk, int tcache_id, u32 shift, u32 mask)
2945{
2946 struct block_list **blist = NULL, *entry;
2947 u32 from = ~0, to = 0;
2948 struct block_desc *block;
2949
2950 blist = &inval_lookup[tcache_id][(a & mask) / ADDR_TO_BLOCK_PAGE];
2951 entry = *blist;
2952 while (entry != NULL) {
2953 block = entry->block;
2954 if (block->addr <= a && a < block->end_addr) {
2955 if (block->addr < from)
2956 from = block->addr;
2957 if (block->end_addr > to)
2958 to = block->end_addr;
2959
2960 sh2_smc_rm_block_entry(block, tcache_id, mask);
2961
2962 // entry lost, restart search
2963 entry = *blist;
2964 continue;
2965 }
2966 entry = entry->next;
2967 }
2968
2969 // clear entry points
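  // drc_ram_blk has one u16 entry per (1 << shift) bytes of SH2 address
  // space, so (to - from) bytes of code take (to - from) >> (shift - 1)
  // bytes of the table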
2970 if (from < to) {
2971 u16 *p = drc_ram_blk + ((from & mask) >> shift);
2972 memset(p, 0, (to - from) >> (shift - 1));
2973 }
2974}
2975
2976void sh2_drc_wcheck_ram(unsigned int a, int val, int cpuid)
2977{
2978 dbg(2, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
2979 sh2_smc_rm_block(a, Pico32xMem->drcblk_ram, 0, SH2_DRCBLK_RAM_SHIFT, 0x3ffff);
2980}
2981
2982void sh2_drc_wcheck_da(unsigned int a, int val, int cpuid)
2983{
2984 dbg(2, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
2985 sh2_smc_rm_block(a, Pico32xMem->drcblk_da[cpuid],
2986 1 + cpuid, SH2_DRCBLK_DA_SHIFT, 0xfff);
2987}
2988
2989int sh2_execute(SH2 *sh2c, int cycles)
2990{
2991 int ret_cycles;
2992
2993 sh2c->cycles_timeslice = cycles;
2994
2995 // cycles are kept in SHR_SR unused bits (upper 20)
2996 // bit11 contains T saved for delay slot
2997 // others are usual SH2 flags
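  // e.g. a 1000-cycle timeslice gives sr = (1000 << 12) | flags; the
  // translated code subtracts from the top bits and the dispatcher exits
  // once the signed value drops below zero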
2998 sh2c->sr &= 0x3f3;
2999 sh2c->sr |= cycles << 12;
3000 sh2_drc_entry(sh2c);
3001
3002 // TODO: irq cycles
3003 ret_cycles = (signed int)sh2c->sr >> 12;
3004 if (ret_cycles > 0)
3005 dbg(1, "warning: drc returned with cycles: %d", ret_cycles);
3006
3007 return sh2c->cycles_timeslice - ret_cycles;
3008}
3009
3010#if (DRC_DEBUG & 2)
3011void block_stats(void)
3012{
3013 int c, b, i, total = 0;
3014
3015 printf("block stats:\n");
3016 for (b = 0; b < ARRAY_SIZE(block_tables); b++)
3017 for (i = 0; i < block_counts[b]; i++)
3018 if (block_tables[b][i].addr != 0)
3019 total += block_tables[b][i].refcount;
3020
3021 for (c = 0; c < 10; c++) {
3022 struct block_desc *blk, *maxb = NULL;
3023 int max = 0;
3024 for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
3025 for (i = 0; i < block_counts[b]; i++) {
3026 blk = &block_tables[b][i];
3027 if (blk->addr != 0 && blk->refcount > max) {
3028 max = blk->refcount;
3029 maxb = blk;
3030 }
3031 }
3032 }
3033 if (maxb == NULL)
3034 break;
3035 printf("%08x %9d %2.3f%%\n", maxb->addr, maxb->refcount,
3036 (double)maxb->refcount / total * 100.0);
3037 maxb->refcount = 0;
3038 }
3039
3040 for (b = 0; b < ARRAY_SIZE(block_tables); b++)
3041 for (i = 0; i < block_counts[b]; i++)
3042 block_tables[b][i].refcount = 0;
3043}
3044#else
3045#define block_stats()
3046#endif
3047
3048void sh2_drc_flush_all(void)
3049{
3050 block_stats();
3051 flush_tcache(0);
3052 flush_tcache(1);
3053 flush_tcache(2);
3054}
3055
3056void sh2_drc_mem_setup(SH2 *sh2)
3057{
3058 // fill the convenience pointers
3059 sh2->p_bios = sh2->is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
3060 sh2->p_da = Pico32xMem->data_array[sh2->is_slave];
3061 sh2->p_sdram = Pico32xMem->sdram;
3062 sh2->p_rom = Pico.rom;
3063}
3064
3065int sh2_drc_init(SH2 *sh2)
3066{
3067 int i;
3068
3069 if (block_tables[0] == NULL)
3070 {
3071 for (i = 0; i < TCACHE_BUFFERS; i++) {
3072 block_tables[i] = calloc(block_max_counts[i], sizeof(*block_tables[0]));
3073 if (block_tables[i] == NULL)
3074 goto fail;
3075 // max 2 block links (exits) per block
3076 block_link_pool[i] = calloc(block_link_pool_max_counts[i],
3077 sizeof(*block_link_pool[0]));
3078 if (block_link_pool[i] == NULL)
3079 goto fail;
3080
3081 inval_lookup[i] = calloc(ram_sizes[i] / ADDR_TO_BLOCK_PAGE,
3082 sizeof(inval_lookup[0]));
3083 if (inval_lookup[i] == NULL)
3084 goto fail;
3085
3086 hash_tables[i] = calloc(hash_table_sizes[i], sizeof(*hash_tables[0]));
3087 if (hash_tables[i] == NULL)
3088 goto fail;
3089 }
3090 memset(block_counts, 0, sizeof(block_counts));
3091 memset(block_link_pool_counts, 0, sizeof(block_link_pool_counts));
3092
3093 drc_cmn_init();
3094 tcache_ptr = tcache;
3095 sh2_generate_utils();
3096 host_instructions_updated(tcache, tcache_ptr);
3097
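    // carve up the single tcache buffer: region 0 starts after the shared
    // utility code and holds ROM/SDRAM blocks; regions 1 and 2 hold the
    // data array/BIOS blocks of the master and slave SH2 respectively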
3098 tcache_bases[0] = tcache_ptrs[0] = tcache_ptr;
3099 for (i = 1; i < ARRAY_SIZE(tcache_bases); i++)
3100 tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i - 1] + tcache_sizes[i - 1];
3101
3102#if (DRC_DEBUG & 4)
3103 for (i = 0; i < ARRAY_SIZE(block_tables); i++)
3104 tcache_dsm_ptrs[i] = tcache_bases[i];
3105 // disasm the utils
3106 tcache_dsm_ptrs[0] = tcache;
3107 do_host_disasm(0);
3108#endif
3109#if (DRC_DEBUG & 1)
3110 hash_collisions = 0;
3111#endif
3112 }
3113
3114 return 0;
3115
3116fail:
3117 sh2_drc_finish(sh2);
3118 return -1;
3119}
3120
3121void sh2_drc_finish(SH2 *sh2)
3122{
3123 int i;
3124
3125 if (block_tables[0] == NULL)
3126 return;
3127
3128 sh2_drc_flush_all();
3129
3130 for (i = 0; i < TCACHE_BUFFERS; i++) {
3131#if (DRC_DEBUG & 4)
3132 printf("~~~ tcache %d\n", i);
3133 tcache_dsm_ptrs[i] = tcache_bases[i];
3134 tcache_ptr = tcache_ptrs[i];
3135 do_host_disasm(i);
3136#endif
3137
3138 if (block_tables[i] != NULL)
3139 free(block_tables[i]);
3140 block_tables[i] = NULL;
3141 if (block_link_pool[i] != NULL)
3142 free(block_link_pool[i]);
3143 block_link_pool[i] = NULL;
3144
3145 if (inval_lookup[i] != NULL)
3146 free(inval_lookup[i]);
3147 inval_lookup[i] = NULL;
3148
3149 if (hash_tables[i] != NULL) {
3150 free(hash_tables[i]);
3151 hash_tables[i] = NULL;
3152 }
3153 }
3154
3155 drc_cmn_cleanup();
3156}
3157
3158#endif /* DRC_SH2 */
3159
3160static void *dr_get_pc_base(u32 pc, int is_slave)
3161{
3162 void *ret = NULL;
3163 u32 mask = 0;
3164
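  // return a host pointer biased so that base[pc / 2] fetches the opcode
  // at pc; (void *)-1 means "not directly fetchable", since NULL is valid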
3165 if ((pc & ~0x7ff) == 0) {
3166 // BIOS
3167 ret = is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
3168 mask = 0x7ff;
3169 }
3170 else if ((pc & 0xfffff000) == 0xc0000000) {
3171 // data array
3172 ret = Pico32xMem->data_array[is_slave];
3173 mask = 0xfff;
3174 }
3175 else if ((pc & 0xc6000000) == 0x06000000) {
3176 // SDRAM
3177 ret = Pico32xMem->sdram;
3178 mask = 0x03ffff;
3179 }
3180 else if ((pc & 0xc6000000) == 0x02000000) {
3181 // ROM
3182 ret = Pico.rom;
3183 mask = 0x3fffff;
3184 }
3185
3186 if (ret == NULL)
3187 return (void *)-1; // NULL is valid value
3188
3189 return (char *)ret - (pc & ~mask);
3190}
3191
3192void scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc_out,
3193 u32 *end_literals_out)
3194{
3195 u16 *dr_pc_base;
3196 u32 pc, op, tmp;
3197 u32 end_pc, end_literals = 0;
3198 struct op_data *opd;
3199 int next_is_delay = 0;
3200 int end_block = 0;
3201 int i, i_end;
3202
3203 memset(op_flags, 0, BLOCK_INSN_LIMIT);
3204
3205 dr_pc_base = dr_get_pc_base(base_pc, is_slave);
3206
3207 // 1st pass: disassemble
3208 for (i = 0, pc = base_pc; ; i++, pc += 2) {
3209 // we need an ops[] entry after the last one initialized,
3210 // so do it before end_block checks
3211 opd = &ops[i];
3212 opd->op = OP_UNHANDLED;
3213 opd->rm = -1;
3214 opd->source = opd->dest = 0;
3215 opd->cycles = 1;
3216 opd->imm = 0;
3217
3218 if (next_is_delay) {
3219 op_flags[i] |= OF_DELAY_OP;
3220 next_is_delay = 0;
3221 }
3222 else if (end_block || i >= BLOCK_INSN_LIMIT - 2)
3223 break;
3224
3225 op = FETCH_OP(pc);
3226 switch ((op & 0xf000) >> 12)
3227 {
3228 /////////////////////////////////////////////
3229 case 0x00:
3230 switch (op & 0x0f)
3231 {
3232 case 0x02:
3233 switch (GET_Fx())
3234 {
3235 case 0: // STC SR,Rn 0000nnnn00000010
3236 tmp = SHR_SR;
3237 break;
3238 case 1: // STC GBR,Rn 0000nnnn00010010
3239 tmp = SHR_GBR;
3240 break;
3241 case 2: // STC VBR,Rn 0000nnnn00100010
3242 tmp = SHR_VBR;
3243 break;
3244 default:
3245 goto undefined;
3246 }
3247 opd->op = OP_MOVE;
3248 opd->source = BITMASK1(tmp);
3249 opd->dest = BITMASK1(GET_Rn());
3250 break;
3251 case 0x03:
3252 CHECK_UNHANDLED_BITS(0xd0, undefined);
3253 // BRAF Rm 0000mmmm00100011
3254 // BSRF Rm 0000mmmm00000011
3255 opd->op = OP_BRANCH_RF;
3256 opd->rm = GET_Rn();
3257 opd->source = BITMASK1(opd->rm);
3258 opd->dest = BITMASK1(SHR_PC);
3259 if (!(op & 0x20))
3260 opd->dest |= BITMASK1(SHR_PR);
3261 opd->cycles = 2;
3262 next_is_delay = 1;
3263 end_block = 1;
3264 break;
3265 case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
3266 case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
3267 case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
3268 opd->source = BITMASK3(GET_Rm(), SHR_R0, GET_Rn());
3269 break;
3270 case 0x07:
3271 // MUL.L Rm,Rn 0000nnnnmmmm0111
3272 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3273 opd->dest = BITMASK1(SHR_MACL);
3274 opd->cycles = 2;
3275 break;
3276 case 0x08:
3277 CHECK_UNHANDLED_BITS(0xf00, undefined);
3278 switch (GET_Fx())
3279 {
3280 case 0: // CLRT 0000000000001000
3281 opd->op = OP_SETCLRT;
3282 opd->dest = BITMASK1(SHR_T);
3283 opd->imm = 0;
3284 break;
3285 case 1: // SETT 0000000000011000
3286 opd->op = OP_SETCLRT;
3287 opd->dest = BITMASK1(SHR_T);
3288 opd->imm = 1;
3289 break;
3290 case 2: // CLRMAC 0000000000101000
3291 opd->dest = BITMASK3(SHR_T, SHR_MACL, SHR_MACH);
3292 break;
3293 default:
3294 goto undefined;
3295 }
3296 break;
3297 case 0x09:
3298 switch (GET_Fx())
3299 {
3300 case 0: // NOP 0000000000001001
3301 CHECK_UNHANDLED_BITS(0xf00, undefined);
3302 break;
3303 case 1: // DIV0U 0000000000011001
3304 CHECK_UNHANDLED_BITS(0xf00, undefined);
3305 opd->dest = BITMASK2(SHR_SR, SHR_T);
3306 break;
3307 case 2: // MOVT Rn 0000nnnn00101001
3308 opd->source = BITMASK1(SHR_T);
3309 opd->dest = BITMASK1(GET_Rn());
3310 break;
3311 default:
3312 goto undefined;
3313 }
3314 break;
3315 case 0x0a:
3316 switch (GET_Fx())
3317 {
3318 case 0: // STS MACH,Rn 0000nnnn00001010
3319 tmp = SHR_MACH;
3320 break;
3321 case 1: // STS MACL,Rn 0000nnnn00011010
3322 tmp = SHR_MACL;
3323 break;
3324 case 2: // STS PR,Rn 0000nnnn00101010
3325 tmp = SHR_PR;
3326 break;
3327 default:
3328 goto undefined;
3329 }
3330 opd->op = OP_MOVE;
3331 opd->source = BITMASK1(tmp);
3332 opd->dest = BITMASK1(GET_Rn());
3333 break;
3334 case 0x0b:
3335 CHECK_UNHANDLED_BITS(0xf00, undefined);
3336 switch (GET_Fx())
3337 {
3338 case 0: // RTS 0000000000001011
3339 opd->op = OP_BRANCH_R;
3340 opd->rm = SHR_PR;
3341 opd->source = BITMASK1(opd->rm);
3342 opd->dest = BITMASK1(SHR_PC);
3343 opd->cycles = 2;
3344 next_is_delay = 1;
3345 end_block = 1;
3346 break;
3347 case 1: // SLEEP 0000000000011011
3348 opd->op = OP_SLEEP;
3349 end_block = 1;
3350 break;
3351 case 2: // RTE 0000000000101011
3352 opd->op = OP_RTE;
3353 opd->source = BITMASK1(SHR_SP);
3354 opd->dest = BITMASK2(SHR_SR, SHR_PC);
3355 opd->cycles = 4;
3356 next_is_delay = 1;
3357 end_block = 1;
3358 break;
3359 default:
3360 goto undefined;
3361 }
3362 break;
3363 case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
3364 case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
3365 case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
3366 opd->source = BITMASK2(GET_Rm(), SHR_R0);
3367 opd->dest = BITMASK1(GET_Rn());
3368 break;
3369 case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
3370 opd->source = BITMASK5(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH);
3371 opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
3372 opd->cycles = 3;
3373 break;
3374 default:
3375 goto undefined;
3376 }
3377 break;
3378
3379 /////////////////////////////////////////////
3380 case 0x01:
3381 // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
3382 opd->source = BITMASK1(GET_Rm());
3383 opd->source |= BITMASK1(GET_Rn());
3384 opd->imm = (op & 0x0f) * 4;
3385 break;
3386
3387 /////////////////////////////////////////////
3388 case 0x02:
3389 switch (op & 0x0f)
3390 {
3391 case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
3392 case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
3393 case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
3394 opd->source = BITMASK1(GET_Rm());
3395 opd->source |= BITMASK1(GET_Rn());
3396 break;
3397 case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
3398 case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
3399 case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
3400 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3401 opd->dest = BITMASK1(GET_Rn());
3402 break;
3403 case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
3404 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3405 opd->dest = BITMASK1(SHR_SR);
3406 break;
3407 case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
3408 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3409 opd->dest = BITMASK1(SHR_T);
3410 break;
3411 case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
3412 case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
3413 case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
3414 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3415 opd->dest = BITMASK1(GET_Rn());
3416 break;
3417 case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
3418 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3419 opd->dest = BITMASK1(SHR_T);
3420 break;
3421 case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
3422 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3423 opd->dest = BITMASK1(GET_Rn());
3424 break;
3425 case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
3426 case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
3427 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3428 opd->dest = BITMASK1(SHR_MACL);
3429 break;
3430 default:
3431 goto undefined;
3432 }
3433 break;
3434
3435 /////////////////////////////////////////////
3436 case 0x03:
3437 switch (op & 0x0f)
3438 {
3439 case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
3440 case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
3441 case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
3442 case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
3443 case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
3444 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3445 opd->dest = BITMASK1(SHR_T);
3446 break;
3447 case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
3448 opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_SR);
3449 opd->dest = BITMASK2(GET_Rn(), SHR_SR);
3450 break;
3451 case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
3452 case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
3453 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3454 opd->dest = BITMASK2(SHR_MACL, SHR_MACH);
3455 opd->cycles = 2;
3456 break;
3457 case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
3458 case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
3459 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3460 opd->dest = BITMASK1(GET_Rn());
3461 break;
3462 case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
3463 case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
3464 opd->source = BITMASK3(GET_Rm(), GET_Rn(), SHR_T);
3465 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3466 break;
3467 case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
3468 case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
3469 opd->source = BITMASK2(GET_Rm(), GET_Rn());
3470 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3471 break;
3472 default:
3473 goto undefined;
3474 }
3475 break;
3476
3477 /////////////////////////////////////////////
3478 case 0x04:
3479 switch (op & 0x0f)
3480 {
3481 case 0x00:
3482 switch (GET_Fx())
3483 {
3484 case 0: // SHLL Rn 0100nnnn00000000
3485 case 2: // SHAL Rn 0100nnnn00100000
3486 opd->source = BITMASK1(GET_Rn());
3487 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3488 break;
3489 case 1: // DT Rn 0100nnnn00010000
3490 opd->source = BITMASK1(GET_Rn());
3491 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3492 break;
3493 default:
3494 goto undefined;
3495 }
3496 break;
3497 case 0x01:
3498 switch (GET_Fx())
3499 {
3500 case 0: // SHLR Rn 0100nnnn00000001
3501 case 2: // SHAR Rn 0100nnnn00100001
3502 opd->source = BITMASK1(GET_Rn());
3503 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3504 break;
3505 case 1: // CMP/PZ Rn 0100nnnn00010001
3506 opd->source = BITMASK1(GET_Rn());
3507 opd->dest = BITMASK1(SHR_T);
3508 break;
3509 default:
3510 goto undefined;
3511 }
3512 break;
3513 case 0x02:
3514 case 0x03:
3515 switch (op & 0x3f)
3516 {
3517 case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
3518 tmp = SHR_MACH;
3519 break;
3520 case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
3521 tmp = SHR_MACL;
3522 break;
3523 case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
3524 tmp = SHR_PR;
3525 break;
3526 case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
3527 tmp = SHR_SR;
3528 opd->cycles = 2;
3529 break;
3530 case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
3531 tmp = SHR_GBR;
3532 opd->cycles = 2;
3533 break;
3534 case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
3535 tmp = SHR_VBR;
3536 opd->cycles = 2;
3537 break;
3538 default:
3539 goto undefined;
3540 }
3541 opd->source = BITMASK2(GET_Rn(), tmp);
3542 opd->dest = BITMASK1(GET_Rn());
3543 break;
3544 case 0x04:
3545 case 0x05:
3546 switch (op & 0x3f)
3547 {
3548 case 0x04: // ROTL Rn 0100nnnn00000100
3549 case 0x05: // ROTR Rn 0100nnnn00000101
3550 opd->source = BITMASK1(GET_Rn());
3551 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3552 break;
3553 case 0x24: // ROTCL Rn 0100nnnn00100100
3554 case 0x25: // ROTCR Rn 0100nnnn00100101
3555 opd->source = BITMASK2(GET_Rn(), SHR_T);
3556 opd->dest = BITMASK2(GET_Rn(), SHR_T);
3557 break;
3558 case 0x15: // CMP/PL Rn 0100nnnn00010101
3559 opd->source = BITMASK1(GET_Rn());
3560 opd->dest = BITMASK1(SHR_T);
3561 break;
3562 default:
3563 goto undefined;
3564 }
3565 break;
3566 case 0x06:
3567 case 0x07:
3568 switch (op & 0x3f)
3569 {
3570 case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
3571 tmp = SHR_MACH;
3572 break;
3573 case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
3574 tmp = SHR_MACL;
3575 break;
3576 case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
3577 tmp = SHR_PR;
3578 break;
3579 case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
3580 tmp = SHR_SR;
3581 opd->cycles = 3;
3582 break;
3583 case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
3584 tmp = SHR_GBR;
3585 opd->cycles = 3;
3586 break;
3587 case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
3588 tmp = SHR_VBR;
3589 opd->cycles = 3;
3590 break;
3591 default:
3592 goto undefined;
3593 }
3594 opd->source = BITMASK1(GET_Rn());
3595 opd->dest = BITMASK2(GET_Rn(), tmp);
3596 break;
3597 case 0x08:
3598 case 0x09:
3599 switch (GET_Fx())
3600 {
3601 case 0:
3602 // SHLL2 Rn 0100nnnn00001000
3603 // SHLR2 Rn 0100nnnn00001001
3604 break;
3605 case 1:
3606 // SHLL8 Rn 0100nnnn00011000
3607 // SHLR8 Rn 0100nnnn00011001
3608 break;
3609 case 2:
3610 // SHLL16 Rn 0100nnnn00101000
3611 // SHLR16 Rn 0100nnnn00101001
3612 break;
3613 default:
3614 goto undefined;
3615 }
3616 opd->source = BITMASK1(GET_Rn());
3617 opd->dest = BITMASK1(GET_Rn());
3618 break;
3619 case 0x0a:
3620 switch (GET_Fx())
3621 {
3622 case 0: // LDS Rm,MACH 0100mmmm00001010
3623 tmp = SHR_MACH;
3624 break;
3625 case 1: // LDS Rm,MACL 0100mmmm00011010
3626 tmp = SHR_MACL;
3627 break;
3628 case 2: // LDS Rm,PR 0100mmmm00101010
3629 tmp = SHR_PR;
3630 break;
3631 default:
3632 goto undefined;
3633 }
3634 opd->op = OP_MOVE;
3635 opd->source = BITMASK1(GET_Rn());
3636 opd->dest = BITMASK1(tmp);
3637 break;
3638 case 0x0b:
3639 switch (GET_Fx())
3640 {
3641 case 0: // JSR @Rm 0100mmmm00001011
3642 opd->dest = BITMASK1(SHR_PR);
3643 case 2: // JMP @Rm 0100mmmm00101011
3644 opd->op = OP_BRANCH_R;
3645 opd->rm = GET_Rn();
3646 opd->source = BITMASK1(opd->rm);
3647 opd->dest |= BITMASK1(SHR_PC);
3648 opd->cycles = 2;
3649 next_is_delay = 1;
3650 end_block = 1;
3651 break;
3652 case 1: // TAS.B @Rn 0100nnnn00011011
3653 opd->source = BITMASK1(GET_Rn());
3654 opd->dest = BITMASK1(SHR_T);
3655 opd->cycles = 4;
3656 break;
3657 default:
3658 goto undefined;
3659 }
3660 break;
3661 case 0x0e:
3662 switch (GET_Fx())
3663 {
3664 case 0: // LDC Rm,SR 0100mmmm00001110
3665 tmp = SHR_SR;
3666 break;
3667 case 1: // LDC Rm,GBR 0100mmmm00011110
3668 tmp = SHR_GBR;
3669 break;
3670 case 2: // LDC Rm,VBR 0100mmmm00101110
3671 tmp = SHR_VBR;
3672 break;
3673 default:
3674 goto undefined;
3675 }
3676 opd->op = OP_MOVE;
3677 opd->source = BITMASK1(GET_Rn());
3678 opd->dest = BITMASK1(tmp);
3679 break;
3680 case 0x0f:
3681 // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
3682 opd->source = BITMASK5(GET_Rm(), GET_Rn(), SHR_SR, SHR_MACL, SHR_MACH);
3683 opd->dest = BITMASK4(GET_Rm(), GET_Rn(), SHR_MACL, SHR_MACH);
3684 opd->cycles = 3;
3685 break;
3686 default:
3687 goto undefined;
3688 }
3689 break;
3690
3691 /////////////////////////////////////////////
3692 case 0x05:
3693 // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
3694 opd->source = BITMASK1(GET_Rm());
3695 opd->dest = BITMASK1(GET_Rn());
3696 opd->imm = (op & 0x0f) * 4;
3697 break;
3698
3699 /////////////////////////////////////////////
3700 case 0x06:
3701 switch (op & 0x0f)
3702 {
3703 case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
3704 case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
3705 case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
3706 opd->dest = BITMASK1(GET_Rm());
3707 case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
3708 case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
3709 case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
3710 opd->source = BITMASK1(GET_Rm());
3711 opd->dest |= BITMASK1(GET_Rn());
3712 break;
3713 case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
3714 opd->op = OP_MOVE;
3715 goto arith_rmrn;
3716 case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
3717 case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
3718 case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
3719 case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
3720 case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
3721 case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
3722 case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
3723 case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
3724 case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
3725 arith_rmrn:
3726 opd->source = BITMASK1(GET_Rm());
3727 opd->dest = BITMASK1(GET_Rn());
3728 break;
3729 }
3730 break;
3731
3732 /////////////////////////////////////////////
3733 case 0x07:
3734 // ADD #imm,Rn 0111nnnniiiiiiii
3735 opd->source = opd->dest = BITMASK1(GET_Rn());
3736 opd->imm = (int)(signed char)op;
3737 break;
3738
3739 /////////////////////////////////////////////
3740 case 0x08:
3741 switch (op & 0x0f00)
3742 {
3743 case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
3744 opd->source = BITMASK2(GET_Rm(), SHR_R0);
3745 opd->imm = (op & 0x0f);
3746 break;
3747 case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
3748 opd->source = BITMASK2(GET_Rm(), SHR_R0);
3749 opd->imm = (op & 0x0f) * 2;
3750 break;
3751 case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
3752 opd->source = BITMASK1(GET_Rm());
3753 opd->dest = BITMASK1(SHR_R0);
3754 opd->imm = (op & 0x0f);
3755 break;
3756 case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
3757 opd->source = BITMASK1(GET_Rm());
3758 opd->dest = BITMASK1(SHR_R0);
3759 opd->imm = (op & 0x0f) * 2;
3760 break;
3761 case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
3762 opd->source = BITMASK1(SHR_R0);
3763 opd->dest = BITMASK1(SHR_T);
3764 opd->imm = (int)(signed char)op;
3765 break;
3766 case 0x0d00: // BT/S label 10001101dddddddd
3767 case 0x0f00: // BF/S label 10001111dddddddd
3768 next_is_delay = 1;
3769 // fallthrough
3770 case 0x0900: // BT label 10001001dddddddd
3771 case 0x0b00: // BF label 10001011dddddddd
3772 opd->op = (op & 0x0200) ? OP_BRANCH_CF : OP_BRANCH_CT;
3773 opd->source = BITMASK1(SHR_T);
3774 opd->dest = BITMASK1(SHR_PC);
3775 opd->imm = ((signed int)(op << 24) >> 23);
3776 opd->imm += pc + 4;
3777 if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
3778 op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
3779 break;
3780 default:
3781 goto undefined;
3782 }
3783 break;
3784
3785 /////////////////////////////////////////////
3786 case 0x09:
3787 // MOV.W @(disp,PC),Rn 1001nnnndddddddd
3788 opd->op = OP_LOAD_POOL;
3789 opd->source = BITMASK1(SHR_PC);
3790 opd->dest = BITMASK1(GET_Rn());
3791 opd->imm = pc + 4 + (op & 0xff) * 2;
3792 opd->size = 1;
3793 break;
3794
3795 /////////////////////////////////////////////
3796 case 0x0b:
3797 // BSR label 1011dddddddddddd
3798 opd->dest = BITMASK1(SHR_PR);
3799 case 0x0a:
3800 // BRA label 1010dddddddddddd
3801 opd->op = OP_BRANCH;
3802 opd->dest |= BITMASK1(SHR_PC);
3803 opd->imm = ((signed int)(op << 20) >> 19);
3804 opd->imm += pc + 4;
3805 opd->cycles = 2;
3806 next_is_delay = 1;
3807 end_block = 1;
3808 if (base_pc <= opd->imm && opd->imm < base_pc + BLOCK_INSN_LIMIT * 2)
3809 op_flags[(opd->imm - base_pc) / 2] |= OF_BTARGET;
3810 break;
3811
3812 /////////////////////////////////////////////
3813 case 0x0c:
3814 switch (op & 0x0f00)
3815 {
3816 case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
3817 case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
3818 case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
3819 opd->source = BITMASK2(SHR_GBR, SHR_R0);
3820 opd->size = (op & 0x300) >> 8;
3821 opd->imm = (op & 0xff) << opd->size;
3822 break;
3823 case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
3824 case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
3825 case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
3826 opd->source = BITMASK1(SHR_GBR);
3827 opd->dest = BITMASK1(SHR_R0);
3828 opd->size = (op & 0x300) >> 8;
3829 opd->imm = (op & 0xff) << opd->size;
3830 break;
3831 case 0x0300: // TRAPA #imm 11000011iiiiiiii
3832 opd->source = BITMASK2(SHR_PC, SHR_SR);
3833 opd->dest = BITMASK1(SHR_PC);
3834 opd->imm = (op & 0xff) * 4;
3835 opd->cycles = 8;
3836 end_block = 1; // FIXME
3837 break;
3838 case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
3839 opd->dest = BITMASK1(SHR_R0);
3840 opd->imm = (pc + 4 + (op & 0xff) * 4) & ~3;
3841 break;
3842 case 0x0800: // TST #imm,R0 11001000iiiiiiii
3843 opd->source = BITMASK1(SHR_R0);
3844 opd->dest = BITMASK1(SHR_T);
3845 opd->imm = op & 0xff;
3846 break;
3847 case 0x0900: // AND #imm,R0 11001001iiiiiiii
3848 opd->source = opd->dest = BITMASK1(SHR_R0);
3849 opd->imm = op & 0xff;
3850 break;
3851 case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
3852 opd->source = opd->dest = BITMASK1(SHR_R0);
3853 opd->imm = op & 0xff;
3854 break;
3855 case 0x0b00: // OR #imm,R0 11001011iiiiiiii
3856 opd->source = opd->dest = BITMASK1(SHR_R0);
3857 opd->imm = op & 0xff;
3858 break;
3859 case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
3860 opd->source = BITMASK2(SHR_GBR, SHR_R0);
3861 opd->dest = BITMASK1(SHR_T);
3862 opd->imm = op & 0xff;
3863 opd->cycles = 3;
3864 break;
3865 case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
3866 case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
3867 case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
3868 opd->source = BITMASK2(SHR_GBR, SHR_R0);
3869 opd->imm = op & 0xff;
3870 opd->cycles = 3;
3871 break;
3872 default:
3873 goto undefined;
3874 }
3875 break;
3876
3877 /////////////////////////////////////////////
3878 case 0x0d:
3879 // MOV.L @(disp,PC),Rn 1101nnnndddddddd
3880 opd->op = OP_LOAD_POOL;
3881 opd->source = BITMASK1(SHR_PC);
3882 opd->dest = BITMASK1(GET_Rn());
3883 opd->imm = (pc + 4 + (op & 0xff) * 4) & ~3;
3884 opd->size = 2;
3885 break;
3886
3887 /////////////////////////////////////////////
3888 case 0x0e:
3889 // MOV #imm,Rn 1110nnnniiiiiiii
3890 opd->dest = BITMASK1(GET_Rn());
3891 opd->imm = (u32)(signed int)(signed char)op;
3892 break;
3893
3894 default:
3895 undefined:
3896 elprintf(EL_ANOMALY, "%csh2 drc: unhandled op %04x @ %08x",
3897 is_slave ? 's' : 'm', op, pc);
3898 break;
3899 }
3900 }
3901 i_end = i;
3902 end_pc = pc;
3903
3904 // 2nd pass: some analysis
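  // forward-propagate statically known T values; conditional branches with
  // a known outcome become unconditional and end the block early; literal
  // pool loads push end_literals past the end of the code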
3905 for (i = 0; i < i_end; i++) {
3906 opd = &ops[i];
3907
3908 // propagate T (TODO: DIV0U)
3909 if ((opd->op == OP_SETCLRT && !opd->imm) || opd->op == OP_BRANCH_CT)
3910 op_flags[i + 1] |= OF_T_CLEAR;
3911 else if ((opd->op == OP_SETCLRT && opd->imm) || opd->op == OP_BRANCH_CF)
3912 op_flags[i + 1] |= OF_T_SET;
3913
3914 if ((op_flags[i] & OF_BTARGET) || (opd->dest & BITMASK1(SHR_T)))
3915 op_flags[i] &= ~(OF_T_SET | OF_T_CLEAR);
3916 else
3917 op_flags[i + 1] |= op_flags[i] & (OF_T_SET | OF_T_CLEAR);
3918
3919 if ((opd->op == OP_BRANCH_CT && (op_flags[i] & OF_T_SET))
3920 || (opd->op == OP_BRANCH_CF && (op_flags[i] & OF_T_CLEAR)))
3921 {
3922 opd->op = OP_BRANCH;
3923 opd->cycles = 3;
3924 i_end = i + 1;
3925 if (op_flags[i + 1] & OF_DELAY_OP) {
3926 opd->cycles = 2;
3927 i_end++;
3928 }
3929 }
3930 else if (opd->op == OP_LOAD_POOL)
3931 {
3932 if (opd->imm < end_pc + MAX_LITERAL_OFFSET) {
3933 if (end_literals < opd->imm + opd->size * 2)
3934 end_literals = opd->imm + opd->size * 2;
3935 }
3936 }
3937 }
3938 end_pc = base_pc + i_end * 2;
3939 if (end_literals < end_pc)
3940 end_literals = end_pc;
3941
3942 *end_pc_out = end_pc;
3943 if (end_literals_out != NULL)
3944 *end_literals_out = end_literals;
3945}
3946
3947// vim:shiftwidth=2:ts=2:expandtab