drc: rework invalidation
[picodrive.git] / cpu / sh2 / compiler.c
1/*
2 * SH2 recompiler
3 * (C) notaz, 2009,2010
4 *
5 * This work is licensed under the terms of MAME license.
6 * See COPYING file in the top-level directory.
7 *
8 * notes:
9 * - tcache, block descriptor, link buffer overflows result in sh2_translate()
10 * failure, followed by full tcache invalidation for that region
11 * - jumps between blocks are tracked for SMC handling (in block_links[]),
12 * except jumps between different tcaches
13 * - non-main block entries are called subblocks, as they have the same tracking
14 * structures that main blocks have.
15 *
16 * implemented:
17 * - static register allocation
18 * - remaining register caching and tracking in temporaries
19 * - block-local branch linking
20 * - block linking (except between tcaches)
21 * - some constant propagation
22 *
23 * TODO:
24 * - better constant propagation
25 * - stack caching?
26 * - bug fixing
27 */
28#include <stddef.h>
29#include <stdio.h>
30#include <stdlib.h>
31#include <assert.h>
32
33#include "../../pico/pico_int.h"
34#include "sh2.h"
35#include "compiler.h"
36#include "../drc/cmn.h"
37#include "../debug.h"
38
39// features
40#define PROPAGATE_CONSTANTS 1
41#define LINK_BRANCHES 1
42
43// limits (per block)
44#define MAX_BLOCK_SIZE (BLOCK_CYCLE_LIMIT * 6 * 6)
45
46// max literal offset from the block end
47#define MAX_LITERAL_OFFSET 32*2
48#define MAX_LITERALS (BLOCK_CYCLE_LIMIT / 4)
49#define MAX_LOCAL_BRANCHES 32
50
51///
52#define FETCH_OP(pc) \
53 dr_pc_base[(pc) / 2]
54
55#define FETCH32(a) \
56 ((dr_pc_base[(a) / 2] << 16) | dr_pc_base[(a) / 2 + 1])
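// usage sketch (illustrative, not part of the translator): dr_get_pc_base()
// sets dr_pc_base up so that dr_pc_base[pc / 2] is the big-endian opcode word
// at SH2 address pc, hence
//   op  = FETCH_OP(pc);          // one 16-bit opcode
//   lit = FETCH32(literal_pc);   // PC-relative literal, two u16 halves joined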
57
58#ifdef DRC_SH2
59
60// debug stuff
61// 1 - ?
62// 2 - ?
63// 4 - log asm
64// {
65#ifndef DRC_DEBUG
66#define DRC_DEBUG 0
67#endif
68
69#if DRC_DEBUG
70#define dbg(l,...) { \
71 if ((l) & DRC_DEBUG) \
72 elprintf(EL_STATUS, ##__VA_ARGS__); \
73}
74
75#include "mame/sh2dasm.h"
76#include <platform/libpicofe/linux/host_dasm.h>
77static int insns_compiled, hash_collisions, host_insn_count;
78#define COUNT_OP \
79 host_insn_count++
80#else // !DRC_DEBUG
81#define COUNT_OP
82#define dbg(...)
83#endif
84
85#if (DRC_DEBUG & 4)
86static u8 *tcache_dsm_ptrs[3];
87static char sh2dasm_buff[64];
88#define do_host_disasm(tcid) \
89 host_dasm(tcache_dsm_ptrs[tcid], tcache_ptr - tcache_dsm_ptrs[tcid]); \
90 tcache_dsm_ptrs[tcid] = tcache_ptr
91#else
92#define do_host_disasm(x)
93#endif
94
95#if (DRC_DEBUG & 8) || defined(PDB)
96static void REGPARM(3) *sh2_drc_log_entry(void *block, SH2 *sh2, u32 sr)
97{
98 if (block != NULL) {
99 dbg(8, "= %csh2 enter %08x %p, c=%d", sh2->is_slave ? 's' : 'm',
100 sh2->pc, block, (signed int)sr >> 12);
101 pdb_step(sh2, sh2->pc);
102 }
103 return block;
104}
105#endif
106// } debug
107
108#define TCACHE_BUFFERS 3
109
110// we have 3 translation cache buffers, split from one drc/cmn buffer.
111// BIOS shares tcache with data array because it's only used for init
112// and can be discarded early
113// XXX: need to tune sizes
114static const int tcache_sizes[TCACHE_BUFFERS] = {
115 DRC_TCACHE_SIZE * 6 / 8, // ROM, DRAM
116 DRC_TCACHE_SIZE / 8, // BIOS, data array in master sh2
117 DRC_TCACHE_SIZE / 8, // ... slave
118};
119
120static u8 *tcache_bases[TCACHE_BUFFERS];
121static u8 *tcache_ptrs[TCACHE_BUFFERS];
122
123// ptr for code emitters
124static u8 *tcache_ptr;
125
126typedef struct block_desc_ {
127 u32 addr; // SH2 PC address
128 u32 end_addr; // address after last op
129 void *tcache_ptr; // translated block for above PC
130 struct block_desc_ *next; // next block with the same PC hash
131#if (DRC_DEBUG & 2)
132 int refcount;
133#endif
134} block_desc;
135
136typedef struct block_link_ {
137 u32 target_pc;
138 void *jump; // insn address
139// struct block_link_ *next;
140} block_link;
141
142static const int block_max_counts[TCACHE_BUFFERS] = {
143 4*1024,
144 256,
145 256,
146};
147static block_desc *block_tables[TCACHE_BUFFERS];
148static block_link *block_links[TCACHE_BUFFERS];
149static int block_counts[TCACHE_BUFFERS];
150static int block_link_counts[TCACHE_BUFFERS];
151
152#define BLOCKID_OVERLAP 0xfffe
153#define BLOCKID_MAX block_max_counts[0]
154
155// host register tracking
156enum {
157 HR_FREE,
158 HR_CACHED, // 'val' has sh2_reg_e
159// HR_CONST, // 'val' has a constant
160 HR_TEMP, // reg used for temp storage
161};
162
163enum {
164 HRF_DIRTY = 1 << 0, // reg has "dirty" value to be written to ctx
165 HRF_LOCKED = 1 << 1, // HR_CACHED can't be evicted
166};
167
168typedef struct {
169 u32 hreg:5; // "host" reg
170 u32 greg:5; // "guest" reg
171 u32 type:3;
172 u32 flags:3;
173 u32 stamp:16; // kind of a timestamp
174} temp_reg_t;
175
176// note: reg_temp[] must have at least as many registers as the
177// handlers use in the worst case (currently 4)
178#ifdef __arm__
179#include "../drc/emit_arm.c"
180
181static const int reg_map_g2h[] = {
182 4, 5, 6, 7,
183 8, -1, -1, -1,
184 -1, -1, -1, -1,
185 -1, -1, -1, 9, // r12 .. sp
186 -1, -1, -1, 10, // SHR_PC, SHR_PPC, SHR_PR, SHR_SR,
187 -1, -1, -1, -1, // SHR_GBR, SHR_VBR, SHR_MACH, SHR_MACL,
188};
189
190static temp_reg_t reg_temp[] = {
191 { 0, },
192 { 1, },
193 { 12, },
194 { 14, },
195 { 2, },
196 { 3, },
197};
198
199#elif defined(__i386__)
200#include "../drc/emit_x86.c"
201
202static const int reg_map_g2h[] = {
203 xSI,-1, -1, -1,
204 -1, -1, -1, -1,
205 -1, -1, -1, -1,
206 -1, -1, -1, -1,
207 -1, -1, -1, xDI,
208 -1, -1, -1, -1,
209};
210
211// ax, cx, dx are usually temporaries by convention
212static temp_reg_t reg_temp[] = {
213 { xAX, },
214 { xBX, },
215 { xCX, },
216 { xDX, },
217};
218
219#else
220#error unsupported arch
221#endif
222
223#define T 0x00000001
224#define S 0x00000002
225#define I 0x000000f0
226#define Q 0x00000100
227#define M 0x00000200
228#define T_save 0x00000800
229
230#define I_SHIFT 4
231#define Q_SHIFT 8
232#define M_SHIFT 9
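// note: the flag bits above share the cached SR with the DRC's cycle counter;
// the remaining cycle budget is kept in bits 12 and up (see FLUSH_CYCLES() and
// the "cmp #0 / branch-LE to sh2_drc_exit" check emitted at block entries, and
// sh2_drc_log_entry() printing (signed)sr >> 12 as the cycle count)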
233
234// ROM hash table
235#define MAX_HASH_ENTRIES 1024
236#define HASH_MASK (MAX_HASH_ENTRIES - 1)
237static void **hash_table;
238
239#define HASH_FUNC(hash_tab, addr) \
240 ((block_desc **)(hash_tab))[(addr) & HASH_MASK]
241
242static void REGPARM(1) (*sh2_drc_entry)(SH2 *sh2);
243static void (*sh2_drc_dispatcher)(void);
244static void (*sh2_drc_exit)(void);
245static void (*sh2_drc_test_irq)(void);
246
247static u32 REGPARM(2) (*sh2_drc_read8)(u32 a, SH2 *sh2);
248static u32 REGPARM(2) (*sh2_drc_read16)(u32 a, SH2 *sh2);
249static u32 REGPARM(2) (*sh2_drc_read32)(u32 a, SH2 *sh2);
250static void REGPARM(2) (*sh2_drc_write8)(u32 a, u32 d);
251static void REGPARM(2) (*sh2_drc_write8_slot)(u32 a, u32 d);
252static void REGPARM(2) (*sh2_drc_write16)(u32 a, u32 d);
253static void REGPARM(2) (*sh2_drc_write16_slot)(u32 a, u32 d);
254static int REGPARM(3) (*sh2_drc_write32)(u32 a, u32 d, SH2 *sh2);
255
256// address space stuff
257static int dr_ctx_get_mem_ptr(u32 a, u32 *mask)
258{
259 int poffs = -1;
260
261 if ((a & ~0x7ff) == 0) {
262 // BIOS
263 poffs = offsetof(SH2, p_bios);
264 *mask = 0x7ff;
265 }
266 else if ((a & 0xfffff000) == 0xc0000000) {
267 // data array
268 poffs = offsetof(SH2, p_da);
269 *mask = 0xfff;
270 }
271 else if ((a & 0xc6000000) == 0x06000000) {
272 // SDRAM
273 poffs = offsetof(SH2, p_sdram);
274 *mask = 0x03ffff;
275 }
276 else if ((a & 0xc6000000) == 0x02000000) {
277 // ROM
278 poffs = offsetof(SH2, p_rom);
279 *mask = 0x3fffff;
280 }
281
282 return poffs;
283}
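// example (illustrative): for a = 0x06001234 the SDRAM case matches, so
// poffs = offsetof(SH2, p_sdram) and *mask = 0x03ffff; the emitter then loads
// the base pointer from the context at poffs and accesses host memory at
// p_sdram + (a & mask) (see emit_get_rbase_and_offs() below)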
284
285static block_desc *dr_get_bd(u32 pc, int is_slave, int *tcache_id)
286{
287 *tcache_id = 0;
288
289 // we have full block id tables for data_array and RAM
290 // BIOS goes to data_array table too
291 if ((pc & 0xe0000000) == 0xc0000000 || (pc & ~0xfff) == 0) {
292 int blkid = Pico32xMem->drcblk_da[is_slave][(pc & 0xfff) >> SH2_DRCBLK_DA_SHIFT];
293 *tcache_id = 1 + is_slave;
294 if (blkid & 1)
295 return &block_tables[*tcache_id][blkid >> 1];
296 }
297 // RAM
298 else if ((pc & 0xc6000000) == 0x06000000) {
299 int blkid = Pico32xMem->drcblk_ram[(pc & 0x3ffff) >> SH2_DRCBLK_RAM_SHIFT];
300 if (blkid & 1)
301 return &block_tables[0][blkid >> 1];
302 }
303 // ROM
304 else if ((pc & 0xc6000000) == 0x02000000) {
305 block_desc *bd = HASH_FUNC(hash_table, pc);
306
307 for (; bd != NULL; bd = bd->next)
308 if (bd->addr == pc)
309 return bd;
310 }
311
312 return NULL;
313}
314
315// ---------------------------------------------------------------
316
317// block management
318static void REGPARM(1) flush_tcache(int tcid)
319{
320 dbg(1, "tcache #%d flush! (%d/%d, bds %d/%d)", tcid,
321 tcache_ptrs[tcid] - tcache_bases[tcid], tcache_sizes[tcid],
322 block_counts[tcid], block_max_counts[tcid]);
323
324 block_counts[tcid] = 0;
325 block_link_counts[tcid] = 0;
326 tcache_ptrs[tcid] = tcache_bases[tcid];
327 if (tcid == 0) { // ROM, RAM
328 memset(hash_table, 0, sizeof(hash_table[0]) * MAX_HASH_ENTRIES);
329 memset(Pico32xMem->drcblk_ram, 0, sizeof(Pico32xMem->drcblk_ram));
330 }
331 else
332 memset(Pico32xMem->drcblk_da[tcid - 1], 0, sizeof(Pico32xMem->drcblk_da[0]));
333#if (DRC_DEBUG & 4)
334 tcache_dsm_ptrs[tcid] = tcache_bases[tcid];
335#endif
336}
337
338#if LINK_BRANCHES
339// add block links (tracked branches)
340static int dr_add_block_link(u32 target_pc, void *jump, int tcache_id)
341{
342 block_link *bl = block_links[tcache_id];
343 int cnt = block_link_counts[tcache_id];
344
345 if (cnt >= block_max_counts[tcache_id] * 2) {
346 dbg(1, "bl overflow for tcache %d\n", tcache_id);
347 return -1;
348 }
349
350 bl[cnt].target_pc = target_pc;
351 bl[cnt].jump = jump;
352 block_link_counts[tcache_id]++;
353
354 return 0;
355}
356#endif
357
358static block_desc *dr_add_block(u32 addr, u32 end_addr, int is_slave, int *blk_id)
359{
360 block_desc *bd;
361 int tcache_id;
362 int *bcount;
363
364 bd = dr_get_bd(addr, is_slave, &tcache_id);
365 if (bd != NULL) {
366 dbg(2, "block override for %08x", addr);
367 bd->tcache_ptr = tcache_ptr;
368 bd->end_addr = end_addr;
369 *blk_id = bd - block_tables[tcache_id];
370 return bd;
371 }
372
373 bcount = &block_counts[tcache_id];
374 if (*bcount >= block_max_counts[tcache_id]) {
375 dbg(1, "bd overflow for tcache %d", tcache_id);
376 return NULL;
377 }
378 if (*bcount == 0)
379 (*bcount)++; // not using descriptor 0
380
381 bd = &block_tables[tcache_id][*bcount];
382 bd->addr = addr;
383 bd->end_addr = end_addr;
384 bd->tcache_ptr = tcache_ptr;
385 *blk_id = *bcount;
386 (*bcount)++;
387
388 if ((addr & 0xc6000000) == 0x02000000) { // ROM
389 bd->next = HASH_FUNC(hash_table, addr);
390 HASH_FUNC(hash_table, addr) = bd;
391#if (DRC_DEBUG & 2)
392 if (bd->next != NULL) {
393 printf(" hash collision with %08x\n", bd->next->addr);
394 hash_collisions++;
395 }
396#endif
397 }
398
399 return bd;
400}
401
402static void REGPARM(3) *dr_lookup_block(u32 pc, int is_slave, int *tcache_id)
403{
404 block_desc *bd = NULL;
405 void *block = NULL;
406
407 bd = dr_get_bd(pc, is_slave, tcache_id);
408 if (bd != NULL)
409 block = bd->tcache_ptr;
410
411#if (DRC_DEBUG & 2)
412 if (bd != NULL)
413 bd->refcount++;
414#endif
415 return block;
416}
417
418static void *dr_failure(void)
419{
420 lprintf("recompilation failed\n");
421 exit(1);
422}
423
424static void *dr_prepare_ext_branch(u32 pc, SH2 *sh2, int tcache_id)
425{
426#if LINK_BRANCHES
427 int target_tcache_id;
428 void *target;
429 int ret;
430
431 target = dr_lookup_block(pc, sh2->is_slave, &target_tcache_id);
432 if (target_tcache_id == tcache_id) {
433 // allow linking blocks only from local cache
434 ret = dr_add_block_link(pc, tcache_ptr, tcache_id);
435 if (ret < 0)
436 return NULL;
437 }
438 if (target == NULL || target_tcache_id != tcache_id)
439 target = sh2_drc_dispatcher;
440
441 return target;
442#else
443 return sh2_drc_dispatcher;
444#endif
445}
446
447static void dr_link_blocks(void *target, u32 pc, int tcache_id)
448{
449#if LINK_BRANCHES
450 block_link *bl = block_links[tcache_id];
451 int cnt = block_link_counts[tcache_id];
452 int i;
453
454 for (i = 0; i < cnt; i++) {
455 if (bl[i].target_pc == pc) {
456 dbg(2, "- link from %p", bl[i].jump);
457 emith_jump_patch(bl[i].jump, target);
458 // XXX: sync ARM caches (old jump should be fine)?
459 }
460 }
461#endif
462}
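// rough linking flow (illustrative sketch):
//   target = dr_prepare_ext_branch(branch_pc, sh2, tcache_id);
//   ...emit a jump to target...  // goes through sh2_drc_dispatcher until
//                                // branch_pc itself gets translated
// dr_prepare_ext_branch() records the jump location via dr_add_block_link();
// when sh2_translate() later compiles branch_pc, dr_link_blocks() patches all
// recorded jumps to point directly at the new block entry.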
463
464#define ADD_TO_ARRAY(array, count, item, failcode) \
465 array[count++] = item; \
466 if (count >= ARRAY_SIZE(array)) { \
467 dbg(1, "warning: " #array " overflow"); \
468 failcode; \
469 }
470
471static int find_in_array(u32 *array, size_t size, u32 what)
472{
473 size_t i;
474 for (i = 0; i < size; i++)
475 if (what == array[i])
476 return i;
477
478 return -1;
479}
480
481// ---------------------------------------------------------------
482
483// register cache / constant propagation stuff
484typedef enum {
485 RC_GR_READ,
486 RC_GR_WRITE,
487 RC_GR_RMW,
488} rc_gr_mode;
489
490static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking);
491
492// guest regs with constants
493static u32 dr_gcregs[24];
494// a mask of constant/dirty regs
495static u32 dr_gcregs_mask;
496static u32 dr_gcregs_dirty;
497
498#if PROPAGATE_CONSTANTS
499static void gconst_new(sh2_reg_e r, u32 val)
500{
501 int i;
502
503 dr_gcregs_mask |= 1 << r;
504 dr_gcregs_dirty |= 1 << r;
505 dr_gcregs[r] = val;
506
507 // throw away old r that we might have cached
508 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
509 if ((reg_temp[i].type == HR_CACHED) &&
510 reg_temp[i].greg == r) {
511 reg_temp[i].type = HR_FREE;
512 reg_temp[i].flags = 0;
513 }
514 }
515}
516#endif
517
518static int gconst_get(sh2_reg_e r, u32 *val)
519{
520 if (dr_gcregs_mask & (1 << r)) {
521 *val = dr_gcregs[r];
522 return 1;
523 }
524 return 0;
525}
526
527static int gconst_check(sh2_reg_e r)
528{
529 if ((dr_gcregs_mask | dr_gcregs_dirty) & (1 << r))
530 return 1;
531 return 0;
532}
533
534// update hr if dirty, else do nothing
535static int gconst_try_read(int hr, sh2_reg_e r)
536{
537 if (dr_gcregs_dirty & (1 << r)) {
538 emith_move_r_imm(hr, dr_gcregs[r]);
539 dr_gcregs_dirty &= ~(1 << r);
540 return 1;
541 }
542 return 0;
543}
544
545static void gconst_check_evict(sh2_reg_e r)
546{
547 if (dr_gcregs_mask & (1 << r))
548 // no longer cached in reg, make dirty again
549 dr_gcregs_dirty |= 1 << r;
550}
551
552static void gconst_kill(sh2_reg_e r)
553{
554 dr_gcregs_mask &= ~(1 << r);
555 dr_gcregs_dirty &= ~(1 << r);
556}
557
558static void gconst_clean(void)
559{
560 int i;
561
562 for (i = 0; i < ARRAY_SIZE(dr_gcregs); i++)
563 if (dr_gcregs_dirty & (1 << i)) {
564 // using RC_GR_READ here: it will call gconst_try_read,
565 // cache the reg and mark it dirty.
566 rcache_get_reg_(i, RC_GR_READ, 0);
567 }
568}
569
570static void gconst_invalidate(void)
571{
572 dr_gcregs_mask = dr_gcregs_dirty = 0;
573}
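// constant propagation life cycle, roughly (illustrative sketch):
//   gconst_new(SHR_R0, imm);                 // e.g. MOV #imm,R0: no host code
//                                            // is emitted at this point
//   hr = rcache_get_reg(SHR_R0, RC_GR_READ); // first real use: gconst_try_read()
//                                            // emits a single move-immediate
//   rcache_clean();                          // gconst_clean() materializes any
//                                            // still-dirty constants before
//                                            // memory handlers / block exits
// writes to the guest reg (RC_GR_WRITE/RMW) call gconst_kill() and drop the
// constant again.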
574
575static u16 rcache_counter;
576
577static temp_reg_t *rcache_evict(void)
578{
579 // evict reg with oldest stamp
580 int i, oldest = -1;
581 u16 min_stamp = (u16)-1;
582
583 for (i = 0; i < ARRAY_SIZE(reg_temp); i++) {
584 if (reg_temp[i].type == HR_CACHED && !(reg_temp[i].flags & HRF_LOCKED) &&
585 reg_temp[i].stamp <= min_stamp) {
586 min_stamp = reg_temp[i].stamp;
587 oldest = i;
588 }
589 }
590
591 if (oldest == -1) {
592 printf("no registers to evict, aborting\n");
593 exit(1);
594 }
595
596 i = oldest;
597 if (reg_temp[i].type == HR_CACHED) {
598 if (reg_temp[i].flags & HRF_DIRTY)
599 // writeback
600 emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
601 gconst_check_evict(reg_temp[i].greg);
602 }
603
604 reg_temp[i].type = HR_FREE;
605 reg_temp[i].flags = 0;
606 return &reg_temp[i];
607}
608
609static int get_reg_static(sh2_reg_e r, rc_gr_mode mode)
610{
611 int i = reg_map_g2h[r];
612 if (i != -1) {
613 if (mode != RC_GR_WRITE)
614 gconst_try_read(i, r);
615 }
616 return i;
617}
618
619// note: must not be called when doing conditional code
620static int rcache_get_reg_(sh2_reg_e r, rc_gr_mode mode, int do_locking)
621{
622 temp_reg_t *tr;
623 int i, ret;
624
625 // maybe statically mapped?
626 ret = get_reg_static(r, mode);
627 if (ret != -1)
628 goto end;
629
630 rcache_counter++;
631
632 // maybe already cached?
633 // if so, prefer it over gconst (they must be in sync)
634 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
635 if (reg_temp[i].type == HR_CACHED && reg_temp[i].greg == r) {
636 reg_temp[i].stamp = rcache_counter;
637 if (mode != RC_GR_READ)
638 reg_temp[i].flags |= HRF_DIRTY;
639 ret = reg_temp[i].hreg;
640 goto end;
641 }
642 }
643
644 // use any free reg
645 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
646 if (reg_temp[i].type == HR_FREE) {
647 tr = &reg_temp[i];
648 goto do_alloc;
649 }
650 }
651
652 tr = rcache_evict();
653
654do_alloc:
655 tr->type = HR_CACHED;
656 if (do_locking)
657 tr->flags |= HRF_LOCKED;
658 if (mode != RC_GR_READ)
659 tr->flags |= HRF_DIRTY;
660 tr->greg = r;
661 tr->stamp = rcache_counter;
662 ret = tr->hreg;
663
664 if (mode != RC_GR_WRITE) {
665 if (gconst_check(r)) {
666 if (gconst_try_read(ret, r))
667 tr->flags |= HRF_DIRTY;
668 }
669 else
670 emith_ctx_read(tr->hreg, r * 4);
671 }
672
673end:
674 if (mode != RC_GR_READ)
675 gconst_kill(r);
676
677 return ret;
678}
679
680static int rcache_get_reg(sh2_reg_e r, rc_gr_mode mode)
681{
682 return rcache_get_reg_(r, mode, 1);
683}
684
685static int rcache_get_tmp(void)
686{
687 temp_reg_t *tr;
688 int i;
689
690 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
691 if (reg_temp[i].type == HR_FREE) {
692 tr = &reg_temp[i];
693 goto do_alloc;
694 }
695
696 tr = rcache_evict();
697
698do_alloc:
699 tr->type = HR_TEMP;
700 return tr->hreg;
701}
702
703static int rcache_get_arg_id(int arg)
704{
705 int i, r = 0;
706 host_arg2reg(r, arg);
707
708 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
709 if (reg_temp[i].hreg == r)
710 break;
711
712 if (i == ARRAY_SIZE(reg_temp)) // can't happen
713 exit(1);
714
715 if (reg_temp[i].type == HR_CACHED) {
716 // writeback
717 if (reg_temp[i].flags & HRF_DIRTY)
718 emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
719 gconst_check_evict(reg_temp[i].greg);
720 }
721 else if (reg_temp[i].type == HR_TEMP) {
722 printf("arg %d reg %d already used, aborting\n", arg, r);
723 exit(1);
724 }
725
726 reg_temp[i].type = HR_FREE;
727 reg_temp[i].flags = 0;
728
729 return i;
730}
731
732// get a reg to be used as function arg
733static int rcache_get_tmp_arg(int arg)
734{
735 int id = rcache_get_arg_id(arg);
736 reg_temp[id].type = HR_TEMP;
737
738 return reg_temp[id].hreg;
739}
740
741// same but caches a reg. RC_GR_READ only.
742static int rcache_get_reg_arg(int arg, sh2_reg_e r)
743{
744 int i, srcr, dstr, dstid;
745 int dirty = 0, src_dirty = 0;
746
747 dstid = rcache_get_arg_id(arg);
748 dstr = reg_temp[dstid].hreg;
749
750 // maybe already statically mapped?
751 srcr = get_reg_static(r, RC_GR_READ);
752 if (srcr != -1)
753 goto do_cache;
754
755 // maybe already cached?
756 for (i = ARRAY_SIZE(reg_temp) - 1; i >= 0; i--) {
757 if ((reg_temp[i].type == HR_CACHED) &&
758 reg_temp[i].greg == r)
759 {
760 srcr = reg_temp[i].hreg;
761 if (reg_temp[i].flags & HRF_DIRTY)
762 src_dirty = 1;
763 goto do_cache;
764 }
765 }
766
767 // must read
768 srcr = dstr;
769 if (gconst_check(r)) {
770 if (gconst_try_read(srcr, r))
771 dirty = 1;
772 }
773 else
774 emith_ctx_read(srcr, r * 4);
775
776do_cache:
777 if (dstr != srcr)
778 emith_move_r_r(dstr, srcr);
779#if 1
780 else
781 dirty |= src_dirty;
782
783 if (dirty)
784 // must clean, callers might want to modify the arg before call
785 emith_ctx_write(dstr, r * 4);
786#else
787 if (dirty)
788 reg_temp[dstid].flags |= HRF_DIRTY;
789#endif
790
791 reg_temp[dstid].stamp = ++rcache_counter;
792 reg_temp[dstid].type = HR_CACHED;
793 reg_temp[dstid].greg = r;
794 reg_temp[dstid].flags |= HRF_LOCKED;
795 return dstr;
796}
797
798static void rcache_free_tmp(int hr)
799{
800 int i;
801 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
802 if (reg_temp[i].hreg == hr)
803 break;
804
805 if (i == ARRAY_SIZE(reg_temp) || reg_temp[i].type != HR_TEMP) {
806 printf("rcache_free_tmp fail: #%i hr %d, type %d\n", i, hr, reg_temp[i].type);
807 return;
808 }
809
810 reg_temp[i].type = HR_FREE;
811 reg_temp[i].flags = 0;
812}
813
814static void rcache_unlock(int hr)
815{
816 int i;
817 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
818 if (reg_temp[i].type == HR_CACHED && reg_temp[i].hreg == hr)
819 reg_temp[i].flags &= ~HRF_LOCKED;
820}
821
822static void rcache_unlock_all(void)
823{
824 int i;
825 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
826 reg_temp[i].flags &= ~HRF_LOCKED;
827}
828
829static void rcache_clean(void)
830{
831 int i;
832 gconst_clean();
833
834 for (i = 0; i < ARRAY_SIZE(reg_temp); i++)
835 if (reg_temp[i].type == HR_CACHED && (reg_temp[i].flags & HRF_DIRTY)) {
836 // writeback
837 emith_ctx_write(reg_temp[i].hreg, reg_temp[i].greg * 4);
838 reg_temp[i].flags &= ~HRF_DIRTY;
839 }
840}
841
842static void rcache_invalidate(void)
843{
844 int i;
845 for (i = 0; i < ARRAY_SIZE(reg_temp); i++) {
846 reg_temp[i].type = HR_FREE;
847 reg_temp[i].flags = 0;
848 }
849 rcache_counter = 0;
850
851 gconst_invalidate();
852}
853
854static void rcache_flush(void)
855{
856 rcache_clean();
857 rcache_invalidate();
858}
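// typical per-opcode use of the register cache (illustrative sketch, mirroring
// e.g. the ADD Rm,Rn case in sh2_translate()):
//   tmp  = rcache_get_reg(GET_Rn(), RC_GR_RMW);  // allocate/locate host regs
//   tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
//   emith_add_r_r(tmp, tmp2);                    // emit the host op
//   ...
//   rcache_unlock_all();   // release HRF_LOCKED pins once the regs are no
//                          // longer needed by the emitted code
// rcache_clean()/rcache_flush() are used around memory handler calls and at
// (sub)block boundaries, as seen below.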
859
860// ---------------------------------------------------------------
861
862static int emit_get_rbase_and_offs(u32 a, u32 *offs)
863{
864 u32 mask = 0;
865 int poffs;
866 int hr;
867
868 poffs = dr_ctx_get_mem_ptr(a, &mask);
869 if (poffs == -1)
870 return -1;
871
872 // XXX: could use some related reg
873 hr = rcache_get_tmp();
874 emith_ctx_read(hr, poffs);
875 emith_add_r_imm(hr, a & mask & ~0xff);
876 *offs = a & 0xff; // XXX: ARM oriented..
877 return hr;
878}
879
880static void emit_move_r_imm32(sh2_reg_e dst, u32 imm)
881{
882#if PROPAGATE_CONSTANTS
883 gconst_new(dst, imm);
884#else
885 int hr = rcache_get_reg(dst, RC_GR_WRITE);
886 emith_move_r_imm(hr, imm);
887#endif
888}
889
890static void emit_move_r_r(sh2_reg_e dst, sh2_reg_e src)
891{
892 int hr_d = rcache_get_reg(dst, RC_GR_WRITE);
893 int hr_s = rcache_get_reg(src, RC_GR_READ);
894
895 emith_move_r_r(hr_d, hr_s);
896}
897
898// T must be clear, and comparison done just before this
899static void emit_or_t_if_eq(int srr)
900{
901 EMITH_SJMP_START(DCOND_NE);
902 emith_or_r_imm_c(DCOND_EQ, srr, T);
903 EMITH_SJMP_END(DCOND_NE);
904}
905
906// arguments must be ready
907// reg cache must be clean before call
908static int emit_memhandler_read_(int size, int ram_check)
909{
910 int arg0, arg1;
911 host_arg2reg(arg0, 0);
912
913 rcache_clean();
914
915 // must write back cycles for the poll detection code
916 // FIXME: rm
917 if (reg_map_g2h[SHR_SR] != -1)
918 emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
919
920 arg1 = rcache_get_tmp_arg(1);
921 emith_move_r_r(arg1, CONTEXT_REG);
922
923#ifndef PDB_NET
924 if (ram_check && Pico.rom == (void *)0x02000000 && Pico32xMem->sdram == (void *)0x06000000) {
925 int tmp = rcache_get_tmp();
926 emith_and_r_r_imm(tmp, arg0, 0xfb000000);
927 emith_cmp_r_imm(tmp, 0x02000000);
928 switch (size) {
929 case 0: // 8
930 EMITH_SJMP3_START(DCOND_NE);
931 emith_eor_r_imm_c(DCOND_EQ, arg0, 1);
932 emith_read8_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
933 EMITH_SJMP3_MID(DCOND_NE);
934 emith_call_cond(DCOND_NE, sh2_drc_read8);
935 EMITH_SJMP3_END();
936 break;
937 case 1: // 16
938 EMITH_SJMP3_START(DCOND_NE);
939 emith_read16_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
940 EMITH_SJMP3_MID(DCOND_NE);
941 emith_call_cond(DCOND_NE, sh2_drc_read16);
942 EMITH_SJMP3_END();
943 break;
944 case 2: // 32
945 EMITH_SJMP3_START(DCOND_NE);
946 emith_read_r_r_offs_c(DCOND_EQ, arg0, arg0, 0);
947 emith_ror_c(DCOND_EQ, arg0, arg0, 16);
948 EMITH_SJMP3_MID(DCOND_NE);
949 emith_call_cond(DCOND_NE, sh2_drc_read32);
950 EMITH_SJMP3_END();
951 break;
952 }
953 }
954 else
955#endif
956 {
957 switch (size) {
958 case 0: // 8
959 emith_call(sh2_drc_read8);
960 break;
961 case 1: // 16
962 emith_call(sh2_drc_read16);
963 break;
964 case 2: // 32
965 emith_call(sh2_drc_read32);
966 break;
967 }
968 }
969 rcache_invalidate();
970
971 if (reg_map_g2h[SHR_SR] != -1)
972 emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
973
974 // assuming arg0 and the retval reg match
975 return rcache_get_tmp_arg(0);
976}
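// note on the inlined fast path above (illustrative): when the host ROM and
// SDRAM buffers happen to sit at their SH2 bus addresses (0x02000000 and
// 0x06000000), the generated code only masks the address and loads directly;
// e.g. 0x06000000 & 0xfb000000 == 0x02000000, so both regions pass the inline
// check, while everything else calls sh2_drc_read8/16/32.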
977
978static int emit_memhandler_read(int size)
979{
980 return emit_memhandler_read_(size, 1);
981}
982
983static int emit_memhandler_read_rr(sh2_reg_e rd, sh2_reg_e rs, u32 offs, int size)
984{
985 int hr, hr2, ram_check = 1;
986 u32 val, offs2;
987
988 if (gconst_get(rs, &val)) {
989 hr = emit_get_rbase_and_offs(val + offs, &offs2);
990 if (hr != -1) {
991 hr2 = rcache_get_reg(rd, RC_GR_WRITE);
992 switch (size) {
993 case 0: // 8
994 emith_read8_r_r_offs(hr2, hr, offs2 ^ 1);
995 emith_sext(hr2, hr2, 8);
996 break;
997 case 1: // 16
998 emith_read16_r_r_offs(hr2, hr, offs2);
999 emith_sext(hr2, hr2, 16);
1000 break;
1001 case 2: // 32
1002 emith_read_r_r_offs(hr2, hr, offs2);
1003 emith_ror(hr2, hr2, 16);
1004 break;
1005 }
1006 rcache_free_tmp(hr);
1007 return hr2;
1008 }
1009
1010 ram_check = 0;
1011 }
1012
1013 hr = rcache_get_reg_arg(0, rs);
1014 if (offs != 0)
1015 emith_add_r_imm(hr, offs);
1016 hr = emit_memhandler_read_(size, ram_check);
1017 hr2 = rcache_get_reg(rd, RC_GR_WRITE);
1018 if (size != 2) {
1019 emith_sext(hr2, hr, (size == 1) ? 16 : 8);
1020 } else
1021 emith_move_r_r(hr2, hr);
1022 rcache_free_tmp(hr);
1023
1024 return hr2;
1025}
1026
1027static void emit_memhandler_write(int size, u32 pc, int delay)
1028{
1029 int ctxr;
1030 host_arg2reg(ctxr, 2);
1031 if (reg_map_g2h[SHR_SR] != -1)
1032 emith_ctx_write(reg_map_g2h[SHR_SR], SHR_SR * 4);
1033
1034 switch (size) {
1035 case 0: // 8
1036 // XXX: consider inlining sh2_drc_write8
1037 if (delay) {
1038 emith_call(sh2_drc_write8_slot);
1039 } else {
1040 emit_move_r_imm32(SHR_PC, pc);
1041 rcache_clean();
1042 emith_call(sh2_drc_write8);
1043 }
1044 break;
1045 case 1: // 16
1046 if (delay) {
1047 emith_call(sh2_drc_write16_slot);
1048 } else {
1049 emit_move_r_imm32(SHR_PC, pc);
1050 rcache_clean();
1051 emith_call(sh2_drc_write16);
1052 }
1053 break;
1054 case 2: // 32
1055 emith_move_r_r(ctxr, CONTEXT_REG);
1056 emith_call(sh2_drc_write32);
1057 break;
1058 }
1059
1060 if (reg_map_g2h[SHR_SR] != -1)
1061 emith_ctx_read(reg_map_g2h[SHR_SR], SHR_SR * 4);
1062 rcache_invalidate();
1063}
1064
1065// @(Rx,Ry)
1066static int emit_indirect_indexed_read(int rx, int ry, int size)
1067{
1068 int a0, t;
1069 a0 = rcache_get_reg_arg(0, rx);
1070 t = rcache_get_reg(ry, RC_GR_READ);
1071 emith_add_r_r(a0, t);
1072 return emit_memhandler_read(size);
1073}
1074
1075// read @Rn, @Rm
1076static void emit_indirect_read_double(u32 *rnr, u32 *rmr, int rn, int rm, int size)
1077{
1078 int tmp;
1079
1080 rcache_get_reg_arg(0, rn);
1081 tmp = emit_memhandler_read(size);
1082 emith_ctx_write(tmp, offsetof(SH2, drc_tmp));
1083 rcache_free_tmp(tmp);
1084 tmp = rcache_get_reg(rn, RC_GR_RMW);
1085 emith_add_r_imm(tmp, 1 << size);
1086 rcache_unlock(tmp);
1087
1088 rcache_get_reg_arg(0, rm);
1089 *rmr = emit_memhandler_read(size);
1090 *rnr = rcache_get_tmp();
1091 emith_ctx_read(*rnr, offsetof(SH2, drc_tmp));
1092 tmp = rcache_get_reg(rm, RC_GR_RMW);
1093 emith_add_r_imm(tmp, 1 << size);
1094 rcache_unlock(tmp);
1095}
1096
1097static void emit_do_static_regs(int is_write, int tmpr)
1098{
1099 int i, r, count;
1100
1101 for (i = 0; i < ARRAY_SIZE(reg_map_g2h); i++) {
1102 r = reg_map_g2h[i];
1103 if (r == -1)
1104 continue;
1105
1106 for (count = 1; i < ARRAY_SIZE(reg_map_g2h) - 1; i++, r++) {
1107 if (reg_map_g2h[i + 1] != r + 1)
1108 break;
1109 count++;
1110 }
1111
1112 if (count > 1) {
1113 // i, r point to last item
1114 if (is_write)
1115 emith_ctx_write_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
1116 else
1117 emith_ctx_read_multiple(r - count + 1, (i - count + 1) * 4, count, tmpr);
1118 } else {
1119 if (is_write)
1120 emith_ctx_write(r, i * 4);
1121 else
1122 emith_ctx_read(r, i * 4);
1123 }
1124 }
1125}
1126
1127static void emit_block_entry(void)
1128{
1129 int arg0;
1130
1131 host_arg2reg(arg0, 0);
1132
1133#if (DRC_DEBUG & 8) || defined(PDB)
1134 int arg1, arg2;
1135 host_arg2reg(arg1, 1);
1136 host_arg2reg(arg2, 2);
1137
1138 emit_do_static_regs(1, arg2);
1139 emith_move_r_r(arg1, CONTEXT_REG);
1140 emith_move_r_r(arg2, rcache_get_reg(SHR_SR, RC_GR_READ));
1141 emith_call(sh2_drc_log_entry);
1142 rcache_invalidate();
1143#endif
1144 emith_tst_r_r(arg0, arg0);
1145 EMITH_SJMP_START(DCOND_EQ);
1146 emith_jump_reg_c(DCOND_NE, arg0);
1147 EMITH_SJMP_END(DCOND_EQ);
1148}
1149
1150#define DELAYED_OP \
1151 drcf.delayed_op = 2
1152
1153#define DELAY_SAVE_T(sr) { \
1154 emith_bic_r_imm(sr, T_save); \
1155 emith_tst_r_imm(sr, T); \
1156 EMITH_SJMP_START(DCOND_EQ); \
1157 emith_or_r_imm_c(DCOND_NE, sr, T_save); \
1158 EMITH_SJMP_END(DCOND_EQ); \
1159 drcf.use_saved_t = 1; \
1160}
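// delay slot handling, roughly (illustrative): the branch owning the slot is
// emitted only after the slot insn, so if the slot insn modifies T (e.g.
// "BF/S target" followed by a CMP in the slot), the value the branch must test
// would be clobbered; DELAY_SAVE_T() copies the pre-slot T into T_save and
// sets drcf.use_saved_t so the branch tests T_save instead.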
1161
1162#define FLUSH_CYCLES(sr) \
1163 if (cycles > 0) { \
1164 emith_sub_r_imm(sr, cycles << 12); \
1165 cycles = 0; \
1166 }
1167
1168#define CHECK_UNHANDLED_BITS(mask) { \
1169 if ((op & (mask)) != 0) \
1170 goto default_; \
1171}
1172
1173#define GET_Fx() \
1174 ((op >> 4) & 0x0f)
1175
1176#define GET_Rm GET_Fx
1177
1178#define GET_Rn() \
1179 ((op >> 8) & 0x0f)
1180
1181#define CHECK_FX_LT(n) \
1182 if (GET_Fx() >= n) \
1183 goto default_
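// decoding example (illustrative): for op = 0x312c (ADD R2,R1,
// 0011nnnnmmmm1100) GET_Rn() == 1, GET_Rm() == 2 and (op & 0x0f) == 0x0c
// selects the ADD/SUB case in sh2_translate().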
1184
1185static void *dr_get_pc_base(u32 pc, int is_slave);
1186
1187static void REGPARM(2) *sh2_translate(SH2 *sh2, int tcache_id)
1188{
1189 // XXX: maybe use structs instead?
1190 u32 branch_target_pc[MAX_LOCAL_BRANCHES];
1191 void *branch_target_ptr[MAX_LOCAL_BRANCHES];
1192 int branch_target_blkid[MAX_LOCAL_BRANCHES];
1193 int branch_target_count = 0;
1194 void *branch_patch_ptr[MAX_LOCAL_BRANCHES];
1195 u32 branch_patch_pc[MAX_LOCAL_BRANCHES];
1196 int branch_patch_count = 0;
1197 u32 literal_addr[MAX_LITERALS];
1198 int literal_addr_count = 0;
1199 int pending_branch_cond = -1;
1200 int pending_branch_pc = 0;
1201 u8 op_flags[BLOCK_CYCLE_LIMIT];
1202 struct {
1203 u32 delayed_op:2;
1204 u32 test_irq:1;
1205 u32 use_saved_t:1; // delayed op modifies T
1206 } drcf = { 0, };
1207
1208 // PC of current, first, last, last_target_blk SH2 insn
1209 u32 pc, base_pc, end_pc, out_pc;
1210 void *block_entry;
1211 block_desc *this_block;
1212 u16 *dr_pc_base;
1213 int blkid_main = 0;
1214 int skip_op = 0;
1215 u32 tmp, tmp2;
1216 int cycles;
1217 int op;
1218 int i;
1219
1220 base_pc = sh2->pc;
1221
1222 // get base/validate PC
1223 dr_pc_base = dr_get_pc_base(base_pc, sh2->is_slave);
1224 if (dr_pc_base == (void *)-1) {
1225 printf("invalid PC, aborting: %08x\n", base_pc);
1226 // FIXME: be less destructive
1227 exit(1);
1228 }
1229
1230 tcache_ptr = tcache_ptrs[tcache_id];
1231
1232 // predict tcache overflow
1233 tmp = tcache_ptr - tcache_bases[tcache_id];
1234 if (tmp > tcache_sizes[tcache_id] - MAX_BLOCK_SIZE) {
1235 dbg(1, "tcache %d overflow", tcache_id);
1236 return NULL;
1237 }
1238
1239 // 1st pass: scan forward for local branches
1240 scan_block(base_pc, sh2->is_slave, op_flags, &end_pc);
1241
1242 this_block = dr_add_block(base_pc, end_pc + MAX_LITERAL_OFFSET, // XXX
1243 sh2->is_slave, &blkid_main);
1244 if (this_block == NULL)
1245 return NULL;
1246
1247 block_entry = tcache_ptr;
1248 dbg(2, "== %csh2 block #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
1249 tcache_id, blkid_main, base_pc, block_entry);
1250
1251 dr_link_blocks(tcache_ptr, base_pc, tcache_id);
1252
1253 // collect branch_targets that don't land on delay slots
1254 for (pc = base_pc; pc <= end_pc; pc += 2) {
1255 if (!(OP_FLAGS(pc) & OF_TARGET))
1256 continue;
1257 if (OP_FLAGS(pc) & OF_DELAY_OP) {
1258 OP_FLAGS(pc) &= ~OF_TARGET;
1259 continue;
1260 }
1261 ADD_TO_ARRAY(branch_target_pc, branch_target_count, pc, break);
1262 }
1263
1264 if (branch_target_count > 0) {
1265 memset(branch_target_ptr, 0, sizeof(branch_target_ptr[0]) * branch_target_count);
1266 memset(branch_target_blkid, 0, sizeof(branch_target_blkid[0]) * branch_target_count);
1267 }
1268
1269 // -------------------------------------------------
1270 // 2nd pass: actual compilation
1271 out_pc = 0;
1272 pc = base_pc;
1273 for (cycles = 0; pc <= end_pc || drcf.delayed_op; )
1274 {
1275 u32 tmp3, tmp4, sr;
1276
1277 if (drcf.delayed_op > 0)
1278 drcf.delayed_op--;
1279
1280 op = FETCH_OP(pc);
1281
1282 if ((OP_FLAGS(pc) & OF_TARGET) || pc == base_pc)
1283 {
1284 i = find_in_array(branch_target_pc, branch_target_count, pc);
1285 if (pc != base_pc)
1286 {
1287 /* make "subblock" - just a mid-block entry */
1288 block_desc *subblock;
1289
1290 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1291 FLUSH_CYCLES(sr);
1292 // decide whether to flush the rcache
1293 if ((op & 0xf0ff) == 0x4010 && FETCH_OP(pc + 2) == 0x8bfd) // DT; BF #-2
1294 rcache_clean();
1295 else
1296 rcache_flush();
1297 do_host_disasm(tcache_id);
1298
1299 dbg(2, "-- %csh2 subblock #%d,%d %08x -> %p", sh2->is_slave ? 's' : 'm',
1300 tcache_id, branch_target_blkid[i], pc, tcache_ptr);
1301
1302 subblock = dr_add_block(pc, end_pc + MAX_LITERAL_OFFSET, // XXX
1303 sh2->is_slave, &branch_target_blkid[i]);
1304 if (subblock == NULL)
1305 return NULL;
1306
1307 // since we made a block entry, link any other blocks that jump to current pc
1308 dr_link_blocks(tcache_ptr, pc, tcache_id);
1309 }
1310 if (i >= 0)
1311 branch_target_ptr[i] = tcache_ptr;
1312
1313 // must update PC
1314 emit_move_r_imm32(SHR_PC, pc);
1315 rcache_clean();
1316
1317 // check cycles
1318 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
1319 emith_cmp_r_imm(sr, 0);
1320 emith_jump_cond(DCOND_LE, sh2_drc_exit);
1321 do_host_disasm(tcache_id);
1322 rcache_unlock_all();
1323 }
1324
1325#if (DRC_DEBUG & 2)
1326 insns_compiled++;
1327#endif
1328#if (DRC_DEBUG & 4)
1329 DasmSH2(sh2dasm_buff, pc, op);
1330 printf("%08x %04x %s\n", pc, op, sh2dasm_buff);
1331#endif
1332#ifdef DRC_CMP
1333 //if (out_pc != 0 && out_pc != (u32)-1)
1334 // emit_move_r_imm32(SHR_PC, out_pc);
1335 //else
1336 if (!drcf.delayed_op) {
1337 emit_move_r_imm32(SHR_PC, pc);
1338 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1339 FLUSH_CYCLES(sr);
1340 // rcache_clean(); // FIXME
1341 rcache_flush();
1342 emit_do_static_regs(1, 0);
1343 emith_pass_arg_r(0, CONTEXT_REG);
1344 emith_call(do_sh2_cmp);
1345 }
1346#endif
1347
1348 pc += 2;
1349 cycles++;
1350
1351 if (skip_op > 0) {
1352 skip_op--;
1353 continue;
1354 }
1355
1356 switch ((op >> 12) & 0x0f)
1357 {
1358 /////////////////////////////////////////////
1359 case 0x00:
1360 switch (op & 0x0f)
1361 {
1362 case 0x02:
1363 tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1364 switch (GET_Fx())
1365 {
1366 case 0: // STC SR,Rn 0000nnnn00000010
1367 tmp2 = SHR_SR;
1368 break;
1369 case 1: // STC GBR,Rn 0000nnnn00010010
1370 tmp2 = SHR_GBR;
1371 break;
1372 case 2: // STC VBR,Rn 0000nnnn00100010
1373 tmp2 = SHR_VBR;
1374 break;
1375 default:
1376 goto default_;
1377 }
1378 tmp3 = rcache_get_reg(tmp2, RC_GR_READ);
1379 emith_move_r_r(tmp, tmp3);
1380 if (tmp2 == SHR_SR)
1381 emith_clear_msb(tmp, tmp, 22); // reserved bits defined by ISA as 0
1382 goto end_op;
1383 case 0x03:
1384 CHECK_UNHANDLED_BITS(0xd0);
1385 // BRAF Rm 0000mmmm00100011
1386 // BSRF Rm 0000mmmm00000011
1387 DELAYED_OP;
1388 tmp = rcache_get_reg(SHR_PC, RC_GR_WRITE);
1389 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1390 emith_move_r_r(tmp, tmp2);
1391 if (op & 0x20)
1392 emith_add_r_imm(tmp, pc + 2);
1393 else { // BSRF
1394 tmp3 = rcache_get_reg(SHR_PR, RC_GR_WRITE);
1395 emith_move_r_imm(tmp3, pc + 2);
1396 emith_add_r_r(tmp, tmp3);
1397 }
1398 out_pc = (u32)-1;
1399 cycles++;
1400 goto end_op;
1401 case 0x04: // MOV.B Rm,@(R0,Rn) 0000nnnnmmmm0100
1402 case 0x05: // MOV.W Rm,@(R0,Rn) 0000nnnnmmmm0101
1403 case 0x06: // MOV.L Rm,@(R0,Rn) 0000nnnnmmmm0110
1404 rcache_clean();
1405 tmp = rcache_get_reg_arg(1, GET_Rm());
1406 tmp2 = rcache_get_reg_arg(0, SHR_R0);
1407 tmp3 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1408 emith_add_r_r(tmp2, tmp3);
1409 emit_memhandler_write(op & 3, pc, drcf.delayed_op);
1410 goto end_op;
1411 case 0x07:
1412 // MUL.L Rm,Rn 0000nnnnmmmm0111
1413 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
1414 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1415 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1416 emith_mul(tmp3, tmp2, tmp);
1417 cycles++;
1418 goto end_op;
1419 case 0x08:
1420 CHECK_UNHANDLED_BITS(0xf00);
1421 switch (GET_Fx())
1422 {
1423 case 0: // CLRT 0000000000001000
1424 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1425 if (drcf.delayed_op)
1426 DELAY_SAVE_T(sr);
1427 emith_bic_r_imm(sr, T);
1428 break;
1429 case 1: // SETT 0000000000011000
1430 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1431 if (drcf.delayed_op)
1432 DELAY_SAVE_T(sr);
1433 emith_or_r_imm(sr, T);
1434 break;
1435 case 2: // CLRMAC 0000000000101000
1436 emit_move_r_imm32(SHR_MACL, 0);
1437 emit_move_r_imm32(SHR_MACH, 0);
1438 break;
1439 default:
1440 goto default_;
1441 }
1442 goto end_op;
1443 case 0x09:
1444 switch (GET_Fx())
1445 {
1446 case 0: // NOP 0000000000001001
1447 CHECK_UNHANDLED_BITS(0xf00);
1448 break;
1449 case 1: // DIV0U 0000000000011001
1450 CHECK_UNHANDLED_BITS(0xf00);
1451 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1452 if (drcf.delayed_op)
1453 DELAY_SAVE_T(sr);
1454 emith_bic_r_imm(sr, M|Q|T);
1455 break;
1456 case 2: // MOVT Rn 0000nnnn00101001
1457 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
1458 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1459 emith_clear_msb(tmp2, sr, 31);
1460 break;
1461 default:
1462 goto default_;
1463 }
1464 goto end_op;
1465 case 0x0a:
1466 tmp = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1467 switch (GET_Fx())
1468 {
1469 case 0: // STS MACH,Rn 0000nnnn00001010
1470 tmp2 = SHR_MACH;
1471 break;
1472 case 1: // STS MACL,Rn 0000nnnn00011010
1473 tmp2 = SHR_MACL;
1474 break;
1475 case 2: // STS PR,Rn 0000nnnn00101010
1476 tmp2 = SHR_PR;
1477 break;
1478 default:
1479 goto default_;
1480 }
1481 tmp2 = rcache_get_reg(tmp2, RC_GR_READ);
1482 emith_move_r_r(tmp, tmp2);
1483 goto end_op;
1484 case 0x0b:
1485 CHECK_UNHANDLED_BITS(0xf00);
1486 switch (GET_Fx())
1487 {
1488 case 0: // RTS 0000000000001011
1489 DELAYED_OP;
1490 emit_move_r_r(SHR_PC, SHR_PR);
1491 out_pc = (u32)-1;
1492 cycles++;
1493 break;
1494 case 1: // SLEEP 0000000000011011
1495 tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
1496 emith_clear_msb(tmp, tmp, 20); // clear cycles
1497 out_pc = out_pc - 2;
1498 cycles = 1;
1499 goto end_op;
1500 case 2: // RTE 0000000000101011
1501 DELAYED_OP;
1502 // pop PC
1503 emit_memhandler_read_rr(SHR_PC, SHR_SP, 0, 2);
1504 // pop SR
1505 tmp = rcache_get_reg_arg(0, SHR_SP);
1506 emith_add_r_imm(tmp, 4);
1507 tmp = emit_memhandler_read(2);
1508 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1509 emith_write_sr(sr, tmp);
1510 rcache_free_tmp(tmp);
1511 tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
1512 emith_add_r_imm(tmp, 4*2);
1513 drcf.test_irq = 1;
1514 out_pc = (u32)-1;
1515 cycles += 3;
1516 break;
1517 default:
1518 goto default_;
1519 }
1520 goto end_op;
1521 case 0x0c: // MOV.B @(R0,Rm),Rn 0000nnnnmmmm1100
1522 case 0x0d: // MOV.W @(R0,Rm),Rn 0000nnnnmmmm1101
1523 case 0x0e: // MOV.L @(R0,Rm),Rn 0000nnnnmmmm1110
1524 tmp = emit_indirect_indexed_read(SHR_R0, GET_Rm(), op & 3);
1525 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
1526 if ((op & 3) != 2) {
1527 emith_sext(tmp2, tmp, (op & 1) ? 16 : 8);
1528 } else
1529 emith_move_r_r(tmp2, tmp);
1530 rcache_free_tmp(tmp);
1531 goto end_op;
1532 case 0x0f: // MAC.L @Rm+,@Rn+ 0000nnnnmmmm1111
1533 emit_indirect_read_double(&tmp, &tmp2, GET_Rn(), GET_Rm(), 2);
1534 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW);
1535 /* MS 16 MAC bits unused if saturated */
1536 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
1537 emith_tst_r_imm(sr, S);
1538 EMITH_SJMP_START(DCOND_EQ);
1539 emith_clear_msb_c(DCOND_NE, tmp4, tmp4, 16);
1540 EMITH_SJMP_END(DCOND_EQ);
1541 rcache_unlock(sr);
1542 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW); // might evict SR
1543 emith_mula_s64(tmp3, tmp4, tmp, tmp2);
1544 rcache_free_tmp(tmp2);
1545 sr = rcache_get_reg(SHR_SR, RC_GR_READ); // reget just in case
1546 emith_tst_r_imm(sr, S);
1547
1548 EMITH_JMP_START(DCOND_EQ);
1549 emith_asr(tmp, tmp4, 15);
1550 emith_cmp_r_imm(tmp, -1); // negative overflow (0x80000000..0xffff7fff)
1551 EMITH_SJMP_START(DCOND_GE);
1552 emith_move_r_imm_c(DCOND_LT, tmp4, 0x8000);
1553 emith_move_r_imm_c(DCOND_LT, tmp3, 0x0000);
1554 EMITH_SJMP_END(DCOND_GE);
1555 emith_cmp_r_imm(tmp, 0); // positive overflow (0x00008000..0x7fffffff)
1556 EMITH_SJMP_START(DCOND_LE);
1557 emith_move_r_imm_c(DCOND_GT, tmp4, 0x00007fff);
1558 emith_move_r_imm_c(DCOND_GT, tmp3, 0xffffffff);
1559 EMITH_SJMP_END(DCOND_LE);
1560 EMITH_JMP_END(DCOND_EQ);
1561
1562 rcache_free_tmp(tmp);
1563 cycles += 2;
1564 goto end_op;
1565 }
1566 goto default_;
1567
1568 /////////////////////////////////////////////
1569 case 0x01:
1570 // MOV.L Rm,@(disp,Rn) 0001nnnnmmmmdddd
1571 rcache_clean();
1572 tmp = rcache_get_reg_arg(0, GET_Rn());
1573 tmp2 = rcache_get_reg_arg(1, GET_Rm());
1574 if (op & 0x0f)
1575 emith_add_r_imm(tmp, (op & 0x0f) * 4);
1576 emit_memhandler_write(2, pc, drcf.delayed_op);
1577 goto end_op;
1578
1579 case 0x02:
1580 switch (op & 0x0f)
1581 {
1582 case 0x00: // MOV.B Rm,@Rn 0010nnnnmmmm0000
1583 case 0x01: // MOV.W Rm,@Rn 0010nnnnmmmm0001
1584 case 0x02: // MOV.L Rm,@Rn 0010nnnnmmmm0010
1585 rcache_clean();
1586 rcache_get_reg_arg(0, GET_Rn());
1587 rcache_get_reg_arg(1, GET_Rm());
1588 emit_memhandler_write(op & 3, pc, drcf.delayed_op);
1589 goto end_op;
1590 case 0x04: // MOV.B Rm,@-Rn 0010nnnnmmmm0100
1591 case 0x05: // MOV.W Rm,@-Rn 0010nnnnmmmm0101
1592 case 0x06: // MOV.L Rm,@-Rn 0010nnnnmmmm0110
1593 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1594 emith_sub_r_imm(tmp, (1 << (op & 3)));
1595 rcache_clean();
1596 rcache_get_reg_arg(0, GET_Rn());
1597 rcache_get_reg_arg(1, GET_Rm());
1598 emit_memhandler_write(op & 3, pc, drcf.delayed_op);
1599 goto end_op;
1600 case 0x07: // DIV0S Rm,Rn 0010nnnnmmmm0111
1601 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1602 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1603 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1604 if (drcf.delayed_op)
1605 DELAY_SAVE_T(sr);
1606 emith_bic_r_imm(sr, M|Q|T);
1607 emith_tst_r_imm(tmp2, (1<<31));
1608 EMITH_SJMP_START(DCOND_EQ);
1609 emith_or_r_imm_c(DCOND_NE, sr, Q);
1610 EMITH_SJMP_END(DCOND_EQ);
1611 emith_tst_r_imm(tmp3, (1<<31));
1612 EMITH_SJMP_START(DCOND_EQ);
1613 emith_or_r_imm_c(DCOND_NE, sr, M);
1614 EMITH_SJMP_END(DCOND_EQ);
1615 emith_teq_r_r(tmp2, tmp3);
1616 EMITH_SJMP_START(DCOND_PL);
1617 emith_or_r_imm_c(DCOND_MI, sr, T);
1618 EMITH_SJMP_END(DCOND_PL);
1619 goto end_op;
1620 case 0x08: // TST Rm,Rn 0010nnnnmmmm1000
1621 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1622 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1623 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1624 if (drcf.delayed_op)
1625 DELAY_SAVE_T(sr);
1626 emith_bic_r_imm(sr, T);
1627 emith_tst_r_r(tmp2, tmp3);
1628 emit_or_t_if_eq(sr);
1629 goto end_op;
1630 case 0x09: // AND Rm,Rn 0010nnnnmmmm1001
1631 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1632 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1633 emith_and_r_r(tmp, tmp2);
1634 goto end_op;
1635 case 0x0a: // XOR Rm,Rn 0010nnnnmmmm1010
1636 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1637 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1638 emith_eor_r_r(tmp, tmp2);
1639 goto end_op;
1640 case 0x0b: // OR Rm,Rn 0010nnnnmmmm1011
1641 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1642 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1643 emith_or_r_r(tmp, tmp2);
1644 goto end_op;
1645 case 0x0c: // CMP/STR Rm,Rn 0010nnnnmmmm1100
1646 tmp = rcache_get_tmp();
1647 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1648 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1649 emith_eor_r_r_r(tmp, tmp2, tmp3);
1650 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1651 if (drcf.delayed_op)
1652 DELAY_SAVE_T(sr);
1653 emith_bic_r_imm(sr, T);
1654 emith_tst_r_imm(tmp, 0x000000ff);
1655 emit_or_t_if_eq(tmp);
1656 emith_tst_r_imm(tmp, 0x0000ff00);
1657 emit_or_t_if_eq(tmp);
1658 emith_tst_r_imm(tmp, 0x00ff0000);
1659 emit_or_t_if_eq(tmp);
1660 emith_tst_r_imm(tmp, 0xff000000);
1661 emit_or_t_if_eq(tmp);
1662 rcache_free_tmp(tmp);
1663 goto end_op;
1664 case 0x0d: // XTRCT Rm,Rn 0010nnnnmmmm1101
1665 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1666 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1667 emith_lsr(tmp, tmp, 16);
1668 emith_or_r_r_lsl(tmp, tmp2, 16);
1669 goto end_op;
1670 case 0x0e: // MULU.W Rm,Rn 0010nnnnmmmm1110
1671 case 0x0f: // MULS.W Rm,Rn 0010nnnnmmmm1111
1672 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1673 tmp = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1674 if (op & 1) {
1675 emith_sext(tmp, tmp2, 16);
1676 } else
1677 emith_clear_msb(tmp, tmp2, 16);
1678 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1679 tmp2 = rcache_get_tmp();
1680 if (op & 1) {
1681 emith_sext(tmp2, tmp3, 16);
1682 } else
1683 emith_clear_msb(tmp2, tmp3, 16);
1684 emith_mul(tmp, tmp, tmp2);
1685 rcache_free_tmp(tmp2);
1686 goto end_op;
1687 }
1688 goto default_;
1689
1690 /////////////////////////////////////////////
1691 case 0x03:
1692 switch (op & 0x0f)
1693 {
1694 case 0x00: // CMP/EQ Rm,Rn 0011nnnnmmmm0000
1695 case 0x02: // CMP/HS Rm,Rn 0011nnnnmmmm0010
1696 case 0x03: // CMP/GE Rm,Rn 0011nnnnmmmm0011
1697 case 0x06: // CMP/HI Rm,Rn 0011nnnnmmmm0110
1698 case 0x07: // CMP/GT Rm,Rn 0011nnnnmmmm0111
1699 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1700 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_READ);
1701 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1702 if (drcf.delayed_op)
1703 DELAY_SAVE_T(sr);
1704 emith_bic_r_imm(sr, T);
1705 emith_cmp_r_r(tmp2, tmp3);
1706 switch (op & 0x07)
1707 {
1708 case 0x00: // CMP/EQ
1709 emit_or_t_if_eq(sr);
1710 break;
1711 case 0x02: // CMP/HS
1712 EMITH_SJMP_START(DCOND_LO);
1713 emith_or_r_imm_c(DCOND_HS, sr, T);
1714 EMITH_SJMP_END(DCOND_LO);
1715 break;
1716 case 0x03: // CMP/GE
1717 EMITH_SJMP_START(DCOND_LT);
1718 emith_or_r_imm_c(DCOND_GE, sr, T);
1719 EMITH_SJMP_END(DCOND_LT);
1720 break;
1721 case 0x06: // CMP/HI
1722 EMITH_SJMP_START(DCOND_LS);
1723 emith_or_r_imm_c(DCOND_HI, sr, T);
1724 EMITH_SJMP_END(DCOND_LS);
1725 break;
1726 case 0x07: // CMP/GT
1727 EMITH_SJMP_START(DCOND_LE);
1728 emith_or_r_imm_c(DCOND_GT, sr, T);
1729 EMITH_SJMP_END(DCOND_LE);
1730 break;
1731 }
1732 goto end_op;
1733 case 0x04: // DIV1 Rm,Rn 0011nnnnmmmm0100
1734 // Q1 = carry(Rn = (Rn << 1) | T)
1735 // if Q ^ M
1736 // Q2 = carry(Rn += Rm)
1737 // else
1738 // Q2 = carry(Rn -= Rm)
1739 // Q = M ^ Q1 ^ Q2
1740 // T = (Q == M) = !(Q ^ M) = !(Q1 ^ Q2)
1741 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1742 tmp3 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1743 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1744 if (drcf.delayed_op)
1745 DELAY_SAVE_T(sr);
1746 emith_tpop_carry(sr, 0);
1747 emith_adcf_r_r(tmp2, tmp2);
1748 emith_tpush_carry(sr, 0); // keep Q1 in T for now
1749 tmp4 = rcache_get_tmp();
1750 emith_and_r_r_imm(tmp4, sr, M);
1751 emith_eor_r_r_lsr(sr, tmp4, M_SHIFT - Q_SHIFT); // Q ^= M
1752 rcache_free_tmp(tmp4);
1753 // add or sub, invert T if carry to get Q1 ^ Q2
1754 // in: (Q ^ M) passed in Q, Q1 in T
1755 emith_sh2_div1_step(tmp2, tmp3, sr);
1756 emith_bic_r_imm(sr, Q);
1757 emith_tst_r_imm(sr, M);
1758 EMITH_SJMP_START(DCOND_EQ);
1759 emith_or_r_imm_c(DCOND_NE, sr, Q); // Q = M
1760 EMITH_SJMP_END(DCOND_EQ);
1761 emith_tst_r_imm(sr, T);
1762 EMITH_SJMP_START(DCOND_EQ);
1763 emith_eor_r_imm_c(DCOND_NE, sr, Q); // Q = M ^ Q1 ^ Q2
1764 EMITH_SJMP_END(DCOND_EQ);
1765 emith_eor_r_imm(sr, T); // T = !(Q1 ^ Q2)
1766 goto end_op;
1767 case 0x05: // DMULU.L Rm,Rn 0011nnnnmmmm0101
1768 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
1769 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1770 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1771 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
1772 emith_mul_u64(tmp3, tmp4, tmp, tmp2);
1773 cycles++;
1774 goto end_op;
1775 case 0x08: // SUB Rm,Rn 0011nnnnmmmm1000
1776 case 0x0c: // ADD Rm,Rn 0011nnnnmmmm1100
1777 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1778 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1779 if (op & 4) {
1780 emith_add_r_r(tmp, tmp2);
1781 } else
1782 emith_sub_r_r(tmp, tmp2);
1783 goto end_op;
1784 case 0x0a: // SUBC Rm,Rn 0011nnnnmmmm1010
1785 case 0x0e: // ADDC Rm,Rn 0011nnnnmmmm1110
1786 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1787 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1788 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1789 if (drcf.delayed_op)
1790 DELAY_SAVE_T(sr);
1791 if (op & 4) { // adc
1792 emith_tpop_carry(sr, 0);
1793 emith_adcf_r_r(tmp, tmp2);
1794 emith_tpush_carry(sr, 0);
1795 } else {
1796 emith_tpop_carry(sr, 1);
1797 emith_sbcf_r_r(tmp, tmp2);
1798 emith_tpush_carry(sr, 1);
1799 }
1800 goto end_op;
1801 case 0x0b: // SUBV Rm,Rn 0011nnnnmmmm1011
1802 case 0x0f: // ADDV Rm,Rn 0011nnnnmmmm1111
1803 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1804 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1805 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1806 if (drcf.delayed_op)
1807 DELAY_SAVE_T(sr);
1808 emith_bic_r_imm(sr, T);
1809 if (op & 4) {
1810 emith_addf_r_r(tmp, tmp2);
1811 } else
1812 emith_subf_r_r(tmp, tmp2);
1813 EMITH_SJMP_START(DCOND_VC);
1814 emith_or_r_imm_c(DCOND_VS, sr, T);
1815 EMITH_SJMP_END(DCOND_VC);
1816 goto end_op;
1817 case 0x0d: // DMULS.L Rm,Rn 0011nnnnmmmm1101
1818 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
1819 tmp2 = rcache_get_reg(GET_Rm(), RC_GR_READ);
1820 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_WRITE);
1821 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_WRITE);
1822 emith_mul_s64(tmp3, tmp4, tmp, tmp2);
1823 cycles++;
1824 goto end_op;
1825 }
1826 goto default_;
1827
1828 /////////////////////////////////////////////
1829 case 0x04:
1830 switch (op & 0x0f)
1831 {
1832 case 0x00:
1833 switch (GET_Fx())
1834 {
1835 case 0: // SHLL Rn 0100nnnn00000000
1836 case 2: // SHAL Rn 0100nnnn00100000
1837 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1838 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1839 if (drcf.delayed_op)
1840 DELAY_SAVE_T(sr);
1841 emith_tpop_carry(sr, 0); // dummy
1842 emith_lslf(tmp, tmp, 1);
1843 emith_tpush_carry(sr, 0);
1844 goto end_op;
1845 case 1: // DT Rn 0100nnnn00010000
1846 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1847 if (drcf.delayed_op)
1848 DELAY_SAVE_T(sr);
1849#ifndef DRC_CMP
1850 if (FETCH_OP(pc) == 0x8bfd) { // BF #-2
1851 if (gconst_get(GET_Rn(), &tmp)) {
1852 // XXX: limit burned cycles
1853 emit_move_r_imm32(GET_Rn(), 0);
1854 emith_or_r_imm(sr, T);
1855 cycles += tmp * 4 + 1; // +1 syncs with noconst version, not sure why
1856 skip_op = 1;
1857 }
1858 else
1859 emith_sh2_dtbf_loop();
1860 goto end_op;
1861 }
1862#endif
1863 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1864 emith_bic_r_imm(sr, T);
1865 emith_subf_r_imm(tmp, 1);
1866 emit_or_t_if_eq(sr);
1867 goto end_op;
1868 }
1869 goto default_;
1870 case 0x01:
1871 switch (GET_Fx())
1872 {
1873 case 0: // SHLR Rn 0100nnnn00000001
1874 case 2: // SHAR Rn 0100nnnn00100001
1875 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1876 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1877 if (drcf.delayed_op)
1878 DELAY_SAVE_T(sr);
1879 emith_tpop_carry(sr, 0); // dummy
1880 if (op & 0x20) {
1881 emith_asrf(tmp, tmp, 1);
1882 } else
1883 emith_lsrf(tmp, tmp, 1);
1884 emith_tpush_carry(sr, 0);
1885 goto end_op;
1886 case 1: // CMP/PZ Rn 0100nnnn00010001
1887 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1888 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1889 if (drcf.delayed_op)
1890 DELAY_SAVE_T(sr);
1891 emith_bic_r_imm(sr, T);
1892 emith_cmp_r_imm(tmp, 0);
1893 EMITH_SJMP_START(DCOND_LT);
1894 emith_or_r_imm_c(DCOND_GE, sr, T);
1895 EMITH_SJMP_END(DCOND_LT);
1896 goto end_op;
1897 }
1898 goto default_;
1899 case 0x02:
1900 case 0x03:
1901 switch (op & 0x3f)
1902 {
1903 case 0x02: // STS.L MACH,@-Rn 0100nnnn00000010
1904 tmp = SHR_MACH;
1905 break;
1906 case 0x12: // STS.L MACL,@-Rn 0100nnnn00010010
1907 tmp = SHR_MACL;
1908 break;
1909 case 0x22: // STS.L PR,@-Rn 0100nnnn00100010
1910 tmp = SHR_PR;
1911 break;
1912 case 0x03: // STC.L SR,@-Rn 0100nnnn00000011
1913 tmp = SHR_SR;
1914 cycles++;
1915 break;
1916 case 0x13: // STC.L GBR,@-Rn 0100nnnn00010011
1917 tmp = SHR_GBR;
1918 cycles++;
1919 break;
1920 case 0x23: // STC.L VBR,@-Rn 0100nnnn00100011
1921 tmp = SHR_VBR;
1922 cycles++;
1923 break;
1924 default:
1925 goto default_;
1926 }
1927 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1928 emith_sub_r_imm(tmp2, 4);
1929 rcache_clean();
1930 rcache_get_reg_arg(0, GET_Rn());
1931 tmp3 = rcache_get_reg_arg(1, tmp);
1932 if (tmp == SHR_SR)
1933 emith_clear_msb(tmp3, tmp3, 22); // reserved bits defined by ISA as 0
1934 emit_memhandler_write(2, pc, drcf.delayed_op);
1935 goto end_op;
1936 case 0x04:
1937 case 0x05:
1938 switch (op & 0x3f)
1939 {
1940 case 0x04: // ROTL Rn 0100nnnn00000100
1941 case 0x05: // ROTR Rn 0100nnnn00000101
1942 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1943 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1944 if (drcf.delayed_op)
1945 DELAY_SAVE_T(sr);
1946 emith_tpop_carry(sr, 0); // dummy
1947 if (op & 1) {
1948 emith_rorf(tmp, tmp, 1);
1949 } else
1950 emith_rolf(tmp, tmp, 1);
1951 emith_tpush_carry(sr, 0);
1952 goto end_op;
1953 case 0x24: // ROTCL Rn 0100nnnn00100100
1954 case 0x25: // ROTCR Rn 0100nnnn00100101
1955 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1956 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1957 if (drcf.delayed_op)
1958 DELAY_SAVE_T(sr);
1959 emith_tpop_carry(sr, 0);
1960 if (op & 1) {
1961 emith_rorcf(tmp);
1962 } else
1963 emith_rolcf(tmp);
1964 emith_tpush_carry(sr, 0);
1965 goto end_op;
1966 case 0x15: // CMP/PL Rn 0100nnnn00010101
1967 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
1968 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
1969 if (drcf.delayed_op)
1970 DELAY_SAVE_T(sr);
1971 emith_bic_r_imm(sr, T);
1972 emith_cmp_r_imm(tmp, 0);
1973 EMITH_SJMP_START(DCOND_LE);
1974 emith_or_r_imm_c(DCOND_GT, sr, T);
1975 EMITH_SJMP_END(DCOND_LE);
1976 goto end_op;
1977 }
1978 goto default_;
1979 case 0x06:
1980 case 0x07:
1981 switch (op & 0x3f)
1982 {
1983 case 0x06: // LDS.L @Rm+,MACH 0100mmmm00000110
1984 tmp = SHR_MACH;
1985 break;
1986 case 0x16: // LDS.L @Rm+,MACL 0100mmmm00010110
1987 tmp = SHR_MACL;
1988 break;
1989 case 0x26: // LDS.L @Rm+,PR 0100mmmm00100110
1990 tmp = SHR_PR;
1991 break;
1992 case 0x07: // LDC.L @Rm+,SR 0100mmmm00000111
1993 tmp = SHR_SR;
1994 cycles += 2;
1995 break;
1996 case 0x17: // LDC.L @Rm+,GBR 0100mmmm00010111
1997 tmp = SHR_GBR;
1998 cycles += 2;
1999 break;
2000 case 0x27: // LDC.L @Rm+,VBR 0100mmmm00100111
2001 tmp = SHR_VBR;
2002 cycles += 2;
2003 break;
2004 default:
2005 goto default_;
2006 }
2007 rcache_get_reg_arg(0, GET_Rn());
2008 tmp2 = emit_memhandler_read(2);
2009 if (tmp == SHR_SR) {
2010 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2011 if (drcf.delayed_op)
2012 DELAY_SAVE_T(sr);
2013 emith_write_sr(sr, tmp2);
2014 drcf.test_irq = 1;
2015 } else {
2016 tmp = rcache_get_reg(tmp, RC_GR_WRITE);
2017 emith_move_r_r(tmp, tmp2);
2018 }
2019 rcache_free_tmp(tmp2);
2020 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2021 emith_add_r_imm(tmp, 4);
2022 goto end_op;
2023 case 0x08:
2024 case 0x09:
2025 switch (GET_Fx())
2026 {
2027 case 0:
2028 // SHLL2 Rn 0100nnnn00001000
2029 // SHLR2 Rn 0100nnnn00001001
2030 tmp = 2;
2031 break;
2032 case 1:
2033 // SHLL8 Rn 0100nnnn00011000
2034 // SHLR8 Rn 0100nnnn00011001
2035 tmp = 8;
2036 break;
2037 case 2:
2038 // SHLL16 Rn 0100nnnn00101000
2039 // SHLR16 Rn 0100nnnn00101001
2040 tmp = 16;
2041 break;
2042 default:
2043 goto default_;
2044 }
2045 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2046 if (op & 1) {
2047 emith_lsr(tmp2, tmp2, tmp);
2048 } else
2049 emith_lsl(tmp2, tmp2, tmp);
2050 goto end_op;
2051 case 0x0a:
2052 switch (GET_Fx())
2053 {
2054 case 0: // LDS Rm,MACH 0100mmmm00001010
2055 tmp2 = SHR_MACH;
2056 break;
2057 case 1: // LDS Rm,MACL 0100mmmm00011010
2058 tmp2 = SHR_MACL;
2059 break;
2060 case 2: // LDS Rm,PR 0100mmmm00101010
2061 tmp2 = SHR_PR;
2062 break;
2063 default:
2064 goto default_;
2065 }
2066 emit_move_r_r(tmp2, GET_Rn());
2067 goto end_op;
2068 case 0x0b:
2069 switch (GET_Fx())
2070 {
2071 case 0: // JSR @Rm 0100mmmm00001011
2072 case 2: // JMP @Rm 0100mmmm00101011
2073 DELAYED_OP;
2074 if (!(op & 0x20))
2075 emit_move_r_imm32(SHR_PR, pc + 2);
2076 emit_move_r_r(SHR_PC, (op >> 8) & 0x0f);
2077 out_pc = (u32)-1;
2078 cycles++;
2079 break;
2080 case 1: // TAS.B @Rn 0100nnnn00011011
2081 // XXX: is TAS working on 32X?
2082 rcache_get_reg_arg(0, GET_Rn());
2083 tmp = emit_memhandler_read(0);
2084 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2085 if (drcf.delayed_op)
2086 DELAY_SAVE_T(sr);
2087 emith_bic_r_imm(sr, T);
2088 emith_cmp_r_imm(tmp, 0);
2089 emit_or_t_if_eq(sr);
2090 rcache_clean();
2091 emith_or_r_imm(tmp, 0x80);
2092 tmp2 = rcache_get_tmp_arg(1); // assuming it differs from tmp
2093 emith_move_r_r(tmp2, tmp);
2094 rcache_free_tmp(tmp);
2095 rcache_get_reg_arg(0, GET_Rn());
2096 emit_memhandler_write(0, pc, drcf.delayed_op);
2097 cycles += 3;
2098 break;
2099 default:
2100 goto default_;
2101 }
2102 goto end_op;
2103 case 0x0e:
2104 tmp = rcache_get_reg(GET_Rn(), RC_GR_READ);
2105 switch (GET_Fx())
2106 {
2107 case 0: // LDC Rm,SR 0100mmmm00001110
2108 tmp2 = SHR_SR;
2109 break;
2110 case 1: // LDC Rm,GBR 0100mmmm00011110
2111 tmp2 = SHR_GBR;
2112 break;
2113 case 2: // LDC Rm,VBR 0100mmmm00101110
2114 tmp2 = SHR_VBR;
2115 break;
2116 default:
2117 goto default_;
2118 }
2119 if (tmp2 == SHR_SR) {
2120 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2121 if (drcf.delayed_op)
2122 DELAY_SAVE_T(sr);
2123 emith_write_sr(sr, tmp);
2124 drcf.test_irq = 1;
2125 } else {
2126 tmp2 = rcache_get_reg(tmp2, RC_GR_WRITE);
2127 emith_move_r_r(tmp2, tmp);
2128 }
2129 goto end_op;
2130 case 0x0f:
2131 // MAC.W @Rm+,@Rn+ 0100nnnnmmmm1111
2132 emit_indirect_read_double(&tmp, &tmp2, GET_Rn(), GET_Rm(), 1);
2133 emith_sext(tmp, tmp, 16);
2134 emith_sext(tmp2, tmp2, 16);
2135 tmp3 = rcache_get_reg(SHR_MACL, RC_GR_RMW);
2136 tmp4 = rcache_get_reg(SHR_MACH, RC_GR_RMW);
2137 emith_mula_s64(tmp3, tmp4, tmp, tmp2);
2138 rcache_free_tmp(tmp2);
2139 // XXX: MACH should be untouched when S is set?
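      // when the S bit is set, saturate: if MACH no longer equals the sign
      // extension of MACL, clamp MACL to 0x7fffffff (positive overflow)
      // or 0x80000000 (negative overflow)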
2140 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
2141 emith_tst_r_imm(sr, S);
2142 EMITH_JMP_START(DCOND_EQ);
2143
2144 emith_asr(tmp, tmp3, 31);
2145 emith_eorf_r_r(tmp, tmp4); // tmp = ((signed)macl >> 31) ^ mach
2146 EMITH_JMP_START(DCOND_EQ);
2147 emith_move_r_imm(tmp3, 0x80000000);
2148 emith_tst_r_r(tmp4, tmp4);
2149 EMITH_SJMP_START(DCOND_MI);
2150 emith_sub_r_imm_c(DCOND_PL, tmp3, 1); // positive
2151 EMITH_SJMP_END(DCOND_MI);
2152 EMITH_JMP_END(DCOND_EQ);
2153
2154 EMITH_JMP_END(DCOND_EQ);
2155 rcache_free_tmp(tmp);
2156 cycles += 2;
2157 goto end_op;
2158 }
2159 goto default_;
2160
2161 /////////////////////////////////////////////
2162 case 0x05:
2163 // MOV.L @(disp,Rm),Rn 0101nnnnmmmmdddd
2164 emit_memhandler_read_rr(GET_Rn(), GET_Rm(), (op & 0x0f) * 4, 2);
2165 goto end_op;
2166
2167 /////////////////////////////////////////////
2168 case 0x06:
2169 switch (op & 0x0f)
2170 {
2171 case 0x00: // MOV.B @Rm,Rn 0110nnnnmmmm0000
2172 case 0x01: // MOV.W @Rm,Rn 0110nnnnmmmm0001
2173 case 0x02: // MOV.L @Rm,Rn 0110nnnnmmmm0010
2174 case 0x04: // MOV.B @Rm+,Rn 0110nnnnmmmm0100
2175 case 0x05: // MOV.W @Rm+,Rn 0110nnnnmmmm0101
2176 case 0x06: // MOV.L @Rm+,Rn 0110nnnnmmmm0110
2177 emit_memhandler_read_rr(GET_Rn(), GET_Rm(), 0, op & 3);
2178 if ((op & 7) >= 4 && GET_Rn() != GET_Rm()) {
2179 tmp = rcache_get_reg(GET_Rm(), RC_GR_RMW);
2180 emith_add_r_imm(tmp, (1 << (op & 3)));
2181 }
2182 goto end_op;
2183 case 0x03:
2184 case 0x07 ... 0x0f:
2185 tmp = rcache_get_reg(GET_Rm(), RC_GR_READ);
2186 tmp2 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
2187 switch (op & 0x0f)
2188 {
2189 case 0x03: // MOV Rm,Rn 0110nnnnmmmm0011
2190 emith_move_r_r(tmp2, tmp);
2191 break;
2192 case 0x07: // NOT Rm,Rn 0110nnnnmmmm0111
2193 emith_mvn_r_r(tmp2, tmp);
2194 break;
2195 case 0x08: // SWAP.B Rm,Rn 0110nnnnmmmm1000
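        // Rn = (Rm & 0xffff0000) | ((Rm & 0x00ff) << 8) | ((Rm >> 8) & 0x00ff)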
2196 tmp3 = tmp2;
2197 if (tmp == tmp2)
2198 tmp3 = rcache_get_tmp();
2199 tmp4 = rcache_get_tmp();
2200 emith_lsr(tmp3, tmp, 16);
2201 emith_or_r_r_lsl(tmp3, tmp, 24);
2202 emith_and_r_r_imm(tmp4, tmp, 0xff00);
2203 emith_or_r_r_lsl(tmp3, tmp4, 8);
2204 emith_rol(tmp2, tmp3, 16);
2205 rcache_free_tmp(tmp4);
2206 if (tmp == tmp2)
2207 rcache_free_tmp(tmp3);
2208 break;
2209 case 0x09: // SWAP.W Rm,Rn 0110nnnnmmmm1001
2210 emith_rol(tmp2, tmp, 16);
2211 break;
2212 case 0x0a: // NEGC Rm,Rn 0110nnnnmmmm1010
2213 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2214 if (drcf.delayed_op)
2215 DELAY_SAVE_T(sr);
2216 emith_tpop_carry(sr, 1);
2217 emith_negcf_r_r(tmp2, tmp);
2218 emith_tpush_carry(sr, 1);
2219 break;
2220 case 0x0b: // NEG Rm,Rn 0110nnnnmmmm1011
2221 emith_neg_r_r(tmp2, tmp);
2222 break;
2223 case 0x0c: // EXTU.B Rm,Rn 0110nnnnmmmm1100
2224 emith_clear_msb(tmp2, tmp, 24);
2225 break;
2226 case 0x0d: // EXTU.W Rm,Rn 0110nnnnmmmm1101
2227 emith_clear_msb(tmp2, tmp, 16);
2228 break;
2229 case 0x0e: // EXTS.B Rm,Rn 0110nnnnmmmm1110
2230 emith_sext(tmp2, tmp, 8);
2231 break;
2232 case 0x0f: // EXTS.W Rm,Rn 0110nnnnmmmm1111
2233 emith_sext(tmp2, tmp, 16);
2234 break;
2235 }
2236 goto end_op;
2237 }
2238 goto default_;
2239
2240 /////////////////////////////////////////////
2241 case 0x07:
2242 // ADD #imm,Rn 0111nnnniiiiiiii
2243 tmp = rcache_get_reg(GET_Rn(), RC_GR_RMW);
2244 if (op & 0x80) { // adding negative
2245 emith_sub_r_imm(tmp, -op & 0xff);
2246 } else
2247 emith_add_r_imm(tmp, op & 0xff);
2248 goto end_op;
2249
2250 /////////////////////////////////////////////
2251 case 0x08:
2252 switch (op & 0x0f00)
2253 {
2254 case 0x0000: // MOV.B R0,@(disp,Rn) 10000000nnnndddd
2255 case 0x0100: // MOV.W R0,@(disp,Rn) 10000001nnnndddd
2256 rcache_clean();
2257 tmp = rcache_get_reg_arg(0, GET_Rm());
2258 tmp2 = rcache_get_reg_arg(1, SHR_R0);
2259 tmp3 = (op & 0x100) >> 8;
2260 if (op & 0x0f)
2261 emith_add_r_imm(tmp, (op & 0x0f) << tmp3);
2262 emit_memhandler_write(tmp3, pc, drcf.delayed_op);
2263 goto end_op;
2264 case 0x0400: // MOV.B @(disp,Rm),R0 10000100mmmmdddd
2265 case 0x0500: // MOV.W @(disp,Rm),R0 10000101mmmmdddd
2266 tmp = (op & 0x100) >> 8;
2267 emit_memhandler_read_rr(SHR_R0, GET_Rm(), (op & 0x0f) << tmp, tmp);
2268 goto end_op;
2269 case 0x0800: // CMP/EQ #imm,R0 10001000iiiiiiii
2270 // XXX: could use cmn
2271 tmp = rcache_get_tmp();
2272      tmp2 = rcache_get_reg(SHR_R0, RC_GR_READ);
2273 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2274 if (drcf.delayed_op)
2275 DELAY_SAVE_T(sr);
2276 emith_move_r_imm_s8(tmp, op & 0xff);
2277 emith_bic_r_imm(sr, T);
2278 emith_cmp_r_r(tmp2, tmp);
2279 emit_or_t_if_eq(sr);
2280 rcache_free_tmp(tmp);
2281 goto end_op;
2282 case 0x0d00: // BT/S label 10001101dddddddd
2283 case 0x0f00: // BF/S label 10001111dddddddd
2284 DELAYED_OP;
2285 // fallthrough
2286 case 0x0900: // BT label 10001001dddddddd
2287 case 0x0b00: // BF label 10001011dddddddd
2288 // will handle conditional branches later
2289 pending_branch_cond = (op & 0x0200) ? DCOND_EQ : DCOND_NE;
2290 i = ((signed int)(op << 24) >> 23);
2291 pending_branch_pc = pc + i + 2;
2292 goto end_op;
2293 }
2294 goto default_;
2295
2296 /////////////////////////////////////////////
2297 case 0x09:
2298 // MOV.W @(disp,PC),Rn 1001nnnndddddddd
2299 tmp = pc + (op & 0xff) * 2 + 2;
2300#if PROPAGATE_CONSTANTS
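      // if the literal is within reach of this block, fetch it at translation
      // time and track it as a constant; its address is recorded so the block
      // can be invalidated if the literal is ever written to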
2301 if (tmp < end_pc + MAX_LITERAL_OFFSET && literal_addr_count < MAX_LITERALS) {
2302 ADD_TO_ARRAY(literal_addr, literal_addr_count, tmp,);
2303 gconst_new(GET_Rn(), (u32)(int)(signed short)FETCH_OP(tmp));
2304 }
2305 else
2306#endif
2307 {
2308 tmp2 = rcache_get_tmp_arg(0);
2309 emith_move_r_imm(tmp2, tmp);
2310 tmp2 = emit_memhandler_read(1);
2311 tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
2312 emith_sext(tmp3, tmp2, 16);
2313 rcache_free_tmp(tmp2);
2314 }
2315 goto end_op;
2316
2317 /////////////////////////////////////////////
2318 case 0x0a:
2319 // BRA label 1010dddddddddddd
2320 DELAYED_OP;
2321 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2322 tmp = ((signed int)(op << 20) >> 19);
2323 out_pc = pc + tmp + 2;
2324 if (tmp == (u32)-4)
2325 emith_clear_msb(sr, sr, 20); // burn cycles
2326 cycles++;
2327 break;
2328
2329 /////////////////////////////////////////////
2330 case 0x0b:
2331 // BSR label 1011dddddddddddd
2332 DELAYED_OP;
2333 emit_move_r_imm32(SHR_PR, pc + 2);
2334 tmp = ((signed int)(op << 20) >> 19);
2335 out_pc = pc + tmp + 2;
2336 cycles++;
2337 break;
2338
2339 /////////////////////////////////////////////
2340 case 0x0c:
2341 switch (op & 0x0f00)
2342 {
2343 case 0x0000: // MOV.B R0,@(disp,GBR) 11000000dddddddd
2344 case 0x0100: // MOV.W R0,@(disp,GBR) 11000001dddddddd
2345 case 0x0200: // MOV.L R0,@(disp,GBR) 11000010dddddddd
2346 rcache_clean();
2347 tmp = rcache_get_reg_arg(0, SHR_GBR);
2348 tmp2 = rcache_get_reg_arg(1, SHR_R0);
2349 tmp3 = (op & 0x300) >> 8;
2350 emith_add_r_imm(tmp, (op & 0xff) << tmp3);
2351 emit_memhandler_write(tmp3, pc, drcf.delayed_op);
2352 goto end_op;
2353 case 0x0400: // MOV.B @(disp,GBR),R0 11000100dddddddd
2354 case 0x0500: // MOV.W @(disp,GBR),R0 11000101dddddddd
2355 case 0x0600: // MOV.L @(disp,GBR),R0 11000110dddddddd
2356 tmp = (op & 0x300) >> 8;
2357 emit_memhandler_read_rr(SHR_R0, SHR_GBR, (op & 0xff) << tmp, tmp);
2358 goto end_op;
2359 case 0x0300: // TRAPA #imm 11000011iiiiiiii
2360 tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
2361 emith_sub_r_imm(tmp, 4*2);
2362 // push SR
2363 tmp = rcache_get_reg_arg(0, SHR_SP);
2364 emith_add_r_imm(tmp, 4);
2365 tmp = rcache_get_reg_arg(1, SHR_SR);
2366 emith_clear_msb(tmp, tmp, 22);
2367 emit_memhandler_write(2, pc, drcf.delayed_op);
2368 // push PC
2369 rcache_get_reg_arg(0, SHR_SP);
2370 tmp = rcache_get_tmp_arg(1);
2371 emith_move_r_imm(tmp, pc);
2372 emit_memhandler_write(2, pc, drcf.delayed_op);
2373 // obtain new PC
2374 emit_memhandler_read_rr(SHR_PC, SHR_VBR, (op & 0xff) * 4, 2);
2375 out_pc = (u32)-1;
2376 cycles += 7;
2377 goto end_op;
2378 case 0x0700: // MOVA @(disp,PC),R0 11000111dddddddd
2379 emit_move_r_imm32(SHR_R0, (pc + (op & 0xff) * 4 + 2) & ~3);
2380 goto end_op;
2381 case 0x0800: // TST #imm,R0 11001000iiiiiiii
2382 tmp = rcache_get_reg(SHR_R0, RC_GR_READ);
2383 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2384 if (drcf.delayed_op)
2385 DELAY_SAVE_T(sr);
2386 emith_bic_r_imm(sr, T);
2387 emith_tst_r_imm(tmp, op & 0xff);
2388 emit_or_t_if_eq(sr);
2389 goto end_op;
2390 case 0x0900: // AND #imm,R0 11001001iiiiiiii
2391 tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
2392 emith_and_r_imm(tmp, op & 0xff);
2393 goto end_op;
2394 case 0x0a00: // XOR #imm,R0 11001010iiiiiiii
2395 tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
2396 emith_eor_r_imm(tmp, op & 0xff);
2397 goto end_op;
2398 case 0x0b00: // OR #imm,R0 11001011iiiiiiii
2399 tmp = rcache_get_reg(SHR_R0, RC_GR_RMW);
2400 emith_or_r_imm(tmp, op & 0xff);
2401 goto end_op;
2402 case 0x0c00: // TST.B #imm,@(R0,GBR) 11001100iiiiiiii
2403 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2404 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2405 if (drcf.delayed_op)
2406 DELAY_SAVE_T(sr);
2407 emith_bic_r_imm(sr, T);
2408 emith_tst_r_imm(tmp, op & 0xff);
2409 emit_or_t_if_eq(sr);
2410 rcache_free_tmp(tmp);
2411 cycles += 2;
2412 goto end_op;
2413 case 0x0d00: // AND.B #imm,@(R0,GBR) 11001101iiiiiiii
2414 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2415 emith_and_r_imm(tmp, op & 0xff);
2416 goto end_rmw_op;
2417 case 0x0e00: // XOR.B #imm,@(R0,GBR) 11001110iiiiiiii
2418 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2419 emith_eor_r_imm(tmp, op & 0xff);
2420 goto end_rmw_op;
2421 case 0x0f00: // OR.B #imm,@(R0,GBR) 11001111iiiiiiii
2422 tmp = emit_indirect_indexed_read(SHR_R0, SHR_GBR, 0);
2423 emith_or_r_imm(tmp, op & 0xff);
2424 end_rmw_op:
2425 tmp2 = rcache_get_tmp_arg(1);
2426 emith_move_r_r(tmp2, tmp);
2427 rcache_free_tmp(tmp);
2428 tmp3 = rcache_get_reg_arg(0, SHR_GBR);
2429 tmp4 = rcache_get_reg(SHR_R0, RC_GR_READ);
2430 emith_add_r_r(tmp3, tmp4);
2431 emit_memhandler_write(0, pc, drcf.delayed_op);
2432 cycles += 2;
2433 goto end_op;
2434 }
2435 goto default_;
2436
2437 /////////////////////////////////////////////
2438 case 0x0d:
2439 // MOV.L @(disp,PC),Rn 1101nnnndddddddd
2440 tmp = (pc + (op & 0xff) * 4 + 2) & ~3;
2441#if PROPAGATE_CONSTANTS
2442 if (tmp < end_pc + MAX_LITERAL_OFFSET && literal_addr_count < MAX_LITERALS) {
2443 ADD_TO_ARRAY(literal_addr, literal_addr_count, tmp,);
2444 gconst_new(GET_Rn(), FETCH32(tmp));
2445 }
2446 else
2447#endif
2448 {
2449 tmp2 = rcache_get_tmp_arg(0);
2450 emith_move_r_imm(tmp2, tmp);
2451 tmp2 = emit_memhandler_read(2);
2452 tmp3 = rcache_get_reg(GET_Rn(), RC_GR_WRITE);
2453 emith_move_r_r(tmp3, tmp2);
2454 rcache_free_tmp(tmp2);
2455 }
2456 goto end_op;
2457
2458 /////////////////////////////////////////////
2459 case 0x0e:
2460 // MOV #imm,Rn 1110nnnniiiiiiii
2461 emit_move_r_imm32(GET_Rn(), (u32)(signed int)(signed char)op);
2462 goto end_op;
2463
2464 default:
2465 default_:
2466 elprintf(EL_ANOMALY, "%csh2 drc: unhandled op %04x @ %08x",
2467 sh2->is_slave ? 's' : 'm', op, pc - 2);
2468 break;
2469 }
2470
2471end_op:
2472 rcache_unlock_all();
2473
2474 // conditional branch handling (with/without delay)
2475 if (pending_branch_cond != -1 && drcf.delayed_op != 2)
2476 {
2477 u32 target_pc = pending_branch_pc;
2478 int ctaken = drcf.delayed_op ? 1 : 2;
2479 void *target;
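    // test T, charge the extra cycles of a taken branch only on the taken
    // path, then either emit a patchable jump to a local target or exit
    // the block through dr_prepare_ext_branch()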
2480
2481 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2482 FLUSH_CYCLES(sr);
2483 if (drcf.use_saved_t)
2484 emith_tst_r_imm(sr, T_save);
2485 else
2486 emith_tst_r_imm(sr, T);
2487
2488 // handle cycles
2489 emith_sub_r_imm_c(pending_branch_cond, sr, ctaken<<12);
2490 rcache_clean();
2491
2492#if LINK_BRANCHES
2493 if (find_in_array(branch_target_pc, branch_target_count, target_pc) >= 0) {
2494 // local branch
2495      // XXX: backward jumps could already be linked here, since their targets are known
2496 branch_patch_pc[branch_patch_count] = target_pc;
2497 branch_patch_ptr[branch_patch_count] = tcache_ptr;
2498 emith_jump_cond_patchable(pending_branch_cond, tcache_ptr);
2499
2500 branch_patch_count++;
2501 if (branch_patch_count == MAX_LOCAL_BRANCHES) {
2502 dbg(1, "warning: too many local branches");
2503 break;
2504 }
2505 }
2506 else
2507#endif
2508 {
2509 // can't resolve branch locally, make a block exit
2510 emit_move_r_imm32(SHR_PC, target_pc);
2511 rcache_clean();
2512
2513 target = dr_prepare_ext_branch(target_pc, sh2, tcache_id);
2514 if (target == NULL)
2515 return NULL;
2516 emith_jump_cond_patchable(pending_branch_cond, target);
2517 }
2518
2519 drcf.use_saved_t = 0;
2520 pending_branch_cond = -1;
2521 }
2522
2523 // test irq?
2524 // XXX: delay slots..
2525 if (drcf.test_irq && drcf.delayed_op != 2) {
2526 if (!drcf.delayed_op)
2527 emit_move_r_imm32(SHR_PC, pc);
2528 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2529 FLUSH_CYCLES(sr);
2530 rcache_flush();
2531 emith_call(sh2_drc_test_irq);
2532 drcf.test_irq = 0;
2533 }
2534
2535 do_host_disasm(tcache_id);
2536
2537 if (out_pc != 0 && drcf.delayed_op != 2)
2538 break;
2539 }
2540
2541 tmp = rcache_get_reg(SHR_SR, RC_GR_RMW);
2542 FLUSH_CYCLES(tmp);
2543 rcache_flush();
2544
2545 if (out_pc == (u32)-1) {
2546 // indirect jump -> back to dispatcher
2547 emith_jump(sh2_drc_dispatcher);
2548 } else {
2549 void *target;
2550 if (out_pc == 0)
2551 out_pc = pc;
2552 emit_move_r_imm32(SHR_PC, out_pc);
2553 rcache_flush();
2554
2555 target = dr_prepare_ext_branch(out_pc, sh2, tcache_id);
2556 if (target == NULL)
2557 return NULL;
2558 emith_jump_patchable(target);
2559 }
2560
2561 // link local branches
2562 for (i = 0; i < branch_patch_count; i++) {
2563 void *target;
2564 int t;
2565 t = find_in_array(branch_target_pc, branch_target_count, branch_patch_pc[i]);
2566 target = branch_target_ptr[t];
2567 if (target == NULL) {
2568 // flush pc and go back to dispatcher (this should no longer happen)
2569 dbg(1, "stray branch to %08x %p", branch_patch_pc[i], tcache_ptr);
2570 target = tcache_ptr;
2571 emit_move_r_imm32(SHR_PC, branch_patch_pc[i]);
2572 rcache_flush();
2573 emith_jump(sh2_drc_dispatcher);
2574 }
2575 emith_jump_patch(branch_patch_ptr[i], target);
2576 }
2577
2578 end_pc = pc;
2579
2580 // mark memory blocks as containing compiled code
2581  // overwrite entries of any overlapping blocks, as those become unreachable anyway
2582 if (tcache_id != 0 || (this_block->addr & 0xc7fc0000) == 0x06000000)
2583 {
2584 u16 *p, *drc_ram_blk = NULL;
2585 u32 mask = 0, shift = 0;
2586
2587 if (tcache_id != 0) {
2588 // data array, BIOS
2589 drc_ram_blk = Pico32xMem->drcblk_da[sh2->is_slave];
2590 shift = SH2_DRCBLK_DA_SHIFT;
2591 mask = 0xfff/2;
2592 }
2593 else if ((this_block->addr & 0xc7fc0000) == 0x06000000) {
2594 // SDRAM
2595 drc_ram_blk = Pico32xMem->drcblk_ram;
2596 shift = SH2_DRCBLK_RAM_SHIFT;
2597 mask = 0x3ffff/2;
2598 }
2599
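    // each u16 map entry covers one instruction word and holds
    // (block_id << 1) | is_block_entry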
2600 drc_ram_blk[(base_pc >> shift) & mask] = (blkid_main << 1) | 1;
2601 for (pc = base_pc + 2; pc < end_pc; pc += 2) {
2602 p = &drc_ram_blk[(pc >> shift) & mask];
2603 if (*p && *p != (blkid_main << 1))
2604 *p = BLOCKID_OVERLAP; // block intersection..
2605 else
2606 *p = blkid_main << 1;
2607 }
2608
2609 // mark block entries (used by dr_get_bd())
2610 for (i = 0; i < branch_target_count; i++)
2611 if (branch_target_blkid[i] != 0)
2612 drc_ram_blk[(branch_target_pc[i] >> shift) & mask] =
2613 (branch_target_blkid[i] << 1) | 1;
2614
2615 // mark literals
2616 for (i = 0; i < literal_addr_count; i++) {
2617 tmp = literal_addr[i];
2618 p = &drc_ram_blk[(tmp >> shift) & mask];
2619 if (*p && *p != (blkid_main << 1))
2620 *p = BLOCKID_OVERLAP;
2621 else
2622 *p = blkid_main << 1;
2623 if (!(tmp & 3) && shift == 1)
2624        p[1] = p[0]; // assume a 32-bit literal, mark the next word too
2625 }
2626 }
2627
2628 tcache_ptrs[tcache_id] = tcache_ptr;
2629
2630 host_instructions_updated(block_entry, tcache_ptr);
2631
2632 do_host_disasm(tcache_id);
2633 dbg(2, " block #%d,%d tcache %d/%d, insns %d -> %d %.3f",
2634 tcache_id, block_counts[tcache_id],
2635 tcache_ptr - tcache_bases[tcache_id], tcache_sizes[tcache_id],
2636 insns_compiled, host_insn_count, (double)host_insn_count / insns_compiled);
2637 if ((sh2->pc & 0xc6000000) == 0x02000000) // ROM
2638 dbg(2, " hash collisions %d/%d", hash_collisions, block_counts[tcache_id]);
2639/*
2640 printf("~~~\n");
2641 tcache_dsm_ptrs[tcache_id] = block_entry;
2642 do_host_disasm(tcache_id);
2643 printf("~~~\n");
2644*/
2645
2646#if (DRC_DEBUG & 4)
2647 fflush(stdout);
2648#endif
2649
2650 return block_entry;
2651}
2652
2653static void sh2_generate_utils(void)
2654{
2655 int arg0, arg1, arg2, sr, tmp;
2656 void *sh2_drc_write_end, *sh2_drc_write_slot_end;
2657
2658 sh2_drc_write32 = p32x_sh2_write32;
2659 sh2_drc_read8 = p32x_sh2_read8;
2660 sh2_drc_read16 = p32x_sh2_read16;
2661 sh2_drc_read32 = p32x_sh2_read32;
2662
2663 host_arg2reg(arg0, 0);
2664 host_arg2reg(arg1, 1);
2665 host_arg2reg(arg2, 2);
2666 emith_move_r_r(arg0, arg0); // nop
2667
2668 // sh2_drc_exit(void)
2669 sh2_drc_exit = (void *)tcache_ptr;
2670 emit_do_static_regs(1, arg2);
2671 emith_sh2_drc_exit();
2672
2673 // sh2_drc_dispatcher(void)
2674 sh2_drc_dispatcher = (void *)tcache_ptr;
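  // the cycle counter lives in the upper bits of SR; once it goes negative,
  // the timeslice is over and we leave through sh2_drc_exit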
2675 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
2676 emith_cmp_r_imm(sr, 0);
2677 emith_jump_cond(DCOND_LT, sh2_drc_exit);
2678 rcache_invalidate();
2679 emith_ctx_read(arg0, SHR_PC * 4);
2680 emith_ctx_read(arg1, offsetof(SH2, is_slave));
2681 emith_add_r_r_imm(arg2, CONTEXT_REG, offsetof(SH2, drc_tmp));
2682 emith_call(dr_lookup_block);
2683 emit_block_entry();
2684 // lookup failed, call sh2_translate()
2685 emith_move_r_r(arg0, CONTEXT_REG);
2686 emith_ctx_read(arg1, offsetof(SH2, drc_tmp)); // tcache_id
2687 emith_call(sh2_translate);
2688 emit_block_entry();
2689 // sh2_translate() failed, flush cache and retry
2690 emith_ctx_read(arg0, offsetof(SH2, drc_tmp));
2691 emith_call(flush_tcache);
2692 emith_move_r_r(arg0, CONTEXT_REG);
2693 emith_ctx_read(arg1, offsetof(SH2, drc_tmp));
2694 emith_call(sh2_translate);
2695 emit_block_entry();
2696 // XXX: can't translate, fail
2697 emith_call(dr_failure);
2698
2699 // sh2_drc_test_irq(void)
2700 // assumes it's called from main function (may jump to dispatcher)
2701 sh2_drc_test_irq = (void *)tcache_ptr;
2702 emith_ctx_read(arg1, offsetof(SH2, pending_level));
2703 sr = rcache_get_reg(SHR_SR, RC_GR_READ);
2704 emith_lsr(arg0, sr, I_SHIFT);
2705 emith_and_r_imm(arg0, 0x0f);
2706 emith_cmp_r_r(arg1, arg0); // pending_level > ((sr >> 4) & 0x0f)?
2707 EMITH_SJMP_START(DCOND_GT);
2708 emith_ret_c(DCOND_LE); // nope, return
2709 EMITH_SJMP_END(DCOND_GT);
2710 // adjust SP
2711 tmp = rcache_get_reg(SHR_SP, RC_GR_RMW);
2712 emith_sub_r_imm(tmp, 4*2);
2713 rcache_clean();
2714 // push SR
2715 tmp = rcache_get_reg_arg(0, SHR_SP);
2716 emith_add_r_imm(tmp, 4);
2717 tmp = rcache_get_reg_arg(1, SHR_SR);
2718 emith_clear_msb(tmp, tmp, 22);
2719 emith_move_r_r(arg2, CONTEXT_REG);
2720 emith_call(p32x_sh2_write32); // XXX: use sh2_drc_write32?
2721 rcache_invalidate();
2722 // push PC
2723 rcache_get_reg_arg(0, SHR_SP);
2724 emith_ctx_read(arg1, SHR_PC * 4);
2725 emith_move_r_r(arg2, CONTEXT_REG);
2726 emith_call(p32x_sh2_write32);
2727 rcache_invalidate();
2728 // update I, cycles, do callback
2729 emith_ctx_read(arg1, offsetof(SH2, pending_level));
2730 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2731 emith_bic_r_imm(sr, I);
2732 emith_or_r_r_lsl(sr, arg1, I_SHIFT);
2733 emith_sub_r_imm(sr, 13 << 12); // at least 13 cycles
2734 rcache_flush();
2735 emith_move_r_r(arg0, CONTEXT_REG);
2736 emith_call_ctx(offsetof(SH2, irq_callback)); // vector = sh2->irq_callback(sh2, level);
2737 // obtain new PC
2738 emith_lsl(arg0, arg0, 2);
2739 emith_ctx_read(arg1, SHR_VBR * 4);
2740 emith_add_r_r(arg0, arg1);
2741 emit_memhandler_read(2);
2742 emith_ctx_write(arg0, SHR_PC * 4);
2743#ifdef __i386__
2744 emith_add_r_imm(xSP, 4); // fix stack
2745#endif
2746 emith_jump(sh2_drc_dispatcher);
2747 rcache_invalidate();
2748
2749 // sh2_drc_entry(SH2 *sh2)
2750 sh2_drc_entry = (void *)tcache_ptr;
2751 emith_sh2_drc_entry();
2752 emith_move_r_r(CONTEXT_REG, arg0); // move ctx, arg0
2753 emit_do_static_regs(0, arg2);
2754 emith_call(sh2_drc_test_irq);
2755 emith_jump(sh2_drc_dispatcher);
2756
2757 // write-caused irq detection
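  // a nonzero return value from the write handler is taken to mean the write
  // may have raised an interrupt, in which case sh2_drc_test_irq is run
  // before returning to the translated code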
2758 sh2_drc_write_end = tcache_ptr;
2759 emith_tst_r_r(arg0, arg0);
2760 EMITH_SJMP_START(DCOND_NE);
2761 emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp)); // return
2762 EMITH_SJMP_END(DCOND_NE);
2763 emith_call(sh2_drc_test_irq);
2764 emith_jump_ctx(offsetof(SH2, drc_tmp));
2765
2766 // write-caused irq detection for writes in delay slot
2767 sh2_drc_write_slot_end = tcache_ptr;
2768 emith_tst_r_r(arg0, arg0);
2769 EMITH_SJMP_START(DCOND_NE);
2770 emith_jump_ctx_c(DCOND_EQ, offsetof(SH2, drc_tmp));
2771 EMITH_SJMP_END(DCOND_NE);
2772 // just burn cycles to get back to dispatcher after branch is handled
2773 sr = rcache_get_reg(SHR_SR, RC_GR_RMW);
2774 emith_ctx_write(sr, offsetof(SH2, irq_cycles));
2775 emith_clear_msb(sr, sr, 20); // clear cycles
2776 rcache_flush();
2777 emith_jump_ctx(offsetof(SH2, drc_tmp));
2778
2779 // sh2_drc_write8(u32 a, u32 d)
2780 sh2_drc_write8 = (void *)tcache_ptr;
2781 emith_ret_to_ctx(offsetof(SH2, drc_tmp));
2782 emith_ctx_read(arg2, offsetof(SH2, write8_tab));
2783 emith_sh2_wcall(arg0, arg2, sh2_drc_write_end);
2784
2785 // sh2_drc_write16(u32 a, u32 d)
2786 sh2_drc_write16 = (void *)tcache_ptr;
2787 emith_ret_to_ctx(offsetof(SH2, drc_tmp));
2788 emith_ctx_read(arg2, offsetof(SH2, write16_tab));
2789 emith_sh2_wcall(arg0, arg2, sh2_drc_write_end);
2790
2791 // sh2_drc_write8_slot(u32 a, u32 d)
2792 sh2_drc_write8_slot = (void *)tcache_ptr;
2793 emith_ret_to_ctx(offsetof(SH2, drc_tmp));
2794 emith_ctx_read(arg2, offsetof(SH2, write8_tab));
2795 emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end);
2796
2797 // sh2_drc_write16_slot(u32 a, u32 d)
2798 sh2_drc_write16_slot = (void *)tcache_ptr;
2799 emith_ret_to_ctx(offsetof(SH2, drc_tmp));
2800 emith_ctx_read(arg2, offsetof(SH2, write16_tab));
2801 emith_sh2_wcall(arg0, arg2, sh2_drc_write_slot_end);
2802
2803#ifdef PDB_NET
2804 // debug
2805 #define MAKE_READ_WRAPPER(func) { \
2806 void *tmp = (void *)tcache_ptr; \
2807 emith_push_ret(); \
2808 emith_call(func); \
2809 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
2810 emith_addf_r_r(arg2, arg0); \
2811 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
2812 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
2813 emith_adc_r_imm(arg2, 0x01000000); \
2814 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
2815 emith_pop_and_ret(); \
2816 func = tmp; \
2817 }
2818 #define MAKE_WRITE_WRAPPER(func) { \
2819 void *tmp = (void *)tcache_ptr; \
2820 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[0])); \
2821 emith_addf_r_r(arg2, arg1); \
2822 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[0])); \
2823 emith_ctx_read(arg2, offsetof(SH2, pdb_io_csum[1])); \
2824 emith_adc_r_imm(arg2, 0x01000000); \
2825 emith_ctx_write(arg2, offsetof(SH2, pdb_io_csum[1])); \
2826 emith_move_r_r(arg2, CONTEXT_REG); \
2827 emith_jump(func); \
2828 func = tmp; \
2829 }
2830
2831 MAKE_READ_WRAPPER(sh2_drc_read8);
2832 MAKE_READ_WRAPPER(sh2_drc_read16);
2833 MAKE_READ_WRAPPER(sh2_drc_read32);
2834 MAKE_WRITE_WRAPPER(sh2_drc_write8);
2835 MAKE_WRITE_WRAPPER(sh2_drc_write8_slot);
2836 MAKE_WRITE_WRAPPER(sh2_drc_write16);
2837 MAKE_WRITE_WRAPPER(sh2_drc_write16_slot);
2838 MAKE_WRITE_WRAPPER(sh2_drc_write32);
2839#if (DRC_DEBUG & 4)
2840 host_dasm_new_symbol(sh2_drc_read8);
2841 host_dasm_new_symbol(sh2_drc_read16);
2842 host_dasm_new_symbol(sh2_drc_read32);
2843 host_dasm_new_symbol(sh2_drc_write32);
2844#endif
2845#endif
2846
2847 rcache_invalidate();
2848#if (DRC_DEBUG & 4)
2849 host_dasm_new_symbol(sh2_drc_entry);
2850 host_dasm_new_symbol(sh2_drc_dispatcher);
2851 host_dasm_new_symbol(sh2_drc_exit);
2852 host_dasm_new_symbol(sh2_drc_test_irq);
2853 host_dasm_new_symbol(sh2_drc_write_end);
2854 host_dasm_new_symbol(sh2_drc_write_slot_end);
2855 host_dasm_new_symbol(sh2_drc_write8);
2856 host_dasm_new_symbol(sh2_drc_write8_slot);
2857 host_dasm_new_symbol(sh2_drc_write16);
2858 host_dasm_new_symbol(sh2_drc_write16_slot);
2859#endif
2860}
2861
2862static void sh2_smc_rm_block_entry(block_desc *bd, int tcache_id)
2863{
2864 void *tmp;
2865
2866 // XXX: kill links somehow?
2867 dbg(2, " killing entry %08x-%08x, blkid %d,%d",
2868 bd->addr, bd->end_addr, tcache_id, bd - block_tables[tcache_id]);
2869 if (bd->addr == 0 || bd->tcache_ptr == NULL) {
2870 dbg(1, " killing dead block!? %08x", bd->addr);
2871 return;
2872 }
2873
2874  // since we never reuse the space of dead blocks,
2875  // a jump to the dispatcher could be inserted here for code that links to this point
2876 //emith_jump_at(bd->tcache_ptr, sh2_drc_dispatcher);
2877
2878 // attempt to handle self-modifying blocks by exiting at nearest known PC
2879 tmp = tcache_ptr;
2880 tcache_ptr = bd->tcache_ptr;
2881 emit_move_r_imm32(SHR_PC, bd->addr);
2882 rcache_flush();
2883 emith_jump(sh2_drc_dispatcher);
2884
2885 host_instructions_updated(bd->tcache_ptr, tcache_ptr);
2886 tcache_ptr = tmp;
2887
2888 bd->addr = bd->end_addr = 0;
2889}
2890
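// remove all blocks covering the written address 'a': walk the drc_ram_blk
// map back to the block start, then forward, clearing entries and killing
// every block entry that shares the same end_addr; falls back to a linear
// scan of the block table when the map entry at 'a' is missing or ambiguous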
2891static void sh2_smc_rm_block(u32 a, u16 *drc_ram_blk, int tcache_id, u32 shift, u32 mask)
2892{
2893 block_desc *btab = block_tables[tcache_id];
2894 u16 *p = drc_ram_blk + ((a & mask) >> shift);
2895 u16 *pmax = drc_ram_blk + (mask >> shift);
2896 u32 id = ~0, end_addr;
2897 int max_zeros = MAX_LITERAL_OFFSET >> shift;
2898 int i, zeros;
2899
2900 if (*p == 0 || (*p >> 1) >= BLOCKID_MAX) {
2901 u32 from = ~0, to = 0;
2902 dbg(1, "slow-remove blocks at @%08x", a);
2903 for (i = 0; i < block_counts[tcache_id]; i++) {
2904 if (btab[i].addr <= a && a < btab[i].end_addr) {
2905 if (btab[i].addr < from)
2906 from = btab[i].addr;
2907 if (btab[i].end_addr > to)
2908 to = btab[i].end_addr;
2909 sh2_smc_rm_block_entry(&btab[i], tcache_id);
2910 }
2911 }
2912 if (from < to) {
2913 p = drc_ram_blk + ((from & mask) >> shift);
2914 memset(p, 0, (to - from) >> (shift - 1));
2915 }
2916 return;
2917 }
2918
2919  // use end_addr to identify entries that belong to the same block
2920 end_addr = btab[*p >> 1].end_addr;
2921
2922 // go up to the start
2923 for (zeros = 0; p > drc_ram_blk && zeros < max_zeros; p--) {
2924    // there can be holes because games sometimes keep variables
2925    // directly in the literal pool, and we don't inline those
2926    // to avoid recompiling (Star Wars Arcade)
2927 if (p[-1] == 0) {
2928 zeros++;
2929 continue;
2930 }
2931 zeros = 0;
2932 if ((p[-1] >> 1) >= BLOCKID_MAX)
2933 break;
2934 if (btab[p[-1] >> 1].end_addr != end_addr)
2935 break;
2936 }
2937
2938 if (!(*p & 1))
2939 dbg(1, "smc rm: missing block start for %08x?", a);
2940
2941 // now go down and kill everything
2942 for (zeros = 0; p < pmax && zeros < max_zeros; p++) {
2943 if (*p == 0) {
2944 zeros++;
2945 continue;
2946 }
2947 zeros = 0;
2948 if ((*p >> 1) >= BLOCKID_MAX)
2949 break;
2950 if ((*p >> 1) == id) {
2951 *p = 0;
2952 continue;
2953 }
2954 id = *p >> 1;
2955 if (btab[id].end_addr != end_addr)
2956 break;
2957 *p = 0;
2958 sh2_smc_rm_block_entry(&btab[id], tcache_id);
2959 }
2960}
2961
2962void sh2_drc_wcheck_ram(unsigned int a, int val, int cpuid)
2963{
2964 dbg(2, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
2965 sh2_smc_rm_block(a, Pico32xMem->drcblk_ram, 0, SH2_DRCBLK_RAM_SHIFT, 0x3ffff);
2966}
2967
2968void sh2_drc_wcheck_da(unsigned int a, int val, int cpuid)
2969{
2970 dbg(2, "%csh2 smc check @%08x", cpuid ? 's' : 'm', a);
2971 sh2_smc_rm_block(a, Pico32xMem->drcblk_da[cpuid],
2972 1 + cpuid, SH2_DRCBLK_DA_SHIFT, 0xfff);
2973}
2974
2975int sh2_execute(SH2 *sh2c, int cycles)
2976{
2977 int ret_cycles;
2978
2979 sh2c->cycles_timeslice = cycles;
2980
2981 // cycles are kept in SHR_SR unused bits (upper 20)
2982 // bit11 contains T saved for delay slot
2983 // others are usual SH2 flags
2984 sh2c->sr &= 0x3f3;
2985 sh2c->sr |= cycles << 12;
2986 sh2_drc_entry(sh2c);
2987
2988 // TODO: irq cycles
2989 ret_cycles = (signed int)sh2c->sr >> 12;
2990 if (ret_cycles > 0)
2991 dbg(1, "warning: drc returned with cycles: %d", ret_cycles);
2992
2993 return sh2c->cycles_timeslice - ret_cycles;
2994}
2995
2996#if (DRC_DEBUG & 2)
2997void block_stats(void)
2998{
2999 int c, b, i, total = 0;
3000
3001 printf("block stats:\n");
3002 for (b = 0; b < ARRAY_SIZE(block_tables); b++)
3003 for (i = 0; i < block_counts[b]; i++)
3004 if (block_tables[b][i].addr != 0)
3005 total += block_tables[b][i].refcount;
3006
3007 for (c = 0; c < 10; c++) {
3008 block_desc *blk, *maxb = NULL;
3009 int max = 0;
3010 for (b = 0; b < ARRAY_SIZE(block_tables); b++) {
3011 for (i = 0; i < block_counts[b]; i++) {
3012 blk = &block_tables[b][i];
3013 if (blk->addr != 0 && blk->refcount > max) {
3014 max = blk->refcount;
3015 maxb = blk;
3016 }
3017 }
3018 }
3019 if (maxb == NULL)
3020 break;
3021 printf("%08x %9d %2.3f%%\n", maxb->addr, maxb->refcount,
3022 (double)maxb->refcount / total * 100.0);
3023 maxb->refcount = 0;
3024 }
3025
3026 for (b = 0; b < ARRAY_SIZE(block_tables); b++)
3027 for (i = 0; i < block_counts[b]; i++)
3028 block_tables[b][i].refcount = 0;
3029}
3030#else
3031#define block_stats()
3032#endif
3033
3034void sh2_drc_flush_all(void)
3035{
3036 block_stats();
3037 flush_tcache(0);
3038 flush_tcache(1);
3039 flush_tcache(2);
3040}
3041
3042void sh2_drc_mem_setup(SH2 *sh2)
3043{
3044 // fill the convenience pointers
3045 sh2->p_bios = sh2->is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
3046 sh2->p_da = Pico32xMem->data_array[sh2->is_slave];
3047 sh2->p_sdram = Pico32xMem->sdram;
3048 sh2->p_rom = Pico.rom;
3049}
3050
3051int sh2_drc_init(SH2 *sh2)
3052{
3053 int i;
3054
3055 if (block_tables[0] == NULL)
3056 {
3057 for (i = 0; i < TCACHE_BUFFERS; i++) {
3058 block_tables[i] = calloc(block_max_counts[i], sizeof(*block_tables[0]));
3059 if (block_tables[i] == NULL)
3060 goto fail;
3061 // max 2 block links (exits) per block
3062 block_links[i] = calloc(block_max_counts[i] * 2, sizeof(*block_links[0]));
3063 if (block_links[i] == NULL)
3064 goto fail;
3065 }
3066 memset(block_counts, 0, sizeof(block_counts));
3067 memset(block_link_counts, 0, sizeof(block_link_counts));
3068
3069 drc_cmn_init();
3070 tcache_ptr = tcache;
3071 sh2_generate_utils();
3072 host_instructions_updated(tcache, tcache_ptr);
3073
3074 tcache_bases[0] = tcache_ptrs[0] = tcache_ptr;
3075 for (i = 1; i < ARRAY_SIZE(tcache_bases); i++)
3076 tcache_bases[i] = tcache_ptrs[i] = tcache_bases[i - 1] + tcache_sizes[i - 1];
3077
3078 // tmp
3079 PicoOpt |= POPT_DIS_VDP_FIFO;
3080
3081#if (DRC_DEBUG & 4)
3082 for (i = 0; i < ARRAY_SIZE(block_tables); i++)
3083 tcache_dsm_ptrs[i] = tcache_bases[i];
3084 // disasm the utils
3085 tcache_dsm_ptrs[0] = tcache;
3086 do_host_disasm(0);
3087#endif
3088#if (DRC_DEBUG & 1)
3089 hash_collisions = 0;
3090#endif
3091 }
3092
3093 if (hash_table == NULL) {
3094 hash_table = calloc(sizeof(hash_table[0]), MAX_HASH_ENTRIES);
3095 if (hash_table == NULL)
3096 goto fail;
3097 }
3098
3099 return 0;
3100
3101fail:
3102 sh2_drc_finish(sh2);
3103 return -1;
3104}
3105
3106void sh2_drc_finish(SH2 *sh2)
3107{
3108 int i;
3109
3110 if (block_tables[0] != NULL) {
3111 block_stats();
3112
3113 for (i = 0; i < TCACHE_BUFFERS; i++) {
3114#if (DRC_DEBUG & 4)
3115 printf("~~~ tcache %d\n", i);
3116 tcache_dsm_ptrs[i] = tcache_bases[i];
3117 tcache_ptr = tcache_ptrs[i];
3118 do_host_disasm(i);
3119#endif
3120
3121 if (block_tables[i] != NULL)
3122 free(block_tables[i]);
3123 block_tables[i] = NULL;
3124      if (block_links[i] != NULL)
3125 free(block_links[i]);
3126 block_links[i] = NULL;
3127 }
3128
3129 drc_cmn_cleanup();
3130 }
3131
3132 if (hash_table != NULL) {
3133 free(hash_table);
3134 hash_table = NULL;
3135 }
3136}
3137
3138#endif /* DRC_SH2 */
3139
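// return a host pointer base such that ((u16 *)base)[pc/2] fetches the opcode
// at pc (see FETCH_OP); returns -1 when pc is not in a directly readable
// region, since NULL would be a valid base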
3140static void *dr_get_pc_base(u32 pc, int is_slave)
3141{
3142 void *ret = NULL;
3143 u32 mask = 0;
3144
3145 if ((pc & ~0x7ff) == 0) {
3146 // BIOS
3147 ret = is_slave ? Pico32xMem->sh2_rom_s : Pico32xMem->sh2_rom_m;
3148 mask = 0x7ff;
3149 }
3150 else if ((pc & 0xfffff000) == 0xc0000000) {
3151 // data array
3152 ret = Pico32xMem->data_array[is_slave];
3153 mask = 0xfff;
3154 }
3155 else if ((pc & 0xc6000000) == 0x06000000) {
3156 // SDRAM
3157 ret = Pico32xMem->sdram;
3158 mask = 0x03ffff;
3159 }
3160 else if ((pc & 0xc6000000) == 0x02000000) {
3161 // ROM
3162 ret = Pico.rom;
3163 mask = 0x3fffff;
3164 }
3165
3166 if (ret == NULL)
3167    return (void *)-1; // NULL is a valid base, so signal failure with -1
3168
3169 return (char *)ret - (pc & ~mask);
3170}
3171
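// pre-scan a block starting at base_pc: find where it ends (first
// unconditional jump/call, SLEEP or TRAPA, or the cycle limit) and flag
// delay-slot instructions and local branch targets in op_flags[]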
3172void scan_block(u32 base_pc, int is_slave, u8 *op_flags, u32 *end_pc)
3173{
3174 u16 *dr_pc_base;
3175 u32 pc, target, op;
3176 int cycles;
3177
3178 memset(op_flags, 0, BLOCK_CYCLE_LIMIT);
3179
3180 dr_pc_base = dr_get_pc_base(base_pc, is_slave);
3181
3182 for (cycles = 0, pc = base_pc; cycles < BLOCK_CYCLE_LIMIT-1; cycles++, pc += 2) {
3183 op = FETCH_OP(pc);
3184 if ((op & 0xf000) == 0xa000 || (op & 0xf000) == 0xb000) { // BRA, BSR
3185 signed int offs = ((signed int)(op << 20) >> 19);
3186 pc += 2;
3187 OP_FLAGS(pc) |= OF_DELAY_OP;
3188 target = pc + offs + 2;
3189 if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
3190 OP_FLAGS(target) |= OF_TARGET;
3191 break;
3192 }
3193 if ((op & 0xf000) == 0) {
3194 op &= 0xff;
3195 if (op == 0x1b) // SLEEP
3196 break;
3197 // BRAF, BSRF, RTS, RTE
3198 if (op == 0x23 || op == 0x03 || op == 0x0b || op == 0x2b) {
3199 pc += 2;
3200 OP_FLAGS(pc) |= OF_DELAY_OP;
3201 break;
3202 }
3203 continue;
3204 }
3205 if ((op & 0xf0df) == 0x400b) { // JMP, JSR
3206 pc += 2;
3207 OP_FLAGS(pc) |= OF_DELAY_OP;
3208 break;
3209 }
3210 if ((op & 0xf900) == 0x8900) { // BT(S), BF(S)
3211 signed int offs = ((signed int)(op << 24) >> 23);
3212 if (op & 0x0400)
3213 OP_FLAGS(pc + 2) |= OF_DELAY_OP;
3214 target = pc + offs + 4;
3215 if (base_pc <= target && target < base_pc + BLOCK_CYCLE_LIMIT * 2)
3216 OP_FLAGS(target) |= OF_TARGET;
3217 }
3218 if ((op & 0xff00) == 0xc300) // TRAPA
3219 break;
3220 }
3221 *end_pc = pc;
3222}
3223
3224// vim:shiftwidth=2:ts=2:expandtab