1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2011 Ari64 *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
22 #include <stdint.h> // for uint64_t
27 #include <libkern/OSCacheControl.h>
30 #include <3ds_utils.h>
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h"
39 #include "../psxinterpreter.h"
41 #include "emu_if.h" // emulator interface
42 #include "linkage_offsets.h"
43 #include "compiler_features.h"
44 #include "arm_features.h"
47 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
50 #define min(a, b) ((b) < (a) ? (b) : (a))
53 #define max(a, b) ((b) > (a) ? (b) : (a))
58 //#define REGMAP_PRINT // with DISASM only
63 #define assem_debug printf
65 #define assem_debug(...)
67 //#define inv_debug printf
68 #define inv_debug(...)
71 #include "assem_x86.h"
74 #include "assem_x64.h"
77 #include "assem_arm.h"
80 #include "assem_arm64.h"
83 #define RAM_SIZE 0x200000
85 #define MAX_OUTPUT_BLOCK_SIZE 262144
86 #define EXPIRITY_OFFSET (MAX_OUTPUT_BLOCK_SIZE * 2)
87 #define PAGE_COUNT 1024
89 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
90 #define INVALIDATE_USE_COND_CALL
94 // apparently Vita has a 16MB limit, so either we cut tc in half,
95 // or use this hack (it's a hack because tc size was designed to be power-of-2)
96 #define TC_REDUCE_BYTES 4096
98 #define TC_REDUCE_BYTES 0
103 struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
104 const void *f[2048 / sizeof(void *)];
109 u_char translation_cache[(1 << TARGET_SIZE_2) - TC_REDUCE_BYTES];
110 struct ndrc_tramp tramp;
113 #ifdef BASE_ADDR_DYNAMIC
114 static struct ndrc_mem *ndrc;
116 static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
117 static struct ndrc_mem *ndrc = &ndrc_;
119 #ifdef TC_WRITE_OFFSET
121 # include <sys/types.h>
122 # include <sys/stat.h>
126 static long ndrc_write_ofs;
127 #define NDRC_WRITE_OFFSET(x) (void *)((char *)(x) + ndrc_write_ofs)
129 #define NDRC_WRITE_OFFSET(x) (x)
152 // regmap_pre[i] - regs before [i] insn starts; dirty things here that
153 // don't match .regmap will be written back
154 // [i].regmap_entry - regs that must be set up if someone jumps here
155 // [i].regmap - regs [i] insn will read/(over)write
156 // branch_regs[i].* - same as above but for branches, takes delay slot into account
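// Illustrative example (host reg number assumed): regmap[hr] holds the MIPS
// reg cached in host reg hr, or -1 if hr is free, so regmap[3] == 2 would
// mean host reg 3 currently holds $2 (v0); get_reg() below does the reverse
// (mips -> host) lookup by scanning this array.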
159 signed char regmap_entry[HOST_REGS];
160 signed char regmap[HOST_REGS];
164 u_int wasconst; // before; for example 'lw r2, (r2)' wasconst is true
165 u_int isconst; // ... but isconst is false when r2 is known (hr)
166 u_int loadedconst; // host regs that have constants loaded
167 u_int noevict; // can't evict this hr (alloced by current op)
168 //u_int waswritten; // MIPS regs that were used as store base before
198 struct block_info *next;
201 u_int start; // vaddr of the block start
202 u_int len; // of the whole block source
207 u_char inv_near_misses;
225 static struct decoded_insn
228 u_char opcode; // bits 31-26
229 u_char opcode2; // (depends on opcode)
242 u_char is_delay_load:1; // is_load + MFC/CFC
243 u_char is_exception:1; // unconditional, also interp. fallback
244 u_char may_except:1; // might generate an exception
247 static struct compile_info
252 signed char min_free_regs;
254 signed char reserved[2];
258 static char invalid_code[0x100000];
259 static struct ht_entry hash_table[65536];
260 static struct block_info *blocks[PAGE_COUNT];
261 static struct jump_info *jumps[PAGE_COUNT];
263 static u_int *source;
264 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
265 static uint64_t gte_rt[MAXBLOCK];
266 static uint64_t gte_unneeded[MAXBLOCK];
267 static u_int smrv[32]; // speculated MIPS register values
268 static u_int smrv_strong; // mask of regs that are likely to have correct values
269 static u_int smrv_weak; // same, but somewhat less likely
270 static u_int smrv_strong_next; // same, but after current insn executes
271 static u_int smrv_weak_next;
272 static uint64_t unneeded_reg[MAXBLOCK];
273 static uint64_t branch_unneeded_reg[MAXBLOCK];
274 // see 'struct regstat' for a description
275 static signed char regmap_pre[MAXBLOCK][HOST_REGS];
276 // contains the 'real' constants at insn [i], but may differ from what's actually
277 // loaded in the host reg, since the 'final' value is always loaded, see get_final_value()
278 static uint32_t current_constmap[HOST_REGS];
279 static uint32_t constmap[MAXBLOCK][HOST_REGS];
280 static struct regstat regs[MAXBLOCK];
281 static struct regstat branch_regs[MAXBLOCK];
283 static void *instr_addr[MAXBLOCK];
284 static struct link_entry link_addr[MAXBLOCK];
285 static int linkcount;
286 static struct code_stub stubs[MAXBLOCK*3];
287 static int stubcount;
288 static u_int literals[1024][2];
289 static int literalcount;
290 static int is_delayslot;
291 static char shadow[1048576] __attribute__((aligned(16)));
293 static u_int expirep;
294 static u_int stop_after_jal;
295 static u_int f1_hack;
297 static int stat_bc_direct;
298 static int stat_bc_pre;
299 static int stat_bc_restore;
300 static int stat_ht_lookups;
301 static int stat_jump_in_lookups;
302 static int stat_restore_tries;
303 static int stat_restore_compares;
304 static int stat_inv_addr_calls;
305 static int stat_inv_hits;
306 static int stat_blocks;
307 static int stat_links;
308 #define stat_inc(s) s++
309 #define stat_dec(s) s--
310 #define stat_clear(s) s = 0
314 #define stat_clear(s)
317 int new_dynarec_hacks;
318 int new_dynarec_hacks_pergame;
319 int new_dynarec_hacks_old;
320 int new_dynarec_did_compile;
322 #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
324 extern int cycle_count; // ... until end of the timeslice, counts -N -> 0 (CCREG)
325 extern int last_count; // last absolute target, often = next_interupt
327 extern int pending_exception;
328 extern int branch_target;
329 extern uintptr_t ram_offset;
330 extern uintptr_t mini_ht[32][2];
332 /* registers that may be allocated */
334 #define LOREG 32 // lo
335 #define HIREG 33 // hi
336 //#define FSREG 34 // FPU status (FCSR)
337 //#define CSREG 35 // Coprocessor status
338 #define CCREG 36 // Cycle count
339 #define INVCP 37 // Pointer to invalid_code
340 //#define MMREG 38 // Pointer to memory_map
341 #define ROREG 39 // ram offset (if psxM != 0x80000000)
343 #define FTEMP 40 // Load/store temporary register (was fpu)
344 #define PTEMP 41 // Prefetch temporary register
345 //#define TLREG 42 // TLB mapping offset
346 #define RHASH 43 // Return address hash
347 #define RHTBL 44 // Return address hash table address
348 #define RTEMP 45 // JR/JALR address register
350 #define AGEN1 46 // Address generation temporary register (pass5b_preallocate2)
351 //#define AGEN2 47 // Address generation temporary register
353 /* instruction types */
354 #define NOP 0 // No operation
355 #define LOAD 1 // Load
356 #define STORE 2 // Store
357 #define LOADLR 3 // Unaligned load
358 #define STORELR 4 // Unaligned store
359 #define MOV 5 // Move (hi/lo only)
360 #define ALU 6 // Arithmetic/logic
361 #define MULTDIV 7 // Multiply/divide
362 #define SHIFT 8 // Shift by register
363 #define SHIFTIMM 9 // Shift by immediate
364 #define IMM16 10 // 16-bit immediate
365 #define RJUMP 11 // Unconditional jump to register
366 #define UJUMP 12 // Unconditional jump
367 #define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
368 #define SJUMP 14 // Conditional branch (regimm format)
369 #define COP0 15 // Coprocessor 0
371 #define SYSCALL 22 // SYSCALL, BREAK
372 #define OTHER 23 // Other/unknown - do nothing
373 #define HLECALL 26 // PCSX fake opcodes for HLE
374 #define COP2 27 // Coprocessor 2 move
375 #define C2LS 28 // Coprocessor 2 load/store
376 #define C2OP 29 // Coprocessor 2 operation
377 #define INTCALL 30 // Call interpreter to handle rare corner cases
383 #define DJT_1 (void *)1l // no function, just a label in assem_debug log
384 #define DJT_2 (void *)2l
389 void jump_syscall (u_int u0, u_int u1, u_int pc);
390 void jump_syscall_ds(u_int u0, u_int u1, u_int pc);
391 void jump_break (u_int u0, u_int u1, u_int pc);
392 void jump_break_ds(u_int u0, u_int u1, u_int pc);
393 void jump_overflow (u_int u0, u_int u1, u_int pc);
394 void jump_overflow_ds(u_int u0, u_int u1, u_int pc);
395 void jump_addrerror (u_int cause, u_int addr, u_int pc);
396 void jump_addrerror_ds(u_int cause, u_int addr, u_int pc);
397 void jump_to_new_pc(void);
398 void call_gteStall(void);
399 void new_dyna_leave(void);
401 void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile);
402 void *ndrc_get_addr_ht(u_int vaddr);
403 void ndrc_add_jump_out(u_int vaddr, void *src);
404 void ndrc_write_invalidate_one(u_int addr);
405 static void ndrc_write_invalidate_many(u_int addr, u_int end);
407 static int new_recompile_block(u_int addr);
408 static void invalidate_block(struct block_info *block);
409 static void exception_assemble(int i, const struct regstat *i_regs, int ccadj_);
411 // Needed by assembler
412 static void wb_register(signed char r, const signed char regmap[], uint64_t dirty);
413 static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty);
414 static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr);
415 static void load_all_regs(const signed char i_regmap[]);
416 static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[]);
417 static void load_regs_entry(int t);
418 static void load_all_consts(const signed char regmap[], u_int dirty, int i);
419 static u_int get_host_reglist(const signed char *regmap);
421 static int get_final_value(int hr, int i, u_int *value);
422 static void add_stub(enum stub_type type, void *addr, void *retaddr,
423 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
424 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
425 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
426 static void add_to_linker(void *addr, u_int target, int ext);
427 static void *get_direct_memhandler(void *table, u_int addr,
428 enum stub_type type, uintptr_t *addr_host);
429 static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
430 static void pass_args(int a0, int a1);
431 static void emit_far_jump(const void *f);
432 static void emit_far_call(const void *f);
435 #include <psp2/kernel/sysmem.h>
437 // note: this interacts with RetroArch's Vita bootstrap code: bootstrap/vita/sbrk.c
438 extern int getVMBlock();
439 int _newlib_vm_size_user = sizeof(*ndrc);
442 static void mprotect_w_x(void *start, void *end, int is_x)
446 // *Open* enables write on all memory that was
447 // allocated by sceKernelAllocMemBlockForVM()?
449 sceKernelCloseVMDomain();
451 sceKernelOpenVMDomain();
452 #elif defined(HAVE_LIBNX)
454 // check to avoid the full flush in jitTransitionToExecutable()
455 if (g_jit.type != JitType_CodeMemory) {
457 rc = jitTransitionToExecutable(&g_jit);
459 rc = jitTransitionToWritable(&g_jit);
461 ;//SysPrintf("jitTransition %d %08x\n", is_x, rc);
463 #elif defined(TC_WRITE_OFFSET)
464 // separated rx and rw areas are always available
466 u_long mstart = (u_long)start & ~4095ul;
467 u_long mend = (u_long)end;
468 if (mprotect((void *)mstart, mend - mstart,
469 PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
470 SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
475 static void start_tcache_write(void *start, void *end)
477 mprotect_w_x(start, end, 0);
480 static void end_tcache_write(void *start, void *end)
482 #if defined(__arm__) || defined(__aarch64__)
483 size_t len = (char *)end - (char *)start;
484 #if defined(__BLACKBERRY_QNX__)
485 msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
486 #elif defined(__MACH__)
487 sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
489 sceKernelSyncVMDomain(sceBlock, start, len);
491 ctr_flush_invalidate_cache();
492 #elif defined(HAVE_LIBNX)
493 if (g_jit.type == JitType_CodeMemory) {
494 armDCacheClean(start, len);
495 armICacheInvalidate((char *)start - ndrc_write_ofs, len);
496 // as of v4.2.1 libnx lacks isb
497 __asm__ volatile("isb" ::: "memory");
499 #elif defined(__aarch64__)
500 // as of 2021, __clear_cache() is still broken on arm64
501 // so here is a custom one :(
502 clear_cache_arm64(start, end);
504 __clear_cache(start, end);
509 mprotect_w_x(start, end, 1);
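// Typical write cycle (see start_block()/end_block() below): start_tcache_write()
// makes the region writable, code is emitted into it, then end_tcache_write()
// syncs the d/i-caches and flips the mapping back to executable.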
512 static void *start_block(void)
514 u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
515 if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
516 end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
517 start_tcache_write(NDRC_WRITE_OFFSET(out), NDRC_WRITE_OFFSET(end));
521 static void end_block(void *start)
523 end_tcache_write(NDRC_WRITE_OFFSET(start), NDRC_WRITE_OFFSET(out));
526 #ifdef NDRC_CACHE_FLUSH_ALL
528 static int needs_clear_cache;
530 static void mark_clear_cache(void *target)
532 if (!needs_clear_cache) {
533 start_tcache_write(NDRC_WRITE_OFFSET(ndrc), NDRC_WRITE_OFFSET(ndrc + 1));
534 needs_clear_cache = 1;
538 static void do_clear_cache(void)
540 if (needs_clear_cache) {
541 end_tcache_write(NDRC_WRITE_OFFSET(ndrc), NDRC_WRITE_OFFSET(ndrc + 1));
542 needs_clear_cache = 0;
548 // also takes care of w^x mappings when patching code
549 static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
551 static void mark_clear_cache(void *target)
553 uintptr_t offset = (u_char *)target - ndrc->translation_cache;
554 u_int mask = 1u << ((offset >> 12) & 31);
555 if (!(needs_clear_cache[offset >> 17] & mask)) {
556 char *start = (char *)NDRC_WRITE_OFFSET((uintptr_t)target & ~4095l);
557 start_tcache_write(start, start + 4095);
558 needs_clear_cache[offset >> 17] |= mask;
562 // Clearing the cache is rather slow on ARM Linux, so mark the areas
563 // that need to be cleared, and then only clear these areas once.
564 static void do_clear_cache(void)
567 for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
569 u_int bitmap = needs_clear_cache[i];
572 for (j = 0; j < 32; j++)
575 if (!(bitmap & (1u << j)))
578 start = ndrc->translation_cache + i*131072 + j*4096;
580 for (j++; j < 32; j++) {
581 if (!(bitmap & (1u << j)))
585 end_tcache_write(NDRC_WRITE_OFFSET(start), NDRC_WRITE_OFFSET(end));
587 needs_clear_cache[i] = 0;
591 #endif // NDRC_CACHE_FLUSH_ALL
593 #define NO_CYCLE_PENALTY_THR 12
595 int cycle_multiplier_old;
596 static int cycle_multiplier_active;
598 static int CLOCK_ADJUST(int x)
600 int m = cycle_multiplier_active;
601 int s = (x >> 31) | 1;
602 return (x * m + s * 50) / 100;
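// Illustrative example (multiplier value assumed): with cycle_multiplier_active
// == 150, CLOCK_ADJUST(1) = (1*150 + 50)/100 = 2 and CLOCK_ADJUST(-1) =
// (-1*150 - 50)/100 = -2; 's' applies the +/-50 rounding term with the sign
// of x so negative cycle counts round symmetrically.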
605 static int ds_writes_rjump_rs(int i)
607 return dops[i].rs1 != 0
608 && (dops[i].rs1 == dops[i+1].rt1 || dops[i].rs1 == dops[i+1].rt2
609 || dops[i].rs1 == dops[i].rt1); // overwrites itself - same effect
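// e.g. a "jr t0" whose delay slot is "lw t0, 0(sp)": the delay slot
// overwrites the jump's source register.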
612 // psx addr mirror masking (for invalidation)
613 static u_int pmmask(u_int vaddr)
615 vaddr &= ~0xe0000000;
616 if (vaddr < 0x01000000)
617 vaddr &= ~0x00e00000; // RAM mirrors
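// e.g. pmmask(0xa0801234) -> 0x00001234: the kseg1 bits and the 8MB RAM
// mirror both fold away; BIOS addresses like 0xbfc00180 keep their low bits
// (-> 0x1fc00180) since they are outside the 16MB window.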
621 static u_int get_page(u_int vaddr)
623 u_int page = pmmask(vaddr) >> 12;
624 if (page >= PAGE_COUNT / 2)
625 page = PAGE_COUNT / 2 + (page & (PAGE_COUNT / 2 - 1));
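// e.g. get_page(0x80001234) == 1 (plain RAM), while get_page(0xbfc00180)
// (BIOS) folds into the upper half: 512 + (0x1fc00 & 511) == 512.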
629 // get a page for looking for a block that has vaddr
630 // (needed because the block may start in the previous page)
631 static u_int get_page_prev(u_int vaddr)
633 assert(MAXBLOCK <= (1 << 12));
634 u_int page = get_page(vaddr);
640 static struct ht_entry *hash_table_get(u_int vaddr)
642 return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
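// e.g. vaddr 0x80030008 indexes bin (0x8003 ^ 0x0008) == 0x800b, mixing the
// upper halfword into the 64K-entry index.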
645 static void hash_table_add(u_int vaddr, void *tcaddr)
647 struct ht_entry *ht_bin = hash_table_get(vaddr);
649 ht_bin->vaddr[1] = ht_bin->vaddr[0];
650 ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
651 ht_bin->vaddr[0] = vaddr;
652 ht_bin->tcaddr[0] = tcaddr;
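// each bin keeps the two most recently added entries: the new one goes into
// slot 0, the previous slot 0 is demoted to slot 1, and the old slot 1 is
// dropped (it can still be found through the blocks[] lists).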
655 static void hash_table_remove(int vaddr)
657 //printf("remove hash: %x\n",vaddr);
658 struct ht_entry *ht_bin = hash_table_get(vaddr);
659 if (ht_bin->vaddr[1] == vaddr) {
660 ht_bin->vaddr[1] = -1;
661 ht_bin->tcaddr[1] = NULL;
663 if (ht_bin->vaddr[0] == vaddr) {
664 ht_bin->vaddr[0] = ht_bin->vaddr[1];
665 ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
666 ht_bin->vaddr[1] = -1;
667 ht_bin->tcaddr[1] = NULL;
671 static void mark_invalid_code(u_int vaddr, u_int len, char invalid)
673 u_int vaddr_m = vaddr & 0x1fffffff;
675 for (i = vaddr_m & ~0xfff; i < vaddr_m + len; i += 0x1000) {
676 // ram mirrors, but should not hurt bios
677 for (j = 0; j < 0x800000; j += 0x200000) {
678 invalid_code[(i|j) >> 12] =
679 invalid_code[(i|j|0x80000000u) >> 12] =
680 invalid_code[(i|j|0xa0000000u) >> 12] = invalid;
683 if (!invalid && vaddr + len > inv_code_start && vaddr <= inv_code_end)
684 inv_code_start = inv_code_end = ~0;
687 static int doesnt_expire_soon(u_char *tcaddr)
689 u_int diff = (u_int)(tcaddr - out) & ((1u << TARGET_SIZE_2) - 1u);
690 return diff > EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE;
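// i.e. the distance (modulo the tc size) from the current emit pointer 'out'
// to tcaddr must be larger than the area that is about to be reclaimed
// (EXPIRITY_OFFSET plus one maximum-size block).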
693 static unused void check_for_block_changes(u_int start, u_int end)
695 u_int start_page = get_page_prev(start);
696 u_int end_page = get_page(end - 1);
699 for (page = start_page; page <= end_page; page++) {
700 struct block_info *block;
701 for (block = blocks[page]; block != NULL; block = block->next) {
704 if (memcmp(block->source, block->copy, block->len)) {
705 printf("bad block %08x-%08x %016llx %016llx @%08x\n",
706 block->start, block->start + block->len,
707 *(long long *)block->source, *(long long *)block->copy, psxRegs.pc);
715 static void *try_restore_block(u_int vaddr, u_int start_page, u_int end_page)
717 void *found_clean = NULL;
720 stat_inc(stat_restore_tries);
721 for (page = start_page; page <= end_page; page++) {
722 struct block_info *block;
723 for (block = blocks[page]; block != NULL; block = block->next) {
724 if (vaddr < block->start)
726 if (!block->is_dirty || vaddr >= block->start + block->len)
728 for (i = 0; i < block->jump_in_cnt; i++)
729 if (block->jump_in[i].vaddr == vaddr)
731 if (i == block->jump_in_cnt)
733 assert(block->source && block->copy);
734 stat_inc(stat_restore_compares);
735 if (memcmp(block->source, block->copy, block->len))
738 block->is_dirty = block->inv_near_misses = 0;
739 found_clean = block->jump_in[i].addr;
740 hash_table_add(vaddr, found_clean);
741 mark_invalid_code(block->start, block->len, 0);
742 stat_inc(stat_bc_restore);
743 inv_debug("INV: restored %08x %p (%d)\n", vaddr, found_clean, block->jump_in_cnt);
750 // this doesn't normally happen
751 static noinline u_int generate_exception(u_int pc)
753 //if (execBreakCheck(&psxRegs, pc))
754 // return psxRegs.pc;
756 // generate an address or bus error
757 psxRegs.CP0.n.Cause &= 0x300;
758 psxRegs.CP0.n.EPC = pc;
760 psxRegs.CP0.n.Cause |= R3000E_AdEL << 2;
761 psxRegs.CP0.n.BadVAddr = pc;
766 psxRegs.CP0.n.Cause |= R3000E_IBE << 2;
767 return (psxRegs.pc = 0x80000080);
770 // Get address from virtual address
771 // This is called from the recompiled JR/JALR instructions
772 static void noinline *get_addr(u_int vaddr, int can_compile)
774 u_int start_page = get_page_prev(vaddr);
775 u_int i, page, end_page = get_page(vaddr);
776 void *found_clean = NULL;
778 stat_inc(stat_jump_in_lookups);
779 for (page = start_page; page <= end_page; page++) {
780 const struct block_info *block;
781 for (block = blocks[page]; block != NULL; block = block->next) {
782 if (vaddr < block->start)
784 if (block->is_dirty || vaddr >= block->start + block->len)
786 for (i = 0; i < block->jump_in_cnt; i++)
787 if (block->jump_in[i].vaddr == vaddr)
789 if (i == block->jump_in_cnt)
791 found_clean = block->jump_in[i].addr;
792 hash_table_add(vaddr, found_clean);
796 found_clean = try_restore_block(vaddr, start_page, end_page);
803 int r = new_recompile_block(vaddr);
805 return ndrc_get_addr_ht(vaddr);
807 return ndrc_get_addr_ht(generate_exception(vaddr));
810 // Look up address in hash table first
811 void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile)
813 //check_for_block_changes(vaddr, vaddr + MAXBLOCK);
814 const struct ht_entry *ht_bin = hash_table_get(vaddr);
815 u_int vaddr_a = vaddr & ~3;
816 stat_inc(stat_ht_lookups);
817 if (ht_bin->vaddr[0] == vaddr_a) return ht_bin->tcaddr[0];
818 if (ht_bin->vaddr[1] == vaddr_a) return ht_bin->tcaddr[1];
819 return get_addr(vaddr, can_compile);
822 void *ndrc_get_addr_ht(u_int vaddr)
824 return ndrc_get_addr_ht_param(vaddr, 1);
827 static void clear_all_regs(signed char regmap[])
829 memset(regmap, -1, sizeof(regmap[0]) * HOST_REGS);
832 // get_reg: get allocated host reg from mips reg
833 // returns -1 if no such mips reg was allocated
834 #if defined(__arm__) && defined(HAVE_ARMV6) && HOST_REGS == 13 && EXCLUDE_REG == 11
836 extern signed char get_reg(const signed char regmap[], signed char r);
840 static signed char get_reg(const signed char regmap[], signed char r)
843 for (hr = 0; hr < HOST_REGS; hr++) {
844 if (hr == EXCLUDE_REG)
854 // get reg suitable for writing
855 static signed char get_reg_w(const signed char regmap[], signed char r)
857 return r == 0 ? -1 : get_reg(regmap, r);
860 // get reg as mask bit (1 << hr)
861 static u_int get_regm(const signed char regmap[], signed char r)
863 return (1u << (get_reg(regmap, r) & 31)) & ~(1u << 31);
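// note: get_reg() returns -1 when r is not allocated, which maps to bit 31
// here and is then masked off, so unallocated regs yield an empty mask
// (assumes no allocatable host reg uses bit 31).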
866 static signed char get_reg_temp(const signed char regmap[])
869 for (hr = 0; hr < HOST_REGS; hr++) {
870 if (hr == EXCLUDE_REG)
872 if (regmap[hr] == (signed char)-1)
878 // Find a register that is available for two consecutive cycles
879 static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
882 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
886 // reverse reg map: mips -> host
887 #define RRMAP_SIZE 64
888 static void make_rregs(const signed char regmap[], signed char rrmap[RRMAP_SIZE],
889 u_int *regs_can_change)
891 u_int r, hr, hr_can_change = 0;
892 memset(rrmap, -1, RRMAP_SIZE);
893 for (hr = 0; hr < HOST_REGS; )
896 rrmap[r & (RRMAP_SIZE - 1)] = hr;
897 // only add mips $1-$31+$lo, others shifted out
898 hr_can_change |= (uint64_t)1 << (hr + ((r - 1) & 32));
900 if (hr == EXCLUDE_REG)
903 hr_can_change |= 1u << (rrmap[33] & 31);
904 hr_can_change |= 1u << (rrmap[CCREG] & 31);
905 hr_can_change &= ~(1u << 31);
906 *regs_can_change = hr_can_change;
909 // same as get_reg, but takes rrmap
910 static signed char get_rreg(signed char rrmap[RRMAP_SIZE], signed char r)
912 assert(0 <= r && r < RRMAP_SIZE);
916 static int count_free_regs(const signed char regmap[])
920 for(hr=0;hr<HOST_REGS;hr++)
922 if(hr!=EXCLUDE_REG) {
923 if(regmap[hr]<0) count++;
929 static void dirty_reg(struct regstat *cur, signed char reg)
933 hr = get_reg(cur->regmap, reg);
938 static void set_const(struct regstat *cur, signed char reg, uint32_t value)
942 hr = get_reg(cur->regmap, reg);
944 cur->isconst |= 1<<hr;
945 current_constmap[hr] = value;
949 static void clear_const(struct regstat *cur, signed char reg)
953 hr = get_reg(cur->regmap, reg);
955 cur->isconst &= ~(1<<hr);
958 static int is_const(const struct regstat *cur, signed char reg)
961 if (reg < 0) return 0;
963 hr = get_reg(cur->regmap, reg);
965 return (cur->isconst>>hr)&1;
969 static uint32_t get_const(const struct regstat *cur, signed char reg)
973 hr = get_reg(cur->regmap, reg);
975 return current_constmap[hr];
977 SysPrintf("Unknown constant in r%d\n", reg);
981 // Least soon needed registers
982 // Look at the next ten instructions and see which registers
983 // will be used. Try not to reallocate these.
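// Roughly: hsn[r] ends up as the number of instructions until MIPS reg r is
// next used (smaller = needed sooner), with 10 (the caller's initial value)
// meaning "not needed within the lookahead window"; eviction prefers regs
// with the largest hsn value.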
984 static void lsn(u_char hsn[], int i)
994 if (dops[i+j].is_ujump)
996 // Don't go past an unconditional jump
1003 if(dops[i+j].rs1) hsn[dops[i+j].rs1]=j;
1004 if(dops[i+j].rs2) hsn[dops[i+j].rs2]=j;
1005 if(dops[i+j].rt1) hsn[dops[i+j].rt1]=j;
1006 if(dops[i+j].rt2) hsn[dops[i+j].rt2]=j;
1007 if(dops[i+j].itype==STORE || dops[i+j].itype==STORELR) {
1008 // Stores can allocate zero
1009 hsn[dops[i+j].rs1]=j;
1010 hsn[dops[i+j].rs2]=j;
1012 if (ram_offset && (dops[i+j].is_load || dops[i+j].is_store))
1014 // On some architectures stores need invc_ptr
1015 #if defined(HOST_IMM8)
1016 if (dops[i+j].is_store)
1019 if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
1027 if(cinfo[i+b].ba>=start && cinfo[i+b].ba<(start+slen*4))
1029 // Follow first branch
1030 int t=(cinfo[i+b].ba-start)>>2;
1031 j=7-b;if(t+j>=slen) j=slen-t-1;
1034 if(dops[t+j].rs1) if(hsn[dops[t+j].rs1]>j+b+2) hsn[dops[t+j].rs1]=j+b+2;
1035 if(dops[t+j].rs2) if(hsn[dops[t+j].rs2]>j+b+2) hsn[dops[t+j].rs2]=j+b+2;
1036 //if(dops[t+j].rt1) if(hsn[dops[t+j].rt1]>j+b+2) hsn[dops[t+j].rt1]=j+b+2;
1037 //if(dops[t+j].rt2) if(hsn[dops[t+j].rt2]>j+b+2) hsn[dops[t+j].rt2]=j+b+2;
1040 // TODO: preferred register based on backward branch
1042 // Delay slot should preferably not overwrite branch conditions or cycle count
1043 if (i > 0 && dops[i-1].is_jump) {
1044 if(dops[i-1].rs1) if(hsn[dops[i-1].rs1]>1) hsn[dops[i-1].rs1]=1;
1045 if(dops[i-1].rs2) if(hsn[dops[i-1].rs2]>1) hsn[dops[i-1].rs2]=1;
1047 // ...or hash tables
1051 // Coprocessor load/store needs FTEMP, even if not declared
1052 if(dops[i].itype==C2LS) {
1055 // Load/store L/R also uses FTEMP as a temporary register
1056 if (dops[i].itype == LOADLR || dops[i].itype == STORELR) {
1059 // Don't remove the miniht registers
1060 if(dops[i].itype==UJUMP||dops[i].itype==RJUMP)
1067 // We only want to allocate registers if we're going to use them again soon
1068 static int needed_again(int r, int i)
1074 if (i > 0 && dops[i-1].is_ujump)
1076 if(cinfo[i-1].ba<start || cinfo[i-1].ba>start+slen*4-4)
1077 return 0; // Don't need any registers if exiting the block
1085 if (dops[i+j].is_ujump)
1087 // Don't go past an unconditional jump
1091 if (dops[i+j].is_exception)
1098 if(dops[i+j].rs1==r) rn=j;
1099 if(dops[i+j].rs2==r) rn=j;
1100 if((unneeded_reg[i+j]>>r)&1) rn=10;
1101 if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
1111 // Try to match register allocations at the end of a loop with those at the start of the loop
1113 static int loop_reg(int i, int r, int hr)
1122 if (dops[i+j].is_ujump)
1124 // Don't go past an unconditional jump
1131 if(dops[i-1].itype==UJUMP||dops[i-1].itype==CJUMP||dops[i-1].itype==SJUMP)
1137 if((unneeded_reg[i+k]>>r)&1) return hr;
1138 if(i+k>=0&&(dops[i+k].itype==UJUMP||dops[i+k].itype==CJUMP||dops[i+k].itype==SJUMP))
1140 if(cinfo[i+k].ba>=start && cinfo[i+k].ba<(start+i*4))
1142 int t=(cinfo[i+k].ba-start)>>2;
1143 int reg=get_reg(regs[t].regmap_entry,r);
1144 if(reg>=0) return reg;
1145 //reg=get_reg(regs[t+1].regmap_entry,r);
1146 //if(reg>=0) return reg;
1154 // Allocate every register, preserving source/target regs
1155 static void alloc_all(struct regstat *cur,int i)
1159 for(hr=0;hr<HOST_REGS;hr++) {
1160 if(hr!=EXCLUDE_REG) {
1161 if((cur->regmap[hr]!=dops[i].rs1)&&(cur->regmap[hr]!=dops[i].rs2)&&
1162 (cur->regmap[hr]!=dops[i].rt1)&&(cur->regmap[hr]!=dops[i].rt2))
1165 cur->dirty&=~(1<<hr);
1168 if(cur->regmap[hr]==0)
1171 cur->dirty&=~(1<<hr);
1178 static int host_tempreg_in_use;
1180 static void host_tempreg_acquire(void)
1182 assert(!host_tempreg_in_use);
1183 host_tempreg_in_use = 1;
1186 static void host_tempreg_release(void)
1188 host_tempreg_in_use = 0;
1191 static void host_tempreg_acquire(void) {}
1192 static void host_tempreg_release(void) {}
1196 extern void gen_interupt();
1197 extern void do_insn_cmp();
1198 #define FUNCNAME(f) { f, " " #f }
1199 static const struct {
1202 } function_names[] = {
1203 FUNCNAME(cc_interrupt),
1204 FUNCNAME(gen_interupt),
1205 FUNCNAME(ndrc_get_addr_ht),
1206 FUNCNAME(jump_handler_read8),
1207 FUNCNAME(jump_handler_read16),
1208 FUNCNAME(jump_handler_read32),
1209 FUNCNAME(jump_handler_write8),
1210 FUNCNAME(jump_handler_write16),
1211 FUNCNAME(jump_handler_write32),
1212 FUNCNAME(ndrc_write_invalidate_one),
1213 FUNCNAME(ndrc_write_invalidate_many),
1214 FUNCNAME(jump_to_new_pc),
1215 FUNCNAME(jump_break),
1216 FUNCNAME(jump_break_ds),
1217 FUNCNAME(jump_syscall),
1218 FUNCNAME(jump_syscall_ds),
1219 FUNCNAME(jump_overflow),
1220 FUNCNAME(jump_overflow_ds),
1221 FUNCNAME(jump_addrerror),
1222 FUNCNAME(jump_addrerror_ds),
1223 FUNCNAME(call_gteStall),
1224 FUNCNAME(new_dyna_leave),
1225 FUNCNAME(pcsx_mtc0),
1226 FUNCNAME(pcsx_mtc0_ds),
1229 FUNCNAME(do_memhandler_pre),
1230 FUNCNAME(do_memhandler_post),
1234 FUNCNAME(do_insn_cmp_arm64),
1236 FUNCNAME(do_insn_cmp),
1241 static const char *func_name(const void *a)
1244 for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
1245 if (function_names[i].addr == a)
1246 return function_names[i].name;
1250 static const char *fpofs_name(u_int ofs)
1252 u_int *p = (u_int *)&dynarec_local + ofs/sizeof(u_int);
1253 static char buf[64];
1255 #define ofscase(x) case LO_##x: return " ; " #x
1256 ofscase(next_interupt);
1257 ofscase(cycle_count);
1258 ofscase(last_count);
1259 ofscase(pending_exception);
1270 ofscase(ram_offset);
1274 if (psxRegs.GPR.r <= p && p < &psxRegs.GPR.r[32])
1275 snprintf(buf, sizeof(buf), " ; r%d", (int)(p - psxRegs.GPR.r));
1276 else if (psxRegs.CP0.r <= p && p < &psxRegs.CP0.r[32])
1277 snprintf(buf, sizeof(buf), " ; cp0 $%d", (int)(p - psxRegs.CP0.r));
1278 else if (psxRegs.CP2D.r <= p && p < &psxRegs.CP2D.r[32])
1279 snprintf(buf, sizeof(buf), " ; cp2d $%d", (int)(p - psxRegs.CP2D.r));
1280 else if (psxRegs.CP2C.r <= p && p < &psxRegs.CP2C.r[32])
1281 snprintf(buf, sizeof(buf), " ; cp2c $%d", (int)(p - psxRegs.CP2C.r));
1285 #define func_name(x) ""
1286 #define fpofs_name(x) ""
1290 #include "assem_x86.c"
1293 #include "assem_x64.c"
1296 #include "assem_arm.c"
1299 #include "assem_arm64.c"
1302 static void *get_trampoline(const void *f)
1304 struct ndrc_tramp *tramp = NDRC_WRITE_OFFSET(&ndrc->tramp);
1307 for (i = 0; i < ARRAY_SIZE(tramp->f); i++) {
1308 if (tramp->f[i] == f || tramp->f[i] == NULL)
1311 if (i == ARRAY_SIZE(tramp->f)) {
1312 SysPrintf("trampoline table is full, last func %p\n", f);
1315 if (tramp->f[i] == NULL) {
1316 start_tcache_write(&tramp->f[i], &tramp->f[i + 1]);
1318 end_tcache_write(&tramp->f[i], &tramp->f[i + 1]);
1320 // invalidate the RX mirror (unsure if necessary, but just in case...)
1321 armDCacheFlush(&ndrc->tramp.f[i], sizeof(ndrc->tramp.f[i]));
1324 return &ndrc->tramp.ops[i];
1327 static void emit_far_jump(const void *f)
1329 if (can_jump_or_call(f)) {
1334 f = get_trampoline(f);
1338 static void emit_far_call(const void *f)
1340 if (can_jump_or_call(f)) {
1345 f = get_trampoline(f);
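// Targets that are out of direct branch/call range (can_jump_or_call() fails)
// are reached through a trampoline: get_trampoline() stores the target pointer
// in ndrc->tramp.f[] and the generated code branches via the matching
// ndrc->tramp.ops[] slot instead.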
1349 // Check if an address is already compiled
1350 // but don't return addresses which are about to expire from the cache
1351 static void *check_addr(u_int vaddr)
1353 struct ht_entry *ht_bin = hash_table_get(vaddr);
1355 for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
1356 if (ht_bin->vaddr[i] == vaddr)
1357 if (doesnt_expire_soon(ht_bin->tcaddr[i]))
1358 return ht_bin->tcaddr[i];
1361 // refactor to get_addr_nocompile?
1362 u_int start_page = get_page_prev(vaddr);
1363 u_int page, end_page = get_page(vaddr);
1365 stat_inc(stat_jump_in_lookups);
1366 for (page = start_page; page <= end_page; page++) {
1367 const struct block_info *block;
1368 for (block = blocks[page]; block != NULL; block = block->next) {
1369 if (vaddr < block->start)
1371 if (block->is_dirty || vaddr >= block->start + block->len)
1373 if (!doesnt_expire_soon(ndrc->translation_cache + block->tc_offs))
1375 for (i = 0; i < block->jump_in_cnt; i++)
1376 if (block->jump_in[i].vaddr == vaddr)
1378 if (i == block->jump_in_cnt)
1381 // Update existing entry with current address
1382 void *addr = block->jump_in[i].addr;
1383 if (ht_bin->vaddr[0] == vaddr) {
1384 ht_bin->tcaddr[0] = addr;
1387 if (ht_bin->vaddr[1] == vaddr) {
1388 ht_bin->tcaddr[1] = addr;
1391 // Insert into hash table with low priority.
1392 // Don't evict existing entries, as they are probably
1393 // addresses that are being accessed frequently.
1394 if (ht_bin->vaddr[0] == -1) {
1395 ht_bin->vaddr[0] = vaddr;
1396 ht_bin->tcaddr[0] = addr;
1398 else if (ht_bin->vaddr[1] == -1) {
1399 ht_bin->vaddr[1] = vaddr;
1400 ht_bin->tcaddr[1] = addr;
1408 static void blocks_clear(struct block_info **head)
1410 struct block_info *cur, *next;
1412 if ((cur = *head)) {
1422 static int blocks_remove_matching_addrs(struct block_info **head,
1423 u_int base_offs, int shift)
1425 struct block_info *next;
1428 if ((((*head)->tc_offs ^ base_offs) >> shift) == 0) {
1429 inv_debug("EXP: rm block %08x (tc_offs %x)\n", (*head)->start, (*head)->tc_offs);
1430 invalidate_block(*head);
1431 next = (*head)->next;
1434 stat_dec(stat_blocks);
1439 head = &((*head)->next);
1445 // This is called when we write to a compiled block (see do_invstub)
1446 static void unlink_jumps_vaddr_range(u_int start, u_int end)
1448 u_int page, start_page = get_page(start), end_page = get_page(end - 1);
1451 for (page = start_page; page <= end_page; page++) {
1452 struct jump_info *ji = jumps[page];
1455 for (i = 0; i < ji->count; ) {
1456 if (ji->e[i].target_vaddr < start || ji->e[i].target_vaddr >= end) {
1461 inv_debug("INV: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr,
1462 (u_char *)ji->e[i].stub - ndrc->translation_cache);
1463 void *host_addr = find_extjump_insn(ji->e[i].stub);
1464 mark_clear_cache(host_addr);
1465 set_jump_target(host_addr, ji->e[i].stub); // point back to dyna_linker stub
1467 stat_dec(stat_links);
1469 if (i < ji->count) {
1470 ji->e[i] = ji->e[ji->count];
1478 static void unlink_jumps_tc_range(struct jump_info *ji, u_int base_offs, int shift)
1483 for (i = 0; i < ji->count; ) {
1484 u_int tc_offs = (u_char *)ji->e[i].stub - ndrc->translation_cache;
1485 if (((tc_offs ^ base_offs) >> shift) != 0) {
1490 inv_debug("EXP: rm link to %08x (tc_offs %x)\n", ji->e[i].target_vaddr, tc_offs);
1491 stat_dec(stat_links);
1493 if (i < ji->count) {
1494 ji->e[i] = ji->e[ji->count];
1501 static void invalidate_block(struct block_info *block)
1505 block->is_dirty = 1;
1506 unlink_jumps_vaddr_range(block->start, block->start + block->len);
1507 for (i = 0; i < block->jump_in_cnt; i++)
1508 hash_table_remove(block->jump_in[i].vaddr);
1511 static int invalidate_range(u_int start, u_int end,
1512 u32 *inv_start_ret, u32 *inv_end_ret)
1514 struct block_info *last_block = NULL;
1515 u_int start_page = get_page_prev(start);
1516 u_int end_page = get_page(end - 1);
1517 u_int start_m = pmmask(start);
1518 u_int end_m = pmmask(end - 1);
1519 u_int inv_start, inv_end;
1520 u_int blk_start_m, blk_end_m;
1524 // additional area without code (to supplement invalid_code[]), [start, end)
1525 // avoids excessive ndrc_write_invalidate*() calls
1526 inv_start = start_m & ~0xfff;
1527 inv_end = end_m | 0xfff;
1529 for (page = start_page; page <= end_page; page++) {
1530 struct block_info *block;
1531 for (block = blocks[page]; block != NULL; block = block->next) {
1532 if (block->is_dirty)
1535 blk_end_m = pmmask(block->start + block->len);
1536 if (blk_end_m <= start_m) {
1537 inv_start = max(inv_start, blk_end_m);
1540 blk_start_m = pmmask(block->start);
1541 if (end_m <= blk_start_m) {
1542 inv_end = min(inv_end, blk_start_m - 1);
1545 if (!block->source) // "hack" block - leave it alone
1549 invalidate_block(block);
1550 stat_inc(stat_inv_hits);
1554 if (!hit && last_block && last_block->source) {
1555 // could be some leftover unused block, uselessly trapping writes
1556 last_block->inv_near_misses++;
1557 if (last_block->inv_near_misses > 128) {
1558 invalidate_block(last_block);
1559 stat_inc(stat_inv_hits);
1566 memset(mini_ht, -1, sizeof(mini_ht));
1570 if (inv_start <= (start_m & ~0xfff) && inv_end >= (start_m | 0xfff))
1571 // the whole page is empty now
1572 mark_invalid_code(start, 1, 1);
1574 if (inv_start_ret) *inv_start_ret = inv_start | (start & 0xe0000000);
1575 if (inv_end_ret) *inv_end_ret = inv_end | (end & 0xe0000000);
1579 void new_dynarec_invalidate_range(unsigned int start, unsigned int end)
1581 invalidate_range(start, end, NULL, NULL);
1584 static void ndrc_write_invalidate_many(u_int start, u_int end)
1586 // this check is done by the caller
1587 //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1588 int ret = invalidate_range(start, end, &inv_code_start, &inv_code_end);
1590 int invc = invalid_code[start >> 12];
1591 u_int len = end - start;
1593 printf("INV ADDR: %08x/%02x hit %d blocks\n", start, len, ret);
1595 printf("INV ADDR: %08x/%02x miss, inv %08x-%08x invc %d->%d\n", start, len,
1596 inv_code_start, inv_code_end, invc, invalid_code[start >> 12]);
1597 check_for_block_changes(start, end);
1599 stat_inc(stat_inv_addr_calls);
1603 void ndrc_write_invalidate_one(u_int addr)
1605 ndrc_write_invalidate_many(addr, addr + 4);
1608 // This is called when loading a save state.
1609 // Anything could have changed, so invalidate everything.
1610 void new_dynarec_invalidate_all_pages(void)
1612 struct block_info *block;
1614 for (page = 0; page < ARRAY_SIZE(blocks); page++) {
1615 for (block = blocks[page]; block != NULL; block = block->next) {
1616 if (block->is_dirty)
1618 if (!block->source) // hack block?
1620 invalidate_block(block);
1625 memset(mini_ht, -1, sizeof(mini_ht));
1630 // Add an entry to jump_out after making a link
1631 // src should point to code emitted by emit_extjump()
1632 void ndrc_add_jump_out(u_int vaddr, void *src)
1634 inv_debug("ndrc_add_jump_out: %p -> %x\n", src, vaddr);
1635 u_int page = get_page(vaddr);
1636 struct jump_info *ji;
1638 stat_inc(stat_links);
1639 check_extjump2(src);
1642 ji = malloc(sizeof(*ji) + sizeof(ji->e[0]) * 16);
1646 else if (ji->count >= ji->alloc) {
1648 ji = realloc(ji, sizeof(*ji) + sizeof(ji->e[0]) * ji->alloc);
1651 ji->e[ji->count].target_vaddr = vaddr;
1652 ji->e[ji->count].stub = src;
1656 /* Register allocation */
1658 static void alloc_set(struct regstat *cur, int reg, int hr)
1660 cur->regmap[hr] = reg;
1661 cur->dirty &= ~(1u << hr);
1662 cur->isconst &= ~(1u << hr);
1663 cur->noevict |= 1u << hr;
1666 static void evict_alloc_reg(struct regstat *cur, int i, int reg, int preferred_hr)
1668 u_char hsn[MAXREG+1];
1670 memset(hsn, 10, sizeof(hsn));
1672 //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1674 // Don't evict the cycle count at entry points, otherwise the entry
1675 // stub will have to write it.
1676 if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
1677 if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
1680 // Alloc preferred register if available
1681 if (!((cur->noevict >> preferred_hr) & 1)
1682 && hsn[cur->regmap[preferred_hr]] == j)
1684 alloc_set(cur, reg, preferred_hr);
1687 for(r=1;r<=MAXREG;r++)
1689 if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
1690 for(hr=0;hr<HOST_REGS;hr++) {
1691 if (hr == EXCLUDE_REG || ((cur->noevict >> hr) & 1))
1693 if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1694 if(cur->regmap[hr]==r) {
1695 alloc_set(cur, reg, hr);
1706 for(r=1;r<=MAXREG;r++)
1709 for(hr=0;hr<HOST_REGS;hr++) {
1710 if (hr == EXCLUDE_REG || ((cur->noevict >> hr) & 1))
1712 if(cur->regmap[hr]==r) {
1713 alloc_set(cur, reg, hr);
1720 SysPrintf("This shouldn't happen (evict_alloc_reg)\n");
1724 // Note: registers are allocated clean (unmodified state)
1725 // if you intend to modify the register, you must call dirty_reg().
1726 static void alloc_reg(struct regstat *cur,int i,signed char reg)
1729 int preferred_reg = PREFERRED_REG_FIRST
1730 + reg % (PREFERRED_REG_LAST - PREFERRED_REG_FIRST + 1);
1731 if (reg == CCREG) preferred_reg = HOST_CCREG;
1732 if (reg == PTEMP || reg == FTEMP) preferred_reg = 12;
1733 assert(PREFERRED_REG_FIRST != EXCLUDE_REG && EXCLUDE_REG != HOST_REGS);
1736 // Don't allocate unused registers
1737 if((cur->u>>reg)&1) return;
1739 // see if it's already allocated
1740 if ((hr = get_reg(cur->regmap, reg)) >= 0) {
1741 cur->noevict |= 1u << hr;
1745 // Keep the same mapping if the register was already allocated in a loop
1746 preferred_reg = loop_reg(i,reg,preferred_reg);
1748 // Try to allocate the preferred register
1749 if (cur->regmap[preferred_reg] == -1) {
1750 alloc_set(cur, reg, preferred_reg);
1753 r=cur->regmap[preferred_reg];
1756 alloc_set(cur, reg, preferred_reg);
1760 // Clear any unneeded registers
1761 // We try to keep the mapping consistent, if possible, because it
1762 // makes branches easier (especially loops). So we try to allocate
1763 // first (see above) before removing old mappings. If this is not
1764 // possible then go ahead and clear out the registers that are no longer needed.
1766 for(hr=0;hr<HOST_REGS;hr++)
1771 if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
1775 // Try to allocate any available register, but prefer
1776 // registers that have not been used recently.
1778 for (hr = PREFERRED_REG_FIRST; ; ) {
1779 if (cur->regmap[hr] < 0) {
1780 int oldreg = regs[i-1].regmap[hr];
1781 if (oldreg < 0 || (oldreg != dops[i-1].rs1 && oldreg != dops[i-1].rs2
1782 && oldreg != dops[i-1].rt1 && oldreg != dops[i-1].rt2))
1784 alloc_set(cur, reg, hr);
1789 if (hr == EXCLUDE_REG)
1791 if (hr == HOST_REGS)
1793 if (hr == PREFERRED_REG_FIRST)
1798 // Try to allocate any available register
1799 for (hr = PREFERRED_REG_FIRST; ; ) {
1800 if (cur->regmap[hr] < 0) {
1801 alloc_set(cur, reg, hr);
1805 if (hr == EXCLUDE_REG)
1807 if (hr == HOST_REGS)
1809 if (hr == PREFERRED_REG_FIRST)
1813 // Ok, now we have to evict someone
1814 // Pick a register we hopefully won't need soon
1815 evict_alloc_reg(cur, i, reg, preferred_reg);
1818 // Allocate a temporary register. This is done without regard to
1819 // dirty status or whether the register we request is on the unneeded list
1820 // Note: This will only allocate one register, even if called multiple times
1821 static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
1825 // see if it's already allocated
1826 for (hr = 0; hr < HOST_REGS; hr++)
1828 if (hr != EXCLUDE_REG && cur->regmap[hr] == reg) {
1829 cur->noevict |= 1u << hr;
1834 // Try to allocate any available register
1835 for(hr=HOST_REGS-1;hr>=0;hr--) {
1836 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1837 alloc_set(cur, reg, hr);
1842 // Find an unneeded register
1843 for(hr=HOST_REGS-1;hr>=0;hr--)
1849 if(i==0||((unneeded_reg[i-1]>>r)&1)) {
1850 alloc_set(cur, reg, hr);
1857 // Ok, now we have to evict someone
1858 // Pick a register we hopefully won't need soon
1859 evict_alloc_reg(cur, i, reg, 0);
1862 static void mov_alloc(struct regstat *current,int i)
1864 if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) {
1865 alloc_cc(current,i); // for stalls
1866 dirty_reg(current,CCREG);
1869 // Note: Don't need to actually alloc the source registers
1870 //alloc_reg(current,i,dops[i].rs1);
1871 alloc_reg(current,i,dops[i].rt1);
1873 clear_const(current,dops[i].rs1);
1874 clear_const(current,dops[i].rt1);
1875 dirty_reg(current,dops[i].rt1);
1878 static void shiftimm_alloc(struct regstat *current,int i)
1880 if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
1883 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1884 else dops[i].use_lt1=!!dops[i].rs1;
1885 alloc_reg(current,i,dops[i].rt1);
1886 dirty_reg(current,dops[i].rt1);
1887 if(is_const(current,dops[i].rs1)) {
1888 int v=get_const(current,dops[i].rs1);
1889 if(dops[i].opcode2==0x00) set_const(current,dops[i].rt1,v<<cinfo[i].imm);
1890 if(dops[i].opcode2==0x02) set_const(current,dops[i].rt1,(u_int)v>>cinfo[i].imm);
1891 if(dops[i].opcode2==0x03) set_const(current,dops[i].rt1,v>>cinfo[i].imm);
1893 else clear_const(current,dops[i].rt1);
1898 clear_const(current,dops[i].rs1);
1899 clear_const(current,dops[i].rt1);
1902 if(dops[i].opcode2>=0x38&&dops[i].opcode2<=0x3b) // DSLL/DSRL/DSRA
1906 if(dops[i].opcode2==0x3c) // DSLL32
1910 if(dops[i].opcode2==0x3e) // DSRL32
1914 if(dops[i].opcode2==0x3f) // DSRA32
1920 static void shift_alloc(struct regstat *current,int i)
1923 if(dops[i].rs1) alloc_reg(current,i,dops[i].rs1);
1924 if(dops[i].rs2) alloc_reg(current,i,dops[i].rs2);
1925 alloc_reg(current,i,dops[i].rt1);
1926 if(dops[i].rt1==dops[i].rs2) {
1927 alloc_reg_temp(current,i,-1);
1928 cinfo[i].min_free_regs=1;
1930 clear_const(current,dops[i].rs1);
1931 clear_const(current,dops[i].rs2);
1932 clear_const(current,dops[i].rt1);
1933 dirty_reg(current,dops[i].rt1);
1937 static void alu_alloc(struct regstat *current,int i)
1939 if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
1941 if(dops[i].rs1&&dops[i].rs2) {
1942 alloc_reg(current,i,dops[i].rs1);
1943 alloc_reg(current,i,dops[i].rs2);
1946 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1947 if(dops[i].rs2&&needed_again(dops[i].rs2,i)) alloc_reg(current,i,dops[i].rs2);
1949 alloc_reg(current,i,dops[i].rt1);
1951 if (dops[i].may_except) {
1952 alloc_cc_optional(current, i); // for exceptions
1953 alloc_reg_temp(current, i, -1);
1954 cinfo[i].min_free_regs = 1;
1957 else if(dops[i].opcode2==0x2a||dops[i].opcode2==0x2b) { // SLT/SLTU
1959 alloc_reg(current,i,dops[i].rs1);
1960 alloc_reg(current,i,dops[i].rs2);
1961 alloc_reg(current,i,dops[i].rt1);
1964 else if(dops[i].opcode2>=0x24&&dops[i].opcode2<=0x27) { // AND/OR/XOR/NOR
1966 if(dops[i].rs1&&dops[i].rs2) {
1967 alloc_reg(current,i,dops[i].rs1);
1968 alloc_reg(current,i,dops[i].rs2);
1972 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1973 if(dops[i].rs2&&needed_again(dops[i].rs2,i)) alloc_reg(current,i,dops[i].rs2);
1975 alloc_reg(current,i,dops[i].rt1);
1978 clear_const(current,dops[i].rs1);
1979 clear_const(current,dops[i].rs2);
1980 clear_const(current,dops[i].rt1);
1981 dirty_reg(current,dops[i].rt1);
1984 static void imm16_alloc(struct regstat *current,int i)
1986 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1987 else dops[i].use_lt1=!!dops[i].rs1;
1988 if(dops[i].rt1) alloc_reg(current,i,dops[i].rt1);
1989 if(dops[i].opcode==0x0a||dops[i].opcode==0x0b) { // SLTI/SLTIU
1990 clear_const(current,dops[i].rs1);
1991 clear_const(current,dops[i].rt1);
1993 else if(dops[i].opcode>=0x0c&&dops[i].opcode<=0x0e) { // ANDI/ORI/XORI
1994 if(is_const(current,dops[i].rs1)) {
1995 int v=get_const(current,dops[i].rs1);
1996 if(dops[i].opcode==0x0c) set_const(current,dops[i].rt1,v&cinfo[i].imm);
1997 if(dops[i].opcode==0x0d) set_const(current,dops[i].rt1,v|cinfo[i].imm);
1998 if(dops[i].opcode==0x0e) set_const(current,dops[i].rt1,v^cinfo[i].imm);
2000 else clear_const(current,dops[i].rt1);
2002 else if(dops[i].opcode==0x08||dops[i].opcode==0x09) { // ADDI/ADDIU
2003 if(is_const(current,dops[i].rs1)) {
2004 int v=get_const(current,dops[i].rs1);
2005 set_const(current,dops[i].rt1,v+cinfo[i].imm);
2007 else clear_const(current,dops[i].rt1);
2008 if (dops[i].may_except) {
2009 alloc_cc_optional(current, i); // for exceptions
2010 alloc_reg_temp(current, i, -1);
2011 cinfo[i].min_free_regs = 1;
2015 set_const(current,dops[i].rt1,cinfo[i].imm<<16); // LUI
2017 dirty_reg(current,dops[i].rt1);
2020 static void load_alloc(struct regstat *current,int i)
2023 clear_const(current,dops[i].rt1);
2024 //if(dops[i].rs1!=dops[i].rt1&&needed_again(dops[i].rs1,i)) clear_const(current,dops[i].rs1); // Does this help or hurt?
2025 if(!dops[i].rs1) current->u&=~1LL; // Allow allocating r0 if it's the source register
2026 if (needed_again(dops[i].rs1, i))
2027 alloc_reg(current, i, dops[i].rs1);
2029 alloc_reg(current, i, ROREG);
2030 if (dops[i].may_except) {
2031 alloc_cc_optional(current, i); // for exceptions
2034 if(dops[i].rt1&&!((current->u>>dops[i].rt1)&1)) {
2035 alloc_reg(current,i,dops[i].rt1);
2036 assert(get_reg_w(current->regmap, dops[i].rt1)>=0);
2037 dirty_reg(current,dops[i].rt1);
2038 // LWL/LWR need a temporary register for the old value
2039 if(dops[i].opcode==0x22||dops[i].opcode==0x26)
2041 alloc_reg(current,i,FTEMP);
2047 // Load to r0 or unneeded register (dummy load)
2048 // but we still need a register to calculate the address
2049 if(dops[i].opcode==0x22||dops[i].opcode==0x26)
2050 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
2054 alloc_reg_temp(current, i, -1);
2055 cinfo[i].min_free_regs = 1;
2059 // this may eat up to 7 registers
2060 static void store_alloc(struct regstat *current, int i)
2062 clear_const(current,dops[i].rs2);
2063 if(!(dops[i].rs2)) current->u&=~1LL; // Allow allocating r0 if necessary
2064 if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
2065 alloc_reg(current,i,dops[i].rs2);
2067 alloc_reg(current, i, ROREG);
2068 #if defined(HOST_IMM8)
2069 // On CPUs without 32-bit immediates we need a pointer to invalid_code
2070 alloc_reg(current, i, INVCP);
2072 if (dops[i].opcode == 0x2a || dops[i].opcode == 0x2e) { // SWL/SWR
2073 alloc_reg(current,i,FTEMP);
2075 if (dops[i].may_except)
2076 alloc_cc_optional(current, i); // for exceptions
2077 // We need a temporary register for address generation
2078 alloc_reg_temp(current,i,-1);
2079 cinfo[i].min_free_regs=1;
2082 static void c2ls_alloc(struct regstat *current, int i)
2084 clear_const(current,dops[i].rt1);
2085 if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
2086 alloc_reg(current,i,FTEMP);
2088 alloc_reg(current, i, ROREG);
2089 #if defined(HOST_IMM8)
2090 // On CPUs without 32-bit immediates we need a pointer to invalid_code
2091 if (dops[i].opcode == 0x3a) // SWC2
2092 alloc_reg(current,i,INVCP);
2094 if (dops[i].may_except)
2095 alloc_cc_optional(current, i); // for exceptions
2096 // We need a temporary register for address generation
2097 alloc_reg_temp(current,i,-1);
2098 cinfo[i].min_free_regs=1;
2101 #ifndef multdiv_alloc
2102 static void multdiv_alloc(struct regstat *current,int i)
2108 clear_const(current,dops[i].rs1);
2109 clear_const(current,dops[i].rs2);
2110 alloc_cc(current,i); // for stalls
2111 dirty_reg(current,CCREG);
2112 if(dops[i].rs1&&dops[i].rs2)
2114 current->u&=~(1LL<<HIREG);
2115 current->u&=~(1LL<<LOREG);
2116 alloc_reg(current,i,HIREG);
2117 alloc_reg(current,i,LOREG);
2118 alloc_reg(current,i,dops[i].rs1);
2119 alloc_reg(current,i,dops[i].rs2);
2120 dirty_reg(current,HIREG);
2121 dirty_reg(current,LOREG);
2125 // Multiply by zero is zero.
2126 // MIPS does not have a divide by zero exception.
2127 alloc_reg(current,i,HIREG);
2128 alloc_reg(current,i,LOREG);
2129 dirty_reg(current,HIREG);
2130 dirty_reg(current,LOREG);
2131 if (dops[i].rs1 && ((dops[i].opcode2 & 0x3e) == 0x1a)) // div(u) 0
2132 alloc_reg(current, i, dops[i].rs1);
2137 static void cop0_alloc(struct regstat *current,int i)
2139 if(dops[i].opcode2==0) // MFC0
2142 clear_const(current,dops[i].rt1);
2143 alloc_reg(current,i,dops[i].rt1);
2144 dirty_reg(current,dops[i].rt1);
2147 else if(dops[i].opcode2==4) // MTC0
2149 if (((source[i]>>11)&0x1e) == 12) {
2150 alloc_cc(current, i);
2151 dirty_reg(current, CCREG);
2154 clear_const(current,dops[i].rs1);
2155 alloc_reg(current,i,dops[i].rs1);
2156 alloc_all(current,i);
2159 alloc_all(current,i); // FIXME: Keep r0
2161 alloc_reg(current,i,0);
2163 cinfo[i].min_free_regs = HOST_REGS;
2167 static void rfe_alloc(struct regstat *current, int i)
2169 alloc_all(current, i);
2170 cinfo[i].min_free_regs = HOST_REGS;
2173 static void cop2_alloc(struct regstat *current,int i)
2175 if (dops[i].opcode2 < 3) // MFC2/CFC2
2177 alloc_cc(current,i); // for stalls
2178 dirty_reg(current,CCREG);
2180 clear_const(current,dops[i].rt1);
2181 alloc_reg(current,i,dops[i].rt1);
2182 dirty_reg(current,dops[i].rt1);
2185 else if (dops[i].opcode2 > 3) // MTC2/CTC2
2188 clear_const(current,dops[i].rs1);
2189 alloc_reg(current,i,dops[i].rs1);
2193 alloc_reg(current,i,0);
2196 alloc_reg_temp(current,i,-1);
2197 cinfo[i].min_free_regs=1;
2200 static void c2op_alloc(struct regstat *current,int i)
2202 alloc_cc(current,i); // for stalls
2203 dirty_reg(current,CCREG);
2204 alloc_reg_temp(current,i,-1);
2207 static void syscall_alloc(struct regstat *current,int i)
2209 alloc_cc(current,i);
2210 dirty_reg(current,CCREG);
2211 alloc_all(current,i);
2212 cinfo[i].min_free_regs=HOST_REGS;
2216 static void delayslot_alloc(struct regstat *current,int i)
2218 switch(dops[i].itype) {
2226 imm16_alloc(current,i);
2230 load_alloc(current,i);
2234 store_alloc(current,i);
2237 alu_alloc(current,i);
2240 shift_alloc(current,i);
2243 multdiv_alloc(current,i);
2246 shiftimm_alloc(current,i);
2249 mov_alloc(current,i);
2252 cop0_alloc(current,i);
2255 rfe_alloc(current,i);
2258 cop2_alloc(current,i);
2261 c2ls_alloc(current,i);
2264 c2op_alloc(current,i);
2269 static void add_stub(enum stub_type type, void *addr, void *retaddr,
2270 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
2272 assert(stubcount < ARRAY_SIZE(stubs));
2273 stubs[stubcount].type = type;
2274 stubs[stubcount].addr = addr;
2275 stubs[stubcount].retaddr = retaddr;
2276 stubs[stubcount].a = a;
2277 stubs[stubcount].b = b;
2278 stubs[stubcount].c = c;
2279 stubs[stubcount].d = d;
2280 stubs[stubcount].e = e;
2284 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
2285 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
2287 add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
2290 // Write out a single register
2291 static void wb_register(signed char r, const signed char regmap[], uint64_t dirty)
2294 for(hr=0;hr<HOST_REGS;hr++) {
2295 if(hr!=EXCLUDE_REG) {
2298 assert(regmap[hr]<64);
2299 emit_storereg(r,hr);
2306 static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
2308 //if(dirty_pre==dirty) return;
2310 for (hr = 0; hr < HOST_REGS; hr++) {
2312 if (r < 1 || r > 33 || ((u >> r) & 1))
2314 if (((dirty_pre & ~dirty) >> hr) & 1)
2315 emit_storereg(r, hr);
2320 static void pass_args(int a0, int a1)
2324 emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
2326 else if(a0!=0&&a1==0) {
2328 if (a0>=0) emit_mov(a0,0);
2331 if(a0>=0&&a0!=0) emit_mov(a0,0);
2332 if(a1>=0&&a1!=1) emit_mov(a1,1);
2336 static void alu_assemble(int i, const struct regstat *i_regs, int ccadj_)
2338 if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
2339 int do_oflow = dops[i].may_except; // ADD/SUB with exceptions enabled
2340 if (dops[i].rt1 || do_oflow) {
2341 int do_exception_check = 0;
2342 signed char s1, s2, t, tmp;
2343 t = get_reg_w(i_regs->regmap, dops[i].rt1);
2344 tmp = get_reg_temp(i_regs->regmap);
2347 if (t < 0 && do_oflow)
2350 s1 = get_reg(i_regs->regmap, dops[i].rs1);
2351 s2 = get_reg(i_regs->regmap, dops[i].rs2);
2352 if (dops[i].rs1 && dops[i].rs2) {
2355 if (dops[i].opcode2 & 2) {
2357 emit_subs(s1, s2, tmp);
2358 do_exception_check = 1;
2365 emit_adds(s1, s2, tmp);
2366 do_exception_check = 1;
2372 else if(dops[i].rs1) {
2373 if(s1>=0) emit_mov(s1,t);
2374 else emit_loadreg(dops[i].rs1,t);
2376 else if(dops[i].rs2) {
2378 emit_loadreg(dops[i].rs2, t);
2381 if (dops[i].opcode2 & 2) {
2384 do_exception_check = 1;
2395 if (do_exception_check) {
2398 if (t >= 0 && tmp != t)
2400 add_stub_r(OVERFLOW_STUB, jaddr, out, i, 0, i_regs, ccadj_, 0);
2404 else if(dops[i].opcode2==0x2a||dops[i].opcode2==0x2b) { // SLT/SLTU
2406 signed char s1l,s2l,t;
2408 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2411 s1l=get_reg(i_regs->regmap,dops[i].rs1);
2412 s2l=get_reg(i_regs->regmap,dops[i].rs2);
2413 if(dops[i].rs2==0) // rx<r0
2415 if(dops[i].opcode2==0x2a&&dops[i].rs1!=0) { // SLT
2417 emit_shrimm(s1l,31,t);
2419 else // SLTU (unsigned cannot be less than zero, 0<0)
2422 else if(dops[i].rs1==0) // r0<rx
2425 if(dops[i].opcode2==0x2a) // SLT
2426 emit_set_gz32(s2l,t);
2427 else // SLTU (set if not zero)
2428 emit_set_nz32(s2l,t);
2431 assert(s1l>=0);assert(s2l>=0);
2432 if(dops[i].opcode2==0x2a) // SLT
2433 emit_set_if_less32(s1l,s2l,t);
2435 emit_set_if_carry32(s1l,s2l,t);
2441 else if(dops[i].opcode2>=0x24&&dops[i].opcode2<=0x27) { // AND/OR/XOR/NOR
2443 signed char s1l,s2l,tl;
2444 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
2447 s1l=get_reg(i_regs->regmap,dops[i].rs1);
2448 s2l=get_reg(i_regs->regmap,dops[i].rs2);
2449 if(dops[i].rs1&&dops[i].rs2) {
2452 if(dops[i].opcode2==0x24) { // AND
2453 emit_and(s1l,s2l,tl);
2455 if(dops[i].opcode2==0x25) { // OR
2456 emit_or(s1l,s2l,tl);
2458 if(dops[i].opcode2==0x26) { // XOR
2459 emit_xor(s1l,s2l,tl);
2461 if(dops[i].opcode2==0x27) { // NOR
2462 emit_or(s1l,s2l,tl);
2468 if(dops[i].opcode2==0x24) { // AND
2471 if(dops[i].opcode2==0x25||dops[i].opcode2==0x26) { // OR/XOR
2473 if(s1l>=0) emit_mov(s1l,tl);
2474 else emit_loadreg(dops[i].rs1,tl); // CHECK: regmap_entry?
2478 if(s2l>=0) emit_mov(s2l,tl);
2479 else emit_loadreg(dops[i].rs2,tl); // CHECK: regmap_entry?
2481 else emit_zeroreg(tl);
2483 if(dops[i].opcode2==0x27) { // NOR
2485 if(s1l>=0) emit_not(s1l,tl);
2487 emit_loadreg(dops[i].rs1,tl);
2493 if(s2l>=0) emit_not(s2l,tl);
2495 emit_loadreg(dops[i].rs2,tl);
2499 else emit_movimm(-1,tl);
2508 static void imm16_assemble(int i, const struct regstat *i_regs, int ccadj_)
2510 if (dops[i].opcode==0x0f) { // LUI
2513 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2516 if(!((i_regs->isconst>>t)&1))
2517 emit_movimm(cinfo[i].imm<<16,t);
2521 if(dops[i].opcode==0x08||dops[i].opcode==0x09) { // ADDI/ADDIU
2522 int is_addi = dops[i].may_except;
2523 if (dops[i].rt1 || is_addi) {
2524 signed char s, t, tmp;
2525 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2526 s=get_reg(i_regs->regmap,dops[i].rs1);
2528 tmp = get_reg_temp(i_regs->regmap);
2534 if(!((i_regs->isconst>>t)&1)) {
2535 int sum, do_exception_check = 0;
2537 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2539 emit_addimm_and_set_flags3(t, cinfo[i].imm, tmp);
2540 do_exception_check = 1;
2543 emit_addimm(t, cinfo[i].imm, t);
2545 if (!((i_regs->wasconst >> s) & 1)) {
2547 emit_addimm_and_set_flags3(s, cinfo[i].imm, tmp);
2548 do_exception_check = 1;
2551 emit_addimm(s, cinfo[i].imm, t);
2554 int oflow = add_overflow(constmap[i][s], cinfo[i].imm, sum);
2555 if (is_addi && oflow)
2556 do_exception_check = 2;
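// both operands are compile-time constants, so the overflow outcome is known
// now; do_exception_check==2 marks the always-raises case (no runtime flag
// test is needed)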
2558 emit_movimm(sum, t);
2561 if (do_exception_check) {
2563 if (do_exception_check == 2)
2570 add_stub_r(OVERFLOW_STUB, jaddr, out, i, 0, i_regs, ccadj_, 0);
2576 if(!((i_regs->isconst>>t)&1))
2577 emit_movimm(cinfo[i].imm,t);
2582 else if(dops[i].opcode==0x0a||dops[i].opcode==0x0b) { // SLTI/SLTIU
2584 //assert(dops[i].rs1!=0); // r0 might be valid, but it's probably a bug
2586 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2587 sl=get_reg(i_regs->regmap,dops[i].rs1);
2591 if(dops[i].opcode==0x0a) { // SLTI
2593 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2594 emit_slti32(t,cinfo[i].imm,t);
2596 emit_slti32(sl,cinfo[i].imm,t);
2601 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2602 emit_sltiu32(t,cinfo[i].imm,t);
2604 emit_sltiu32(sl,cinfo[i].imm,t);
2608 // SLTI(U) with r0 is just stupid,
2609 // nonetheless examples can be found
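// (with rs1==r0 the result is a constant: slti rt,$zero,imm gives 1 iff
//  imm>0 signed, sltiu rt,$zero,imm gives 1 iff imm!=0)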
2610 if(dops[i].opcode==0x0a) // SLTI
2611 if(0<cinfo[i].imm) emit_movimm(1,t);
2612 else emit_zeroreg(t);
2615 if(cinfo[i].imm) emit_movimm(1,t);
2616 else emit_zeroreg(t);
2622 else if(dops[i].opcode>=0x0c&&dops[i].opcode<=0x0e) { // ANDI/ORI/XORI
2625 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
2626 sl=get_reg(i_regs->regmap,dops[i].rs1);
2627 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2628 if(dops[i].opcode==0x0c) //ANDI
2632 if(i_regs->regmap_entry[tl]!=dops[i].rs1) emit_loadreg(dops[i].rs1,tl);
2633 emit_andimm(tl,cinfo[i].imm,tl);
2635 if(!((i_regs->wasconst>>sl)&1))
2636 emit_andimm(sl,cinfo[i].imm,tl);
2638 emit_movimm(constmap[i][sl]&cinfo[i].imm,tl);
2648 if(i_regs->regmap_entry[tl]!=dops[i].rs1) emit_loadreg(dops[i].rs1,tl);
2650 if(dops[i].opcode==0x0d) { // ORI
2652 emit_orimm(tl,cinfo[i].imm,tl);
2654 if(!((i_regs->wasconst>>sl)&1))
2655 emit_orimm(sl,cinfo[i].imm,tl);
2657 emit_movimm(constmap[i][sl]|cinfo[i].imm,tl);
2660 if(dops[i].opcode==0x0e) { // XORI
2662 emit_xorimm(tl,cinfo[i].imm,tl);
2664 if(!((i_regs->wasconst>>sl)&1))
2665 emit_xorimm(sl,cinfo[i].imm,tl);
2667 emit_movimm(constmap[i][sl]^cinfo[i].imm,tl);
2672 emit_movimm(cinfo[i].imm,tl);
2680 static void shiftimm_assemble(int i, const struct regstat *i_regs)
2682 if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
2686 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2687 s=get_reg(i_regs->regmap,dops[i].rs1);
2689 if(t>=0&&!((i_regs->isconst>>t)&1)){
2696 if(s<0&&i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2698 if(dops[i].opcode2==0) // SLL
2700 emit_shlimm(s<0?t:s,cinfo[i].imm,t);
2702 if(dops[i].opcode2==2) // SRL
2704 emit_shrimm(s<0?t:s,cinfo[i].imm,t);
2706 if(dops[i].opcode2==3) // SRA
2708 emit_sarimm(s<0?t:s,cinfo[i].imm,t);
2712 if(s>=0 && s!=t) emit_mov(s,t);
2716 //emit_storereg(dops[i].rt1,t); //DEBUG
2719 if(dops[i].opcode2>=0x38&&dops[i].opcode2<=0x3b) // DSLL/DSRL/DSRA
2723 if(dops[i].opcode2==0x3c) // DSLL32
2727 if(dops[i].opcode2==0x3e) // DSRL32
2731 if(dops[i].opcode2==0x3f) // DSRA32
2737 #ifndef shift_assemble
2738 static void shift_assemble(int i, const struct regstat *i_regs)
2740 signed char s,t,shift;
2741 if (dops[i].rt1 == 0)
2743 assert(dops[i].opcode2<=0x07); // SLLV/SRLV/SRAV
2744 t = get_reg(i_regs->regmap, dops[i].rt1);
2745 s = get_reg(i_regs->regmap, dops[i].rs1);
2746 shift = get_reg(i_regs->regmap, dops[i].rs2);
2752 else if(dops[i].rs2==0) {
2754 if(s!=t) emit_mov(s,t);
2757 host_tempreg_acquire();
2758 emit_andimm(shift,31,HOST_TEMPREG);
2759 switch(dops[i].opcode2) {
2761 emit_shl(s,HOST_TEMPREG,t);
2764 emit_shr(s,HOST_TEMPREG,t);
2767 emit_sar(s,HOST_TEMPREG,t);
2772 host_tempreg_release();
2786 static int get_ptr_mem_type(u_int a)
2788 if(a < 0x00200000) {
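// PSX main RAM (2MB) is visible at 0x00000000 (KUSEG), 0x80000000 (KSEG0)
// and 0xa0000000 (KSEG1); the KSEG0 mirrors above 2MB are handled as
// MTYPE_8020 below, and the scratchpad sits at 0x1f800000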
2789 if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
2790 // deliberately return the wrong type: the BIOS self-test only passes if these accesses go through the memhandler
2791 // (007 does similar accesses from the a00 mirror, weird stuff)
2795 if(0x1f800000 <= a && a < 0x1f801000)
2797 if(0x80200000 <= a && a < 0x80800000)
2799 if(0xa0000000 <= a && a < 0xa0200000)
2804 static int get_ro_reg(const struct regstat *i_regs, int host_tempreg_free)
2806 int r = get_reg(i_regs->regmap, ROREG);
2807 if (r < 0 && host_tempreg_free) {
2808 host_tempreg_acquire();
2809 emit_loadreg(ROREG, r = HOST_TEMPREG);
2816 static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
2817 int addr, int *offset_reg, int *addr_reg_override, int ccadj_)
2821 int mr = dops[i].rs1;
2824 if(((smrv_strong|smrv_weak)>>mr)&1) {
2825 type=get_ptr_mem_type(smrv[mr]);
2826 //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
2829 // use the mirror we are running on
2830 type=get_ptr_mem_type(start);
2831 //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
2834 if (dops[i].may_except) {
2836 u_int op = dops[i].opcode;
2837 int mask = ((op & 0x37) == 0x21 || op == 0x25) ? 1 : 3; // LH/SH/LHU
2839 emit_testimm(addr, mask);
2842 add_stub_r(ALIGNMENT_STUB, jaddr2, out, i, addr, i_regs, ccadj_, 0);
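// halfword accesses must be 2-byte aligned and word accesses 4-byte aligned;
// a set low bit means a MIPS address error exception, raised via the
// ALIGNMENT_STUB added above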
2845 if(type==MTYPE_8020) { // RAM 80200000+ mirror
2846 host_tempreg_acquire();
2847 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2848 addr=*addr_reg_override=HOST_TEMPREG;
2851 else if(type==MTYPE_0000) { // RAM 0 mirror
2852 host_tempreg_acquire();
2853 emit_orimm(addr,0x80000000,HOST_TEMPREG);
2854 addr=*addr_reg_override=HOST_TEMPREG;
2857 else if(type==MTYPE_A000) { // RAM A mirror
2858 host_tempreg_acquire();
2859 emit_andimm(addr,~0x20000000,HOST_TEMPREG);
2860 addr=*addr_reg_override=HOST_TEMPREG;
2863 else if(type==MTYPE_1F80) { // scratchpad
2864 if (psxH == (void *)0x1f800000) {
2865 host_tempreg_acquire();
2866 emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
2867 emit_cmpimm(HOST_TEMPREG,0x1000);
2868 host_tempreg_release();
2873 // do the usual RAM check, jump will go to the right handler
2878 if (type == 0) // need ram check
2880 emit_cmpimm(addr,RAM_SIZE);
2882 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2883 // Hint to branch predictor that the branch is unlikely to be taken
2884 if (dops[i].rs1 >= 28)
2885 emit_jno_unlikely(0);
2889 if (ram_offset != 0)
2890 *offset_reg = get_ro_reg(i_regs, 0);
2896 // return memhandler, or get directly accessible address and return 0
2897 static void *get_direct_memhandler(void *table, u_int addr,
2898 enum stub_type type, uintptr_t *addr_host)
2900 uintptr_t msb = 1ull << (sizeof(uintptr_t)*8 - 1);
2901 uintptr_t l1, l2 = 0;
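// note: the tables store host pointers shifted right by one; a first-level
// entry with the top bit clear maps the whole page directly, while a set top
// bit points (after shifting) at a per-page table indexed by offset and
// access width whose handler entries are returned below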
2902 l1 = ((uintptr_t *)table)[addr>>12];
2904 uintptr_t v = l1 << 1;
2905 *addr_host = v + addr;
2910 if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
2911 l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
2912 else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
2913 l2 = ((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
2915 l2 = ((uintptr_t *)l1)[(addr&0xfff)/4];
2917 uintptr_t v = l2 << 1;
2918 *addr_host = v + (addr&0xfff);
2921 return (void *)(l2 << 1);
2925 static u_int get_host_reglist(const signed char *regmap)
2927 u_int reglist = 0, hr;
2928 for (hr = 0; hr < HOST_REGS; hr++) {
2929 if (hr != EXCLUDE_REG && regmap[hr] >= 0)
2935 static u_int reglist_exclude(u_int reglist, int r1, int r2)
2938 reglist &= ~(1u << r1);
2940 reglist &= ~(1u << r2);
2944 // find a temp caller-saved register not in reglist (so assumed to be free)
2945 static int reglist_find_free(u_int reglist)
2947 u_int free_regs = ~reglist & CALLER_SAVE_REGS;
2950 return __builtin_ctz(free_regs);
2953 static void do_load_word(int a, int rt, int offset_reg)
2955 if (offset_reg >= 0)
2956 emit_ldr_dualindexed(offset_reg, a, rt);
2958 emit_readword_indexed(0, a, rt);
2961 static void do_store_word(int a, int ofs, int rt, int offset_reg, int preserve_a)
2963 if (offset_reg < 0) {
2964 emit_writeword_indexed(rt, ofs, a);
2968 emit_addimm(a, ofs, a);
2969 emit_str_dualindexed(offset_reg, a, rt);
2970 if (ofs != 0 && preserve_a)
2971 emit_addimm(a, -ofs, a);
2974 static void do_store_hword(int a, int ofs, int rt, int offset_reg, int preserve_a)
2976 if (offset_reg < 0) {
2977 emit_writehword_indexed(rt, ofs, a);
2981 emit_addimm(a, ofs, a);
2982 emit_strh_dualindexed(offset_reg, a, rt);
2983 if (ofs != 0 && preserve_a)
2984 emit_addimm(a, -ofs, a);
2987 static void do_store_byte(int a, int rt, int offset_reg)
2989 if (offset_reg >= 0)
2990 emit_strb_dualindexed(offset_reg, a, rt);
2992 emit_writebyte_indexed(rt, 0, a);
2995 static void load_assemble(int i, const struct regstat *i_regs, int ccadj_)
2997 int addr = cinfo[i].addr;
3001 int memtarget=0,c=0;
3002 int offset_reg = -1;
3003 int fastio_reg_override = -1;
3004 u_int reglist=get_host_reglist(i_regs->regmap);
3005 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3006 s=get_reg(i_regs->regmap,dops[i].rs1);
3007 offset=cinfo[i].imm;
3008 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3010 c=(i_regs->wasconst>>s)&1;
3012 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3015 //printf("load_assemble: c=%d\n",c);
3016 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
3017 if(tl<0 && ((!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80) || dops[i].rt1==0)) {
3018 // could be FIFO, must perform the read
3020 assem_debug("(forced read)\n");
3021 tl = get_reg_temp(i_regs->regmap); // may be == addr
3026 //printf("load_assemble: c=%d\n",c);
3027 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
3031 // Strmnnrmn's speed hack
3032 if(dops[i].rs1!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3035 jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
3036 &offset_reg, &fastio_reg_override, ccadj_);
3039 else if (ram_offset && memtarget) {
3040 offset_reg = get_ro_reg(i_regs, 0);
3042 int dummy=(dops[i].rt1==0)||(tl!=get_reg_w(i_regs->regmap, dops[i].rt1)); // ignore loads to r0 and unneeded reg
3043 switch (dops[i].opcode) {
3048 if (fastio_reg_override >= 0)
3049 a = fastio_reg_override;
3051 if (offset_reg >= 0)
3052 emit_ldrsb_dualindexed(offset_reg, a, tl);
3054 emit_movsbl_indexed(0, a, tl);
3057 add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3060 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3066 if (fastio_reg_override >= 0)
3067 a = fastio_reg_override;
3068 if (offset_reg >= 0)
3069 emit_ldrsh_dualindexed(offset_reg, a, tl);
3071 emit_movswl_indexed(0, a, tl);
3074 add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3077 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3083 if (fastio_reg_override >= 0)
3084 a = fastio_reg_override;
3085 do_load_word(a, tl, offset_reg);
3088 add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3091 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3097 if (fastio_reg_override >= 0)
3098 a = fastio_reg_override;
3100 if (offset_reg >= 0)
3101 emit_ldrb_dualindexed(offset_reg, a, tl);
3103 emit_movzbl_indexed(0, a, tl);
3106 add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3109 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3115 if (fastio_reg_override >= 0)
3116 a = fastio_reg_override;
3117 if (offset_reg >= 0)
3118 emit_ldrh_dualindexed(offset_reg, a, tl);
3120 emit_movzwl_indexed(0, a, tl);
3123 add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3126 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3132 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3133 host_tempreg_release();
3136 #ifndef loadlr_assemble
3137 static void loadlr_assemble(int i, const struct regstat *i_regs, int ccadj_)
3139 int addr = cinfo[i].addr;
3140 int s,tl,temp,temp2;
3143 int memtarget=0,c=0;
3144 int offset_reg = -1;
3145 int fastio_reg_override = -1;
3146 u_int reglist=get_host_reglist(i_regs->regmap);
3147 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3148 s=get_reg(i_regs->regmap,dops[i].rs1);
3149 temp=get_reg_temp(i_regs->regmap);
3150 temp2=get_reg(i_regs->regmap,FTEMP);
3151 offset=cinfo[i].imm;
3155 c=(i_regs->wasconst>>s)&1;
3157 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3161 emit_shlimm(addr,3,temp);
3162 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
3163 emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
3165 emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
3167 jaddr = emit_fastpath_cmp_jump(i, i_regs, temp2,
3168 &offset_reg, &fastio_reg_override, ccadj_);
3171 if (ram_offset && memtarget) {
3172 offset_reg = get_ro_reg(i_regs, 0);
3174 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
3175 emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
3177 emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
3180 if (dops[i].opcode==0x22||dops[i].opcode==0x26) { // LWL/LWR
3183 if (fastio_reg_override >= 0)
3184 a = fastio_reg_override;
3185 do_load_word(a, temp2, offset_reg);
3186 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3187 host_tempreg_release();
3188 if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj_,reglist);
3191 inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj_,reglist);
3194 emit_andimm(temp,24,temp);
3195 if (dops[i].opcode==0x22) // LWL
3196 emit_xorimm(temp,24,temp);
3197 host_tempreg_acquire();
3198 emit_movimm(-1,HOST_TEMPREG);
3199 if (dops[i].opcode==0x26) {
3200 emit_shr(temp2,temp,temp2);
3201 emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
3203 emit_shl(temp2,temp,temp2);
3204 emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
3206 host_tempreg_release();
3207 emit_or(temp2,tl,tl);
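// the sequence above merges the unaligned load (little-endian): temp holds
// the bit shift ((addr&3)*8, flipped to 24-(addr&3)*8 for LWL), the aligned
// word is shifted into position, the bytes it replaces are cleared from rt
// using the shifted all-ones mask, and the two parts are OR'd together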
3209 //emit_storereg(dops[i].rt1,tl); // DEBUG
3211 if (dops[i].opcode==0x1A||dops[i].opcode==0x1B) { // LDL/LDR
3217 static void do_invstub(int n)
3220 assem_debug("do_invstub\n");
3221 u_int reglist = stubs[n].a;
3222 u_int addrr = stubs[n].b;
3223 int ofs_start = stubs[n].c;
3224 int ofs_end = stubs[n].d;
3225 int len = ofs_end - ofs_start;
3228 set_jump_target(stubs[n].addr, out);
3230 if (addrr != 0 || ofs_start != 0)
3231 emit_addimm(addrr, ofs_start, 0);
3232 emit_readword(&inv_code_start, 2);
3233 emit_readword(&inv_code_end, 3);
3235 emit_addimm(0, len + 4, (rightr = 1));
3237 emit_cmpcs(3, rightr);
3240 void *func = (len != 0)
3241 ? (void *)ndrc_write_invalidate_many
3242 : (void *)ndrc_write_invalidate_one;
3243 emit_far_call(func);
3244 set_jump_target(jaddr, out);
3245 restore_regs(reglist);
3246 emit_jmp(stubs[n].retaddr);
3249 static void do_store_smc_check(int i, const struct regstat *i_regs, u_int reglist, int addr)
3251 if (HACK_ENABLED(NDHACK_NO_SMC_CHECK))
3253 // this can't be used any more since we started to check exact
3254 // block boundaries in invalidate_range()
3255 //if (i_regs->waswritten & (1<<dops[i].rs1))
3257 // (naively) assume nobody will run code from stack
3258 if (dops[i].rs1 == 29)
3261 int j, imm_maxdiff = 32, imm_min = cinfo[i].imm, imm_max = cinfo[i].imm, count = 1;
3262 if (i < slen - 1 && dops[i+1].is_store && dops[i+1].rs1 == dops[i].rs1
3263 && abs(cinfo[i+1].imm - cinfo[i].imm) <= imm_maxdiff)
3265 for (j = i - 1; j >= 0; j--) {
3266 if (!dops[j].is_store || dops[j].rs1 != dops[i].rs1
3267 || abs(cinfo[j].imm - cinfo[j+1].imm) > imm_maxdiff)
3270 if (imm_min > cinfo[j].imm)
3271 imm_min = cinfo[j].imm;
3272 if (imm_max < cinfo[j].imm)
3273 imm_max = cinfo[j].imm;
3275 #if defined(HOST_IMM8)
3276 int ir = get_reg(i_regs->regmap, INVCP);
3278 host_tempreg_acquire();
3279 emit_ldrb_indexedsr12_reg(ir, addr, HOST_TEMPREG);
3281 emit_cmpmem_indexedsr12_imm(invalid_code, addr, 1);
3284 #ifdef INVALIDATE_USE_COND_CALL
3286 emit_cmpimm(HOST_TEMPREG, 1);
3287 emit_callne(invalidate_addr_reg[addr]);
3288 host_tempreg_release();
3292 void *jaddr = emit_cbz(HOST_TEMPREG, 0);
3293 host_tempreg_release();
3294 imm_min -= cinfo[i].imm;
3295 imm_max -= cinfo[i].imm;
3296 add_stub(INVCODE_STUB, jaddr, out, reglist|(1<<HOST_CCREG),
3297 addr, imm_min, imm_max, 0);
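// the check above falls through when invalid_code[] is still set for the
// written 4KB page (no translated code there); otherwise the INVCODE_STUB /
// invalidate call flushes any blocks overlapping the store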
3300 static void store_assemble(int i, const struct regstat *i_regs, int ccadj_)
3303 int addr = cinfo[i].addr;
3306 enum stub_type type=0;
3307 int memtarget=0,c=0;
3308 int offset_reg = -1;
3309 int fastio_reg_override = -1;
3310 u_int reglist=get_host_reglist(i_regs->regmap);
3311 tl=get_reg(i_regs->regmap,dops[i].rs2);
3312 s=get_reg(i_regs->regmap,dops[i].rs1);
3313 offset=cinfo[i].imm;
3315 c=(i_regs->wasconst>>s)&1;
3317 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3322 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3324 jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
3325 &offset_reg, &fastio_reg_override, ccadj_);
3327 else if (ram_offset && memtarget) {
3328 offset_reg = get_ro_reg(i_regs, 0);
3331 switch (dops[i].opcode) {
3335 if (fastio_reg_override >= 0)
3336 a = fastio_reg_override;
3337 do_store_byte(a, tl, offset_reg);
3344 if (fastio_reg_override >= 0)
3345 a = fastio_reg_override;
3346 do_store_hword(a, 0, tl, offset_reg, 1);
3353 if (fastio_reg_override >= 0)
3354 a = fastio_reg_override;
3355 do_store_word(a, 0, tl, offset_reg, 1);
3362 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3363 host_tempreg_release();
3365 // PCSX store handlers don't check invcode again
3367 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3372 do_store_smc_check(i, i_regs, reglist, addr);
3375 u_int addr_val=constmap[i][s]+offset;
3377 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3378 } else if(c&&!memtarget) {
3379 inline_writestub(type,i,addr_val,i_regs->regmap,dops[i].rs2,ccadj_,reglist);
3381 // basic current block modification detection..
3382 // not looking back as that should be in mips cache already
3383 // (see Spyro2 title->attract mode)
3384 if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3385 SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3386 assert(i_regs->regmap==regs[i].regmap); // not delay slot
3387 if(i_regs->regmap==regs[i].regmap) {
3388 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3389 wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
3390 emit_movimm(start+i*4+4,0);
3391 emit_writeword(0,&pcaddr);
3392 emit_addimm(HOST_CCREG,2,HOST_CCREG);
3393 emit_far_call(ndrc_get_addr_ht);
3399 static void storelr_assemble(int i, const struct regstat *i_regs, int ccadj_)
3401 int addr = cinfo[i].addr;
3405 void *case1, *case23, *case3;
3406 void *done0, *done1, *done2;
3407 int memtarget=0,c=0;
3408 int offset_reg = -1;
3409 u_int reglist=get_host_reglist(i_regs->regmap);
3410 tl=get_reg(i_regs->regmap,dops[i].rs2);
3411 s=get_reg(i_regs->regmap,dops[i].rs1);
3412 offset=cinfo[i].imm;
3414 c=(i_regs->isconst>>s)&1;
3416 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3422 emit_cmpimm(addr, RAM_SIZE);
3428 if(!memtarget||!dops[i].rs1) {
3434 offset_reg = get_ro_reg(i_regs, 0);
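// SWL/SWR store only part of rt depending on addr&3 (little-endian); the two
// tests below dispatch to the four cases:
//   addr&3:  0        1        2        3
//   SWL:     1 byte   2 bytes  3 bytes  4 bytes   (high-order bytes of rt)
//   SWR:     4 bytes  3 bytes  2 bytes  1 byte    (low-order bytes of rt)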
3436 emit_testimm(addr,2);
3439 emit_testimm(addr,1);
3443 if (dops[i].opcode == 0x2A) { // SWL
3444 // Write msb into least significant byte
3445 if (dops[i].rs2) emit_rorimm(tl, 24, tl);
3446 do_store_byte(addr, tl, offset_reg);
3447 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3449 else if (dops[i].opcode == 0x2E) { // SWR
3450 // Write entire word
3451 do_store_word(addr, 0, tl, offset_reg, 1);
3456 set_jump_target(case1, out);
3457 if (dops[i].opcode == 0x2A) { // SWL
3458 // Write two msb into two least significant bytes
3459 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3460 do_store_hword(addr, -1, tl, offset_reg, 0);
3461 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3463 else if (dops[i].opcode == 0x2E) { // SWR
3464 // Write 3 lsb into three most significant bytes
3465 do_store_byte(addr, tl, offset_reg);
3466 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3467 do_store_hword(addr, 1, tl, offset_reg, 0);
3468 if (dops[i].rs2) emit_rorimm(tl, 24, tl);
3473 set_jump_target(case23, out);
3474 emit_testimm(addr,1);
3478 if (dops[i].opcode==0x2A) { // SWL
3479 // Write 3 msb into three least significant bytes
3480 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3481 do_store_hword(addr, -2, tl, offset_reg, 1);
3482 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3483 do_store_byte(addr, tl, offset_reg);
3484 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3486 else if (dops[i].opcode == 0x2E) { // SWR
3487 // Write two lsb into two most significant bytes
3488 do_store_hword(addr, 0, tl, offset_reg, 1);
3493 set_jump_target(case3, out);
3494 if (dops[i].opcode == 0x2A) { // SWL
3495 do_store_word(addr, -3, tl, offset_reg, 0);
3497 else if (dops[i].opcode == 0x2E) { // SWR
3498 do_store_byte(addr, tl, offset_reg);
3500 set_jump_target(done0, out);
3501 set_jump_target(done1, out);
3502 set_jump_target(done2, out);
3503 if (offset_reg == HOST_TEMPREG)
3504 host_tempreg_release();
3506 add_stub_r(STORELR_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3507 do_store_smc_check(i, i_regs, reglist, addr);
3510 static void cop0_assemble(int i, const struct regstat *i_regs, int ccadj_)
3512 if(dops[i].opcode2==0) // MFC0
3514 signed char t=get_reg_w(i_regs->regmap, dops[i].rt1);
3515 u_int copr=(source[i]>>11)&0x1f;
3516 if(t>=0&&dops[i].rt1!=0) {
3517 emit_readword(&reg_cop0[copr],t);
3520 else if(dops[i].opcode2==4) // MTC0
3522 int s = get_reg(i_regs->regmap, dops[i].rs1);
3523 int cc = get_reg(i_regs->regmap, CCREG);
3524 char copr=(source[i]>>11)&0x1f;
3526 wb_register(dops[i].rs1,i_regs->regmap,i_regs->dirty);
3527 if (copr == 12 || copr == 13) {
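// COP0 r12 is SR and r13 is CAUSE; writing either one can assert or clear an
// interrupt, so the current cycle count is made visible to the C handler here
// and pending_exception is checked afterwards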
3528 emit_readword(&last_count,HOST_TEMPREG);
3529 if (cc != HOST_CCREG)
3530 emit_loadreg(CCREG, HOST_CCREG);
3531 emit_add(HOST_CCREG, HOST_TEMPREG, HOST_CCREG);
3532 emit_addimm(HOST_CCREG, ccadj_ + 2, HOST_CCREG);
3533 emit_writeword(HOST_CCREG, &psxRegs.cycle);
3535 // burn cycles to cause cc_interrupt, which will
3536 // reschedule next_interupt. Relies on CCREG from above.
3537 assem_debug("MTC0 DS %d\n", copr);
3538 emit_writeword(HOST_CCREG,&last_count);
3539 emit_movimm(0,HOST_CCREG);
3540 emit_storereg(CCREG,HOST_CCREG);
3541 emit_loadreg(dops[i].rs1,1);
3542 emit_movimm(copr,0);
3543 emit_far_call(pcsx_mtc0_ds);
3544 emit_loadreg(dops[i].rs1,s);
3547 emit_movimm(start+i*4+4,HOST_TEMPREG);
3548 emit_writeword(HOST_TEMPREG,&pcaddr);
3549 emit_movimm(0,HOST_TEMPREG);
3550 emit_writeword(HOST_TEMPREG,&pending_exception);
3554 emit_movimm(copr, 0);
3555 emit_far_call(pcsx_mtc0);
3556 if (copr == 12 || copr == 13) {
3557 emit_readword(&psxRegs.cycle,HOST_CCREG);
3558 emit_readword(&last_count,HOST_TEMPREG);
3559 emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3560 //emit_writeword(HOST_TEMPREG,&last_count);
3561 assert(!is_delayslot);
3562 emit_readword(&pending_exception,HOST_TEMPREG);
3563 emit_test(HOST_TEMPREG,HOST_TEMPREG);
3566 emit_readword(&pcaddr, 0);
3567 emit_far_call(ndrc_get_addr_ht);
3569 set_jump_target(jaddr, out);
3570 emit_addimm(HOST_CCREG, -ccadj_ - 2, HOST_CCREG);
3571 if (cc != HOST_CCREG)
3572 emit_storereg(CCREG, HOST_CCREG);
3574 emit_loadreg(dops[i].rs1,s);
3578 static void rfe_assemble(int i, const struct regstat *i_regs)
3580 emit_readword(&psxRegs.CP0.n.SR, 0);
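// RFE pops the exception stack in SR: the previous/old KU,IE pairs (bits 5:2)
// move down into bits 3:0; r1 keeps bits 5:2, the low four bits of SR are
// cleared, then r1>>2 is OR'd back in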
3581 emit_andimm(0, 0x3c, 1);
3582 emit_andimm(0, ~0xf, 0);
3583 emit_orrshr_imm(1, 2, 0);
3584 emit_writeword(0, &psxRegs.CP0.n.SR);
3587 static int cop2_is_stalling_op(int i, int *cycles)
3589 if (dops[i].opcode == 0x3a) { // SWC2
3593 if (dops[i].itype == COP2 && (dops[i].opcode2 == 0 || dops[i].opcode2 == 2)) { // MFC2/CFC2
3597 if (dops[i].itype == C2OP) {
3598 *cycles = gte_cycletab[source[i] & 0x3f];
3601 // ... what about MTC2/CTC2/LWC2?
3606 static void log_gte_stall(int stall, u_int cycle)
3608 if ((u_int)stall <= 44)
3609 printf("x stall %2d %u\n", stall, cycle + last_count);
3612 static void emit_log_gte_stall(int i, int stall, u_int reglist)
3616 emit_movimm(stall, 0);
3618 emit_mov(HOST_TEMPREG, 0);
3619 emit_addimm(HOST_CCREG, cinfo[i].ccadj, 1);
3620 emit_far_call(log_gte_stall);
3621 restore_regs(reglist);
3625 static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
3627 int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed;
3628 int rtmp = reglist_find_free(reglist);
3630 if (HACK_ENABLED(NDHACK_NO_STALLS))
3632 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3633 // happens occasionally... cc evicted? Don't bother then
3634 //printf("no cc %08x\n", start + i*4);
3638 for (j = i - 1; j >= 0; j--) {
3639 //if (dops[j].is_ds) break;
3640 if (cop2_is_stalling_op(j, &other_gte_op_cycles) || dops[j].bt)
3642 if (j > 0 && cinfo[j - 1].ccadj > cinfo[j].ccadj)
3647 cycles_passed = cinfo[i].ccadj - cinfo[j].ccadj;
3648 if (other_gte_op_cycles >= 0)
3649 stall = other_gte_op_cycles - cycles_passed;
3650 else if (cycles_passed >= 44)
3651 stall = 0; // can't stall
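// 44 cycles is the worst-case GTE operation duration modeled here (see
// gte_cycletab), so after that many cycles the unit can no longer be busy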
3652 if (stall == -MAXBLOCK && rtmp >= 0) {
3653 // unknown stall, do the expensive runtime check
3654 assem_debug("; cop2_do_stall_check\n");
3657 emit_movimm(gte_cycletab[op], 0);
3658 emit_addimm(HOST_CCREG, cinfo[i].ccadj, 1);
3659 emit_far_call(call_gteStall);
3660 restore_regs(reglist);
3662 host_tempreg_acquire();
3663 emit_readword(&psxRegs.gteBusyCycle, rtmp);
3664 emit_addimm(rtmp, -cinfo[i].ccadj, rtmp);
3665 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3666 emit_cmpimm(HOST_TEMPREG, 44);
3667 emit_cmovb_reg(rtmp, HOST_CCREG);
3668 //emit_log_gte_stall(i, 0, reglist);
3669 host_tempreg_release();
3672 else if (stall > 0) {
3673 //emit_log_gte_stall(i, stall, reglist);
3674 emit_addimm(HOST_CCREG, stall, HOST_CCREG);
3677 // save gteBusyCycle, if needed
3678 if (gte_cycletab[op] == 0)
3680 other_gte_op_cycles = -1;
3681 for (j = i + 1; j < slen; j++) {
3682 if (cop2_is_stalling_op(j, &other_gte_op_cycles))
3684 if (dops[j].is_jump) {
3686 if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
3691 if (other_gte_op_cycles >= 0)
3692 // will handle stall when assembling that op
3694 cycles_passed = cinfo[min(j, slen -1)].ccadj - cinfo[i].ccadj;
3695 if (cycles_passed >= 44)
3697 assem_debug("; save gteBusyCycle\n");
3698 host_tempreg_acquire();
3700 emit_readword(&last_count, HOST_TEMPREG);
3701 emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
3702 emit_addimm(HOST_TEMPREG, cinfo[i].ccadj, HOST_TEMPREG);
3703 emit_addimm(HOST_TEMPREG, gte_cycletab[op], HOST_TEMPREG);
3704 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3706 emit_addimm(HOST_CCREG, cinfo[i].ccadj + gte_cycletab[op], HOST_TEMPREG);
3707 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3709 host_tempreg_release();
3712 static int is_mflohi(int i)
3714 return (dops[i].itype == MOV && (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG));
3717 static int check_multdiv(int i, int *cycles)
3719 if (dops[i].itype != MULTDIV)
3721 if (dops[i].opcode2 == 0x18 || dops[i].opcode2 == 0x19) // MULT(U)
3722 *cycles = 11; // approx from 7 11 14
3728 static void multdiv_prepare_stall(int i, const struct regstat *i_regs, int ccadj_)
3730 int j, found = 0, c = 0;
3731 if (HACK_ENABLED(NDHACK_NO_STALLS))
3733 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3734 // happens occasionally... cc evicted? Don't bother then
3737 for (j = i + 1; j < slen; j++) {
3740 if ((found = is_mflohi(j)))
3742 if (dops[j].is_jump) {
3744 if (j + 1 < slen && (found = is_mflohi(j + 1)))
3750 // handle all in multdiv_do_stall()
3752 check_multdiv(i, &c);
3754 assem_debug("; muldiv prepare stall %d\n", c);
3755 host_tempreg_acquire();
3756 emit_addimm(HOST_CCREG, ccadj_ + c, HOST_TEMPREG);
3757 emit_writeword(HOST_TEMPREG, &psxRegs.muldivBusyCycle);
3758 host_tempreg_release();
3761 static void multdiv_do_stall(int i, const struct regstat *i_regs)
3763 int j, known_cycles = 0;
3764 u_int reglist = get_host_reglist(i_regs->regmap);
3765 int rtmp = get_reg_temp(i_regs->regmap);
3767 rtmp = reglist_find_free(reglist);
3768 if (HACK_ENABLED(NDHACK_NO_STALLS))
3770 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG || rtmp < 0) {
3771 // happens occasionally... cc evicted? Don't bother then
3772 //printf("no cc/rtmp %08x\n", start + i*4);
3776 for (j = i - 1; j >= 0; j--) {
3777 if (dops[j].is_ds) break;
3778 if (check_multdiv(j, &known_cycles))
3781 // already handled by this op
3783 if (dops[j].bt || (j > 0 && cinfo[j - 1].ccadj > cinfo[j].ccadj))
3788 if (known_cycles > 0) {
3789 known_cycles -= cinfo[i].ccadj - cinfo[j].ccadj;
3790 assem_debug("; muldiv stall resolved %d\n", known_cycles);
3791 if (known_cycles > 0)
3792 emit_addimm(HOST_CCREG, known_cycles, HOST_CCREG);
3795 assem_debug("; muldiv stall unresolved\n");
3796 host_tempreg_acquire();
3797 emit_readword(&psxRegs.muldivBusyCycle, rtmp);
3798 emit_addimm(rtmp, -cinfo[i].ccadj, rtmp);
3799 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3800 emit_cmpimm(HOST_TEMPREG, 37);
3801 emit_cmovb_reg(rtmp, HOST_CCREG);
3802 //emit_log_gte_stall(i, 0, reglist);
3803 host_tempreg_release();
3806 static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
3816 emit_readword(&reg_cop2d[copr],tl);
3817 emit_signextend16(tl,tl);
3818 emit_writeword(tl,&reg_cop2d[copr]); // hmh
3825 emit_readword(&reg_cop2d[copr],tl);
3826 emit_andimm(tl,0xffff,tl);
3827 emit_writeword(tl,&reg_cop2d[copr]);
3830 emit_readword(&reg_cop2d[14],tl); // SXY2
3831 emit_writeword(tl,&reg_cop2d[copr]);
3835 c2op_mfc2_29_assemble(tl,temp);
3838 emit_readword(&reg_cop2d[copr],tl);
3843 static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
3847 emit_readword(&reg_cop2d[13],temp); // SXY1
3848 emit_writeword(sl,&reg_cop2d[copr]);
3849 emit_writeword(temp,&reg_cop2d[12]); // SXY0
3850 emit_readword(&reg_cop2d[14],temp); // SXY2
3851 emit_writeword(sl,&reg_cop2d[14]);
3852 emit_writeword(temp,&reg_cop2d[13]); // SXY1
3855 emit_andimm(sl,0x001f,temp);
3856 emit_shlimm(temp,7,temp);
3857 emit_writeword(temp,&reg_cop2d[9]);
3858 emit_andimm(sl,0x03e0,temp);
3859 emit_shlimm(temp,2,temp);
3860 emit_writeword(temp,&reg_cop2d[10]);
3861 emit_andimm(sl,0x7c00,temp);
3862 emit_shrimm(temp,3,temp);
3863 emit_writeword(temp,&reg_cop2d[11]);
3864 emit_writeword(sl,&reg_cop2d[28]);
3867 emit_xorsar_imm(sl,sl,31,temp);
3868 #if defined(HAVE_ARMV5) || defined(__aarch64__)
3869 emit_clz(temp,temp);
3871 emit_movs(temp,HOST_TEMPREG);
3872 emit_movimm(0,temp);
3873 emit_jeq((int)out+4*4);
3874 emit_addpl_imm(temp,1,temp);
3875 emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
3876 emit_jns((int)out-2*4);
3878 emit_writeword(sl,&reg_cop2d[30]);
3879 emit_writeword(temp,&reg_cop2d[31]);
3884 emit_writeword(sl,&reg_cop2d[copr]);
3889 static void c2ls_assemble(int i, const struct regstat *i_regs, int ccadj_)
3894 int memtarget=0,c=0;
3896 enum stub_type type;
3897 int offset_reg = -1;
3898 int fastio_reg_override = -1;
3899 u_int reglist=get_host_reglist(i_regs->regmap);
3900 u_int copr=(source[i]>>16)&0x1f;
3901 s=get_reg(i_regs->regmap,dops[i].rs1);
3902 tl=get_reg(i_regs->regmap,FTEMP);
3903 offset=cinfo[i].imm;
3906 if(i_regs->regmap[HOST_CCREG]==CCREG)
3907 reglist&=~(1<<HOST_CCREG);
3912 if (dops[i].opcode==0x3a) { // SWC2
3915 if(s>=0) c=(i_regs->wasconst>>s)&1;
3916 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3918 cop2_do_stall_check(0, i, i_regs, reglist);
3920 if (dops[i].opcode==0x3a) { // SWC2
3921 cop2_get_dreg(copr,tl,-1);
3929 emit_jmp(0); // inline_readstub/inline_writestub?
3933 jaddr2 = emit_fastpath_cmp_jump(i, i_regs, ar,
3934 &offset_reg, &fastio_reg_override, ccadj_);
3936 else if (ram_offset && memtarget) {
3937 offset_reg = get_ro_reg(i_regs, 0);
3939 switch (dops[i].opcode) {
3940 case 0x32: { // LWC2
3942 if (fastio_reg_override >= 0)
3943 a = fastio_reg_override;
3944 do_load_word(a, tl, offset_reg);
3947 case 0x3a: { // SWC2
3948 #ifdef DESTRUCTIVE_SHIFT
3949 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3952 if (fastio_reg_override >= 0)
3953 a = fastio_reg_override;
3954 do_store_word(a, 0, tl, offset_reg, 1);
3961 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3962 host_tempreg_release();
3964 add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj_,reglist);
3965 if(dops[i].opcode==0x3a) // SWC2
3966 do_store_smc_check(i, i_regs, reglist, ar);
3967 if (dops[i].opcode==0x32) { // LWC2
3968 host_tempreg_acquire();
3969 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3970 host_tempreg_release();
3974 static void cop2_assemble(int i, const struct regstat *i_regs)
3976 u_int copr = (source[i]>>11) & 0x1f;
3977 signed char temp = get_reg_temp(i_regs->regmap);
3979 if (!HACK_ENABLED(NDHACK_NO_STALLS)) {
3980 u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), temp, -1);
3981 if (dops[i].opcode2 == 0 || dops[i].opcode2 == 2) { // MFC2/CFC2
3982 signed char tl = get_reg(i_regs->regmap, dops[i].rt1);
3983 reglist = reglist_exclude(reglist, tl, -1);
3985 cop2_do_stall_check(0, i, i_regs, reglist);
3987 if (dops[i].opcode2==0) { // MFC2
3988 signed char tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3989 if(tl>=0&&dops[i].rt1!=0)
3990 cop2_get_dreg(copr,tl,temp);
3992 else if (dops[i].opcode2==4) { // MTC2
3993 signed char sl=get_reg(i_regs->regmap,dops[i].rs1);
3994 cop2_put_dreg(copr,sl,temp);
3996 else if (dops[i].opcode2==2) // CFC2
3998 signed char tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3999 if(tl>=0&&dops[i].rt1!=0)
4000 emit_readword(&reg_cop2c[copr],tl);
4002 else if (dops[i].opcode2==6) // CTC2
4004 signed char sl=get_reg(i_regs->regmap,dops[i].rs1);
4013 emit_signextend16(sl,temp);
4016 c2op_ctc2_31_assemble(sl,temp);
4022 emit_writeword(temp,&reg_cop2c[copr]);
4027 static void do_unalignedwritestub(int n)
4029 assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4);
4031 set_jump_target(stubs[n].addr, out);
4034 struct regstat *i_regs=(struct regstat *)stubs[n].c;
4035 int addr=stubs[n].b;
4036 u_int reglist=stubs[n].e;
4037 signed char *i_regmap=i_regs->regmap;
4038 int temp2=get_reg(i_regmap,FTEMP);
4040 rt=get_reg(i_regmap,dops[i].rs2);
4043 assert(dops[i].opcode==0x2a||dops[i].opcode==0x2e); // SWL/SWR only implemented
4045 reglist&=~(1<<temp2);
4047 // don't bother with it and call write handler
4050 int cc=get_reg(i_regmap,CCREG);
4052 emit_loadreg(CCREG,2);
4053 emit_addimm(cc<0?2:cc,(int)stubs[n].d+1,2);
4054 emit_movimm(start + i*4,3);
4055 emit_writeword(3,&psxRegs.pc);
4056 emit_far_call((dops[i].opcode==0x2a?jump_handle_swl:jump_handle_swr));
4057 emit_addimm(0,-((int)stubs[n].d+1),cc<0?2:cc);
4059 emit_storereg(CCREG,2);
4060 restore_regs(reglist);
4061 emit_jmp(stubs[n].retaddr); // return address
4064 static void do_overflowstub(int n)
4066 assem_debug("do_overflowstub %x\n", start + (u_int)stubs[n].a * 4);
4069 struct regstat *i_regs = (struct regstat *)stubs[n].c;
4070 int ccadj = stubs[n].d;
4071 set_jump_target(stubs[n].addr, out);
4072 wb_dirtys(regs[i].regmap, regs[i].dirty);
4073 exception_assemble(i, i_regs, ccadj);
4076 static void do_alignmentstub(int n)
4078 assem_debug("do_alignmentstub %x\n", start + (u_int)stubs[n].a * 4);
4081 struct regstat *i_regs = (struct regstat *)stubs[n].c;
4082 int ccadj = stubs[n].d;
4083 int is_store = dops[i].itype == STORE || dops[i].opcode == 0x3A; // SWC2
4084 int cause = (dops[i].opcode & 3) << 28;
4085 cause |= is_store ? (R3000E_AdES << 2) : (R3000E_AdEL << 2);
4086 set_jump_target(stubs[n].addr, out);
4087 wb_dirtys(regs[i].regmap, regs[i].dirty);
4088 if (stubs[n].b != 1)
4089 emit_mov(stubs[n].b, 1); // faulting address
4090 emit_movimm(cause, 0);
4091 exception_assemble(i, i_regs, ccadj);
4094 #ifndef multdiv_assemble
4095 void multdiv_assemble(int i,struct regstat *i_regs)
4097 printf("Need multdiv_assemble for this architecture.\n");
4102 static void mov_assemble(int i, const struct regstat *i_regs)
4104 //if(dops[i].opcode2==0x10||dops[i].opcode2==0x12) { // MFHI/MFLO
4105 //if(dops[i].opcode2==0x11||dops[i].opcode2==0x13) { // MTHI/MTLO
4108 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
4111 sl=get_reg(i_regs->regmap,dops[i].rs1);
4112 if(sl>=0) emit_mov(sl,tl);
4113 else emit_loadreg(dops[i].rs1,tl);
4116 if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) // MFHI/MFLO
4117 multdiv_do_stall(i, i_regs);
4120 // call interpreter, exception handler, things that change pc/regs/cycles ...
4121 static void call_c_cpu_handler(int i, const struct regstat *i_regs, int ccadj_, u_int pc, void *func)
4123 signed char ccreg=get_reg(i_regs->regmap,CCREG);
4124 assert(ccreg==HOST_CCREG);
4125 assert(!is_delayslot);
4128 emit_movimm(pc,3); // Get PC
4129 emit_readword(&last_count,2);
4130 emit_writeword(3,&psxRegs.pc);
4131 emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
4132 emit_add(2,HOST_CCREG,2);
4133 emit_writeword(2,&psxRegs.cycle);
4134 emit_addimm_ptr(FP,(u_char *)&psxRegs - (u_char *)&dynarec_local,0);
4135 emit_far_call(func);
4136 emit_far_jump(jump_to_new_pc);
4139 static void exception_assemble(int i, const struct regstat *i_regs, int ccadj_)
4141 // 'break' tends to be littered around to catch things like
4142 // division by 0 and is almost never executed, so don't emit much code here
4144 if (dops[i].itype == ALU || dops[i].itype == IMM16)
4145 func = is_delayslot ? jump_overflow_ds : jump_overflow;
4146 else if (dops[i].itype == LOAD || dops[i].itype == STORE)
4147 func = is_delayslot ? jump_addrerror_ds : jump_addrerror;
4148 else if (dops[i].opcode2 == 0x0C)
4149 func = is_delayslot ? jump_syscall_ds : jump_syscall;
4151 func = is_delayslot ? jump_break_ds : jump_break;
4152 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) // evicted
4153 emit_loadreg(CCREG, HOST_CCREG);
4154 emit_movimm(start + i*4, 2); // pc
4155 emit_addimm(HOST_CCREG, ccadj_ + CLOCK_ADJUST(1), HOST_CCREG);
4156 emit_far_jump(func);
4159 static void hlecall_bad()
4164 static void hlecall_assemble(int i, const struct regstat *i_regs, int ccadj_)
4166 void *hlefunc = hlecall_bad;
4167 uint32_t hleCode = source[i] & 0x03ffffff;
4168 if (hleCode < ARRAY_SIZE(psxHLEt))
4169 hlefunc = psxHLEt[hleCode];
4171 call_c_cpu_handler(i, i_regs, ccadj_, start + i*4+4, hlefunc);
4174 static void intcall_assemble(int i, const struct regstat *i_regs, int ccadj_)
4176 call_c_cpu_handler(i, i_regs, ccadj_, start + i*4, execI);
4179 static void speculate_mov(int rs,int rt)
4182 smrv_strong_next|=1<<rt;
4187 static void speculate_mov_weak(int rs,int rt)
4190 smrv_weak_next|=1<<rt;
4195 static void speculate_register_values(int i)
4198 memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
4199 // gp,sp are likely to stay the same throughout the block
4200 smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
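// (r28=$gp, r29=$sp, r30=$fp)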
4201 smrv_weak_next=~smrv_strong_next;
4202 //printf(" llr %08x\n", smrv[4]);
4204 smrv_strong=smrv_strong_next;
4205 smrv_weak=smrv_weak_next;
4206 switch(dops[i].itype) {
4208 if ((smrv_strong>>dops[i].rs1)&1) speculate_mov(dops[i].rs1,dops[i].rt1);
4209 else if((smrv_strong>>dops[i].rs2)&1) speculate_mov(dops[i].rs2,dops[i].rt1);
4210 else if((smrv_weak>>dops[i].rs1)&1) speculate_mov_weak(dops[i].rs1,dops[i].rt1);
4211 else if((smrv_weak>>dops[i].rs2)&1) speculate_mov_weak(dops[i].rs2,dops[i].rt1);
4213 smrv_strong_next&=~(1<<dops[i].rt1);
4214 smrv_weak_next&=~(1<<dops[i].rt1);
4218 smrv_strong_next&=~(1<<dops[i].rt1);
4219 smrv_weak_next&=~(1<<dops[i].rt1);
4222 if(dops[i].rt1&&is_const(®s[i],dops[i].rt1)) {
4223 int hr = get_reg_w(regs[i].regmap, dops[i].rt1);
4226 if(get_final_value(hr,i,&value))
4227 smrv[dops[i].rt1]=value;
4228 else smrv[dops[i].rt1]=constmap[i][hr];
4229 smrv_strong_next|=1<<dops[i].rt1;
4233 if ((smrv_strong>>dops[i].rs1)&1) speculate_mov(dops[i].rs1,dops[i].rt1);
4234 else if((smrv_weak>>dops[i].rs1)&1) speculate_mov_weak(dops[i].rs1,dops[i].rt1);
4238 if(start<0x2000&&(dops[i].rt1==26||(smrv[dops[i].rt1]>>24)==0xa0)) {
4239 // special case for BIOS
4240 smrv[dops[i].rt1]=0xa0000000;
4241 smrv_strong_next|=1<<dops[i].rt1;
4248 smrv_strong_next&=~(1<<dops[i].rt1);
4249 smrv_weak_next&=~(1<<dops[i].rt1);
4253 if(dops[i].opcode2==0||dops[i].opcode2==2) { // MFC/CFC
4254 smrv_strong_next&=~(1<<dops[i].rt1);
4255 smrv_weak_next&=~(1<<dops[i].rt1);
4259 if (dops[i].opcode==0x32) { // LWC2
4260 smrv_strong_next&=~(1<<dops[i].rt1);
4261 smrv_weak_next&=~(1<<dops[i].rt1);
4267 printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
4268 ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
4272 static void ujump_assemble(int i, const struct regstat *i_regs);
4273 static void rjump_assemble(int i, const struct regstat *i_regs);
4274 static void cjump_assemble(int i, const struct regstat *i_regs);
4275 static void sjump_assemble(int i, const struct regstat *i_regs);
4277 static int assemble(int i, const struct regstat *i_regs, int ccadj_)
4280 switch (dops[i].itype) {
4282 alu_assemble(i, i_regs, ccadj_);
4285 imm16_assemble(i, i_regs, ccadj_);
4288 shift_assemble(i, i_regs);
4291 shiftimm_assemble(i, i_regs);
4294 load_assemble(i, i_regs, ccadj_);
4297 loadlr_assemble(i, i_regs, ccadj_);
4300 store_assemble(i, i_regs, ccadj_);
4303 storelr_assemble(i, i_regs, ccadj_);
4306 cop0_assemble(i, i_regs, ccadj_);
4309 rfe_assemble(i, i_regs);
4312 cop2_assemble(i, i_regs);
4315 c2ls_assemble(i, i_regs, ccadj_);
4318 c2op_assemble(i, i_regs);
4321 multdiv_assemble(i, i_regs);
4322 multdiv_prepare_stall(i, i_regs, ccadj_);
4325 mov_assemble(i, i_regs);
4328 exception_assemble(i, i_regs, ccadj_);
4331 hlecall_assemble(i, i_regs, ccadj_);
4334 intcall_assemble(i, i_regs, ccadj_);
4337 ujump_assemble(i, i_regs);
4341 rjump_assemble(i, i_regs);
4345 cjump_assemble(i, i_regs);
4349 sjump_assemble(i, i_regs);
4354 // not handled, just skip
4362 static void ds_assemble(int i, const struct regstat *i_regs)
4364 speculate_register_values(i);
4366 switch (dops[i].itype) {
4374 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
4377 assemble(i, i_regs, cinfo[i].ccadj);
4382 // Is the branch target a valid internal jump?
4383 static int internal_branch(int addr)
4385 if(addr&1) return 0; // Indirect (register) jump
4386 if(addr>=start && addr<start+slen*4-4)
4393 static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t u)
4396 for(hr=0;hr<HOST_REGS;hr++) {
4397 if(hr!=EXCLUDE_REG) {
4398 if(pre[hr]!=entry[hr]) {
4401 if(get_reg(entry,pre[hr])<0) {
4403 if(!((u>>pre[hr])&1))
4404 emit_storereg(pre[hr],hr);
4411 // Move from one register to another (no writeback)
4412 for(hr=0;hr<HOST_REGS;hr++) {
4413 if(hr!=EXCLUDE_REG) {
4414 if(pre[hr]!=entry[hr]) {
4415 if(pre[hr]>=0&&pre[hr]<TEMPREG) {
4417 if((nr=get_reg(entry,pre[hr]))>=0) {
4426 // Load the specified registers
4427 // This only loads the registers given as arguments because
4428 // we don't want to load things that will be overwritten
4429 static inline void load_reg(signed char entry[], signed char regmap[], int rs)
4431 int hr = get_reg(regmap, rs);
4432 if (hr >= 0 && entry[hr] != regmap[hr])
4433 emit_loadreg(regmap[hr], hr);
4436 static void load_regs(signed char entry[], signed char regmap[], int rs1, int rs2)
4438 load_reg(entry, regmap, rs1);
4440 load_reg(entry, regmap, rs2);
4443 // Load registers prior to the start of a loop
4444 // so that they are not loaded within the loop
4445 static void loop_preload(signed char pre[],signed char entry[])
4448 for (hr = 0; hr < HOST_REGS; hr++) {
4450 if (r >= 0 && pre[hr] != r && get_reg(pre, r) < 0) {
4451 assem_debug("loop preload:\n");
4453 emit_loadreg(r, hr);
4458 // Generate address for load/store instruction
4459 // goes to AGEN (or temp) for writes, FTEMP for LOADLR and cop1/2 loads
4460 // AGEN is assigned by pass5b_preallocate2
4461 static void address_generation(int i, const struct regstat *i_regs, signed char entry[])
4463 if (dops[i].is_load || dops[i].is_store) {
4465 int agr = AGEN1 + (i&1);
4466 if(dops[i].itype==LOAD) {
4467 if (!dops[i].may_except)
4468 ra = get_reg_w(i_regs->regmap, dops[i].rt1); // reuse dest for agen
4470 ra = get_reg_temp(i_regs->regmap);
4472 if(dops[i].itype==LOADLR) {
4473 ra=get_reg(i_regs->regmap,FTEMP);
4475 if(dops[i].itype==STORE||dops[i].itype==STORELR) {
4476 ra=get_reg(i_regs->regmap,agr);
4477 if(ra<0) ra=get_reg_temp(i_regs->regmap);
4479 if(dops[i].itype==C2LS) {
4480 if (dops[i].opcode == 0x32) // LWC2
4481 ra=get_reg(i_regs->regmap,FTEMP);
4483 ra=get_reg(i_regs->regmap,agr);
4484 if(ra<0) ra=get_reg_temp(i_regs->regmap);
4487 int rs = get_reg(i_regs->regmap, dops[i].rs1);
4490 int offset = cinfo[i].imm;
4491 int add_offset = offset != 0;
4492 int c = rs >= 0 && ((i_regs->wasconst >> rs) & 1);
4493 if(dops[i].rs1==0) {
4494 // Using r0 as a base address
4496 if(!entry||entry[ra]!=agr) {
4497 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
4498 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4500 emit_movimm(offset,ra);
4502 } // else did it in the previous cycle
4508 if (!entry || entry[ra] != dops[i].rs1)
4509 emit_loadreg(dops[i].rs1, ra);
4511 //if(!entry||entry[ra]!=dops[i].rs1)
4512 // printf("poor load scheduling!\n");
4515 if(dops[i].rs1!=dops[i].rt1||dops[i].itype!=LOAD) {
4517 if(!entry||entry[ra]!=agr) {
4518 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
4519 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4521 emit_movimm(constmap[i][rs]+offset,ra);
4522 regs[i].loadedconst|=1<<ra;
4524 } // else did it in the previous cycle
4527 else // else load_consts already did it
4531 else if (dops[i].itype == STORELR) { // overwrites addr
4542 emit_addimm(rs,offset,ra);
4544 emit_addimm(ra,offset,ra);
4549 assert(cinfo[i].addr >= 0);
4551 // Preload constants for next instruction
4552 if (dops[i+1].is_load || dops[i+1].is_store) {
4555 agr=AGEN1+((i+1)&1);
4556 ra=get_reg(i_regs->regmap,agr);
4558 int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
4559 int offset=cinfo[i+1].imm;
4560 int c=(regs[i+1].wasconst>>rs)&1;
4561 if(c&&(dops[i+1].rs1!=dops[i+1].rt1||dops[i+1].itype!=LOAD)) {
4562 if (dops[i+1].opcode==0x22||dops[i+1].opcode==0x26) {
4563 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4564 }else if (dops[i+1].opcode==0x1a||dops[i+1].opcode==0x1b) {
4565 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4567 emit_movimm(constmap[i+1][rs]+offset,ra);
4568 regs[i+1].loadedconst|=1<<ra;
4571 else if(dops[i+1].rs1==0) {
4572 // Using r0 as a base address
4573 if (dops[i+1].opcode==0x22||dops[i+1].opcode==0x26) {
4574 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4575 }else if (dops[i+1].opcode==0x1a||dops[i+1].opcode==0x1b) {
4576 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4578 emit_movimm(offset,ra);
4585 static int get_final_value(int hr, int i, u_int *value)
4587 int reg=regs[i].regmap[hr];
4589 if(regs[i+1].regmap[hr]!=reg) break;
4590 if(!((regs[i+1].isconst>>hr)&1)) break;
4591 if(dops[i+1].bt) break;
4595 if (dops[i].is_jump) {
4596 *value=constmap[i][hr];
4600 if (dops[i+1].is_jump) {
4601 // Load in delay slot, out-of-order execution
4602 if(dops[i+2].itype==LOAD&&dops[i+2].rs1==reg&&dops[i+2].rt1==reg&&((regs[i+1].wasconst>>hr)&1))
4604 // Precompute load address
4605 *value=constmap[i][hr]+cinfo[i+2].imm;
4609 if(dops[i+1].itype==LOAD&&dops[i+1].rs1==reg&&dops[i+1].rt1==reg)
4611 // Precompute load address
4612 *value=constmap[i][hr]+cinfo[i+1].imm;
4613 //printf("c=%x imm=%lx\n",(long)constmap[i][hr],cinfo[i+1].imm);
4618 *value=constmap[i][hr];
4619 //printf("c=%lx\n",(long)constmap[i][hr]);
4620 if(i==slen-1) return 1;
4622 return !((unneeded_reg[i+1]>>reg)&1);
4625 // Load registers with known constants
4626 static void load_consts(signed char pre[],signed char regmap[],int i)
4629 // propagate loaded constant flags
4630 if(i==0||dops[i].bt)
4631 regs[i].loadedconst=0;
4633 for(hr=0;hr<HOST_REGS;hr++) {
4634 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
4635 &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
4637 regs[i].loadedconst|=1<<hr;
4642 for(hr=0;hr<HOST_REGS;hr++) {
4643 if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4644 //if(entry[hr]!=regmap[hr]) {
4645 if(!((regs[i].loadedconst>>hr)&1)) {
4646 assert(regmap[hr]<64);
4647 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
4648 u_int value, similar=0;
4649 if(get_final_value(hr,i,&value)) {
4650 // see if some other register has similar value
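// (e.g. a constant sharing its upper bits with one already loaded can usually
//  be derived with a single extra instruction via emit_movimm_from instead of
//  a full immediate load)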
4651 for(hr2=0;hr2<HOST_REGS;hr2++) {
4652 if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
4653 if(is_similar_value(value,constmap[i][hr2])) {
4661 if(get_final_value(hr2,i,&value2)) // is this needed?
4662 emit_movimm_from(value2,hr2,value,hr);
4664 emit_movimm(value,hr);
4670 emit_movimm(value,hr);
4673 regs[i].loadedconst|=1<<hr;
4680 static void load_all_consts(const signed char regmap[], u_int dirty, int i)
4684 for(hr=0;hr<HOST_REGS;hr++) {
4685 if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4686 assert(regmap[hr] < 64);
4687 if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
4688 int value=constmap[i][hr];
4693 emit_movimm(value,hr);
4700 // Write out all dirty registers (except cycle count)
4701 static void wb_dirtys(const signed char i_regmap[], uint64_t i_dirty)
4704 for(hr=0;hr<HOST_REGS;hr++) {
4705 if(hr!=EXCLUDE_REG) {
4706 if(i_regmap[hr]>0) {
4707 if(i_regmap[hr]!=CCREG) {
4708 if((i_dirty>>hr)&1) {
4709 assert(i_regmap[hr]<64);
4710 emit_storereg(i_regmap[hr],hr);
4718 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4719 // This writes the registers not written by store_regs_bt
4720 static void wb_needed_dirtys(const signed char i_regmap[], uint64_t i_dirty, int addr)
4723 int t=(addr-start)>>2;
4724 for(hr=0;hr<HOST_REGS;hr++) {
4725 if(hr!=EXCLUDE_REG) {
4726 if(i_regmap[hr]>0) {
4727 if(i_regmap[hr]!=CCREG) {
4728 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) {
4729 if((i_dirty>>hr)&1) {
4730 assert(i_regmap[hr]<64);
4731 emit_storereg(i_regmap[hr],hr);
4740 // Load all registers (except cycle count)
4741 static void load_all_regs(const signed char i_regmap[])
4744 for(hr=0;hr<HOST_REGS;hr++) {
4745 if(hr!=EXCLUDE_REG) {
4746 if(i_regmap[hr]==0) {
4750 if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
4752 emit_loadreg(i_regmap[hr],hr);
4758 // Load all current registers also needed by next instruction
4759 static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[])
4762 for(hr=0;hr<HOST_REGS;hr++) {
4763 if(hr!=EXCLUDE_REG) {
4764 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4765 if(i_regmap[hr]==0) {
4769 if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
4771 emit_loadreg(i_regmap[hr],hr);
4778 // Load all regs, storing cycle count if necessary
4779 static void load_regs_entry(int t)
4782 if(dops[t].is_ds) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4783 else if(cinfo[t].ccadj) emit_addimm(HOST_CCREG,-cinfo[t].ccadj,HOST_CCREG);
4784 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4785 emit_storereg(CCREG,HOST_CCREG);
4788 for(hr=0;hr<HOST_REGS;hr++) {
4789 if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4790 if(regs[t].regmap_entry[hr]==0) {
4793 else if(regs[t].regmap_entry[hr]!=CCREG)
4795 emit_loadreg(regs[t].regmap_entry[hr],hr);
4801 // Store dirty registers prior to branch
4802 static void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4804 if(internal_branch(addr))
4806 int t=(addr-start)>>2;
4808 for(hr=0;hr<HOST_REGS;hr++) {
4809 if(hr!=EXCLUDE_REG) {
4810 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4811 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) {
4812 if((i_dirty>>hr)&1) {
4813 assert(i_regmap[hr]<64);
4814 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4815 emit_storereg(i_regmap[hr],hr);
4824 // Branch out of this block, write out all dirty regs
4825 wb_dirtys(i_regmap,i_dirty);
4829 // Load all needed registers for branch target
4830 static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4832 //if(addr>=start && addr<(start+slen*4))
4833 if(internal_branch(addr))
4835 int t=(addr-start)>>2;
4837 // Store the cycle count before loading something else
4838 if(i_regmap[HOST_CCREG]!=CCREG) {
4839 assert(i_regmap[HOST_CCREG]==-1);
4841 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4842 emit_storereg(CCREG,HOST_CCREG);
4845 for(hr=0;hr<HOST_REGS;hr++) {
4846 if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4847 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4848 if(regs[t].regmap_entry[hr]==0) {
4851 else if(regs[t].regmap_entry[hr]!=CCREG)
4853 emit_loadreg(regs[t].regmap_entry[hr],hr);
4861 static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4863 if(addr>=start && addr<start+slen*4-4)
4865 int t=(addr-start)>>2;
4867 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4868 for(hr=0;hr<HOST_REGS;hr++)
4872 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4874 if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4881 if(i_regmap[hr]<TEMPREG)
4883 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4886 else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4892 else // Same register but is it 32-bit or dirty?
4895 if(!((regs[t].dirty>>hr)&1))
4899 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4901 //printf("%x: dirty no match\n",addr);
4909 // Delay slots are not valid branch targets
4910 //if(t>0&&(dops[t-1].is_jump) return 0;
4911 // Delay slots require additional processing, so do not match
4912 if(dops[t].is_ds) return 0;
4917 for(hr=0;hr<HOST_REGS;hr++)
4923 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4938 static void drc_dbg_emit_do_cmp(int i, int ccadj_)
4940 extern void do_insn_cmp();
4942 u_int hr, reglist = get_host_reglist(regs[i].regmap);
4943 reglist |= get_host_reglist(regs[i].regmap_entry);
4944 reglist &= DRC_DBG_REGMASK;
4946 assem_debug("//do_insn_cmp %08x\n", start+i*4);
4948 // write out changed consts to match the interpreter
4949 if (i > 0 && !dops[i].bt) {
4950 for (hr = 0; hr < HOST_REGS; hr++) {
4951 int reg = regs[i].regmap_entry[hr]; // regs[i-1].regmap[hr];
4952 if (hr == EXCLUDE_REG || reg <= 0)
4954 if (!((regs[i-1].isconst >> hr) & 1))
4956 if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
4958 emit_movimm(constmap[i-1][hr],0);
4959 emit_storereg(reg, 0);
4962 emit_movimm(start+i*4,0);
4963 emit_writeword(0,&pcaddr);
4964 int cc = get_reg(regs[i].regmap_entry, CCREG);
4966 emit_loadreg(CCREG, cc = 0);
4967 emit_addimm(cc, ccadj_, 0);
4968 emit_writeword(0, &psxRegs.cycle);
4969 emit_far_call(do_insn_cmp);
4970 //emit_readword(&cycle,0);
4971 //emit_addimm(0,2,0);
4972 //emit_writeword(0,&cycle);
4974 restore_regs(reglist);
4975 assem_debug("\\\\do_insn_cmp\n");
4978 #define drc_dbg_emit_do_cmp(x,y)
4981 // Used when a branch jumps into the delay slot of another branch
4982 static void ds_assemble_entry(int i)
4984 int t = (cinfo[i].ba - start) >> 2;
4985 int ccadj_ = -CLOCK_ADJUST(1);
4987 instr_addr[t] = out;
4988 assem_debug("Assemble delay slot at %x\n",cinfo[i].ba);
4989 assem_debug("<->\n");
4990 drc_dbg_emit_do_cmp(t, ccadj_);
4991 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4992 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
4993 load_regs(regs[t].regmap_entry,regs[t].regmap,dops[t].rs1,dops[t].rs2);
4994 address_generation(t,®s[t],regs[t].regmap_entry);
4995 if (ram_offset && (dops[t].is_load || dops[t].is_store))
4996 load_reg(regs[t].regmap_entry,regs[t].regmap,ROREG);
4997 if (dops[t].is_store)
4998 load_reg(regs[t].regmap_entry,regs[t].regmap,INVCP);
5000 switch (dops[t].itype) {
5008 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
5011 assemble(t, ®s[t], ccadj_);
5013 store_regs_bt(regs[t].regmap,regs[t].dirty,cinfo[i].ba+4);
5014 load_regs_bt(regs[t].regmap,regs[t].dirty,cinfo[i].ba+4);
5015 if(internal_branch(cinfo[i].ba+4))
5016 assem_debug("branch: internal\n");
5018 assem_debug("branch: external\n");
5019 assert(internal_branch(cinfo[i].ba+4));
5020 add_to_linker(out,cinfo[i].ba+4,internal_branch(cinfo[i].ba+4));
5024 // Load 2 immediates optimizing for small code size
5025 static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
5027 emit_movimm(imm1,rt1);
5028 emit_movimm_from(imm1,rt1,imm2,rt2);
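// Emit the cycle count handling for branch i: work out the adjustment for
// the target (*adj), special-case obvious idle loops, update HOST_CCREG and
// register a CC_STUB that is taken when the count expires.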
5031 static void do_cc(int i, const signed char i_regmap[], int *adj,
5032 int addr, int taken, int invert)
5034 int count, count_plus2;
5038 if(dops[i].itype==RJUMP)
5042 //if(cinfo[i].ba>=start && cinfo[i].ba<(start+slen*4))
5043 if(internal_branch(cinfo[i].ba))
5045 t=(cinfo[i].ba-start)>>2;
5046 if(dops[t].is_ds) *adj=-CLOCK_ADJUST(1); // Branch into delay slot adds an extra cycle
5047 else *adj=cinfo[t].ccadj;
5053 count = cinfo[i].ccadj;
5054 count_plus2 = count + CLOCK_ADJUST(2);
5055 if(taken==TAKEN && i==(cinfo[i].ba-start)>>2 && source[i+1]==0) {
5057 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
5059 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
5060 emit_andimm(HOST_CCREG,3,HOST_CCREG);
5064 else if(*adj==0||invert) {
5065 int cycles = count_plus2;
5070 if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
5071 cycles=*adj+count+2-*adj;
5074 emit_addimm_and_set_flags(cycles, HOST_CCREG);
5080 emit_cmpimm(HOST_CCREG, -count_plus2);
5084 add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:count_plus2,i,addr,taken,0);
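// Assemble the out-of-line stub entered when the cycle count expires at a
// branch: write back dirty registers, store the correct pcaddr for the
// taken/not-taken/indirect case, call cc_interrupt and resume.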
5087 static void do_ccstub(int n)
5090 assem_debug("do_ccstub %x\n",start+(u_int)stubs[n].b*4);
5091 set_jump_target(stubs[n].addr, out);
5093 if (stubs[n].d != TAKEN) {
5094 wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
5097 if(internal_branch(cinfo[i].ba))
5098 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5102 // Save PC as return address
5103 emit_movimm(stubs[n].c,0);
5104 emit_writeword(0,&pcaddr);
5108 // Return address depends on which way the branch goes
5109 if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)
5111 int s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5112 int s2l=get_reg(branch_regs[i].regmap,dops[i].rs2);
5118 else if(dops[i].rs2==0)
5123 #ifdef DESTRUCTIVE_WRITEBACK
5125 if((branch_regs[i].dirty>>s1l)&1)
5126 emit_loadreg(dops[i].rs1,s1l);
5129 if((branch_regs[i].dirty>>s1l)&1)
5130 emit_loadreg(dops[i].rs2,s1l);
5133 if((branch_regs[i].dirty>>s2l)&1)
5134 emit_loadreg(dops[i].rs2,s2l);
5137 int addr=-1,alt=-1,ntaddr=-1;
5140 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5141 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5142 branch_regs[i].regmap[hr]!=dops[i].rs2 )
5150 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5151 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5152 branch_regs[i].regmap[hr]!=dops[i].rs2 )
5158 if ((dops[i].opcode & 0x3e) == 6) // BLEZ/BGTZ needs another register
5162 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5163 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5164 branch_regs[i].regmap[hr]!=dops[i].rs2 )
5170 assert(hr<HOST_REGS);
5172 if (dops[i].opcode == 4) // BEQ
5174 #ifdef HAVE_CMOV_IMM
5175 if(s2l>=0) emit_cmp(s1l,s2l);
5176 else emit_test(s1l,s1l);
5177 emit_cmov2imm_e_ne_compact(cinfo[i].ba,start+i*4+8,addr);
5179 emit_mov2imm_compact(cinfo[i].ba,addr,start+i*4+8,alt);
5180 if(s2l>=0) emit_cmp(s1l,s2l);
5181 else emit_test(s1l,s1l);
5182 emit_cmovne_reg(alt,addr);
5185 else if (dops[i].opcode == 5) // BNE
5187 #ifdef HAVE_CMOV_IMM
5188 if(s2l>=0) emit_cmp(s1l,s2l);
5189 else emit_test(s1l,s1l);
5190 emit_cmov2imm_e_ne_compact(start+i*4+8,cinfo[i].ba,addr);
5192 emit_mov2imm_compact(start+i*4+8,addr,cinfo[i].ba,alt);
5193 if(s2l>=0) emit_cmp(s1l,s2l);
5194 else emit_test(s1l,s1l);
5195 emit_cmovne_reg(alt,addr);
5198 else if (dops[i].opcode == 6) // BLEZ
5200 //emit_movimm(cinfo[i].ba,alt);
5201 //emit_movimm(start+i*4+8,addr);
5202 emit_mov2imm_compact(cinfo[i].ba,alt,start+i*4+8,addr);
5204 emit_cmovl_reg(alt,addr);
5206 else if (dops[i].opcode == 7) // BGTZ
5208 //emit_movimm(cinfo[i].ba,addr);
5209 //emit_movimm(start+i*4+8,ntaddr);
5210 emit_mov2imm_compact(cinfo[i].ba,addr,start+i*4+8,ntaddr);
5212 emit_cmovl_reg(ntaddr,addr);
5214 else if (dops[i].itype == SJUMP) // BLTZ/BGEZ
5216 //emit_movimm(cinfo[i].ba,alt);
5217 //emit_movimm(start+i*4+8,addr);
5219 emit_mov2imm_compact(cinfo[i].ba,
5220 (dops[i].opcode2 & 1) ? addr : alt, start + i*4 + 8,
5221 (dops[i].opcode2 & 1) ? alt : addr);
5223 emit_cmovs_reg(alt,addr);
5226 emit_movimm((dops[i].opcode2 & 1) ? cinfo[i].ba : start + i*4 + 8, addr);
5228 emit_writeword(addr, &pcaddr);
5231 if(dops[i].itype==RJUMP)
5233 int r=get_reg(branch_regs[i].regmap,dops[i].rs1);
5234 if (ds_writes_rjump_rs(i)) {
5235 r=get_reg(branch_regs[i].regmap,RTEMP);
5237 emit_writeword(r,&pcaddr);
5239 else {SysPrintf("Unknown branch type in do_ccstub\n");abort();}
5241 // Update cycle count
5242 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5243 if(stubs[n].a) emit_addimm(HOST_CCREG,(int)stubs[n].a,HOST_CCREG);
5244 emit_far_call(cc_interrupt);
5245 if(stubs[n].a) emit_addimm(HOST_CCREG,-(int)stubs[n].a,HOST_CCREG);
5246 if(stubs[n].d==TAKEN) {
5247 if(internal_branch(cinfo[i].ba))
5248 load_needed_regs(branch_regs[i].regmap,regs[(cinfo[i].ba-start)>>2].regmap_entry);
5249 else if(dops[i].itype==RJUMP) {
5250 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5251 emit_readword(&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5253 emit_loadreg(dops[i].rs1,get_reg(branch_regs[i].regmap,dops[i].rs1));
5255 }else if(stubs[n].d==NOTTAKEN) {
5256 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5257 else load_all_regs(branch_regs[i].regmap);
5259 load_all_regs(branch_regs[i].regmap);
5261 if (stubs[n].retaddr)
5262 emit_jmp(stubs[n].retaddr);
5264 do_jump_vaddr(stubs[n].e);
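// Record a branch location so it can be patched to its final target
// (internal or external) once the block has been assembled.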
5267 static void add_to_linker(void *addr, u_int target, int is_internal)
5269 assert(linkcount < ARRAY_SIZE(link_addr));
5270 link_addr[linkcount].addr = addr;
5271 link_addr[linkcount].target = target;
5272 link_addr[linkcount].internal = is_internal;
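// JAL: write the return address (PC+8) into the register holding r31,
// using the mini hash table insert when possible.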
5276 static void ujump_assemble_write_ra(int i)
5279 unsigned int return_address;
5280 rt=get_reg(branch_regs[i].regmap,31);
5281 //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5283 return_address=start+i*4+8;
5286 if(internal_branch(return_address)&&dops[i+1].rt1!=31) {
5287 int temp=-1; // note: must be ds-safe
5291 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5292 else emit_movimm(return_address,rt);
5300 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5303 if (!((regs[i].loadedconst >> rt) & 1))
5304 emit_movimm(return_address, rt); // PC into link register
5306 emit_prefetch(hash_table_get(return_address));
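// Assemble J/JAL: delay slot first, then write back registers for the
// target, update the cycle count and link the jump (directly if internal,
// via add_to_linker otherwise).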
5312 static void ujump_assemble(int i, const struct regstat *i_regs)
5314 if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
5315 address_generation(i+1,i_regs,regs[i].regmap_entry);
5317 int temp=get_reg(branch_regs[i].regmap,PTEMP);
5318 if(dops[i].rt1==31&&temp>=0)
5320 signed char *i_regmap=i_regs->regmap;
5321 int return_address=start+i*4+8;
5322 if(get_reg(branch_regs[i].regmap,31)>0)
5323 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5326 if (dops[i].rt1 == 31)
5327 ujump_assemble_write_ra(i); // writeback ra for DS
5328 ds_assemble(i+1,i_regs);
5329 uint64_t bc_unneeded=branch_regs[i].u;
5330 bc_unneeded|=1|(1LL<<dops[i].rt1);
5331 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5332 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
5334 cc=get_reg(branch_regs[i].regmap,CCREG);
5335 assert(cc==HOST_CCREG);
5336 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5338 if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
5340 do_cc(i,branch_regs[i].regmap,&adj,cinfo[i].ba,TAKEN,0);
5341 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5342 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5343 if(internal_branch(cinfo[i].ba))
5344 assem_debug("branch: internal\n");
5346 assem_debug("branch: external\n");
5347 if (internal_branch(cinfo[i].ba) && dops[(cinfo[i].ba-start)>>2].is_ds) {
5348 ds_assemble_entry(i);
5351 add_to_linker(out,cinfo[i].ba,internal_branch(cinfo[i].ba));
5356 static void rjump_assemble_write_ra(int i)
5358 int rt,return_address;
5359 rt=get_reg_w(branch_regs[i].regmap, dops[i].rt1);
5360 //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5362 return_address=start+i*4+8;
5366 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5369 if (!((regs[i].loadedconst >> rt) & 1))
5370 emit_movimm(return_address, rt); // PC into link register
5372 emit_prefetch(hash_table_get(return_address));
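// Assemble JR/JALR. The target is only known at run time, so all registers
// are written back and the jump is resolved through the (mini) hash table
// lookup.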
5376 static void rjump_assemble(int i, const struct regstat *i_regs)
5380 rs=get_reg(branch_regs[i].regmap,dops[i].rs1);
5382 if (ds_writes_rjump_rs(i)) {
5383 // Delay slot abuse, make a copy of the branch address register
5384 temp=get_reg(branch_regs[i].regmap,RTEMP);
5386 assert(regs[i].regmap[temp]==RTEMP);
5390 address_generation(i+1,i_regs,regs[i].regmap_entry);
5394 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5395 signed char *i_regmap=i_regs->regmap;
5396 int return_address=start+i*4+8;
5397 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5402 if(dops[i].rs1==31) {
5403 int rh=get_reg(regs[i].regmap,RHASH);
5404 if(rh>=0) do_preload_rhash(rh);
5407 if (dops[i].rt1 != 0)
5408 rjump_assemble_write_ra(i);
5409 ds_assemble(i+1,i_regs);
5410 uint64_t bc_unneeded=branch_regs[i].u;
5411 bc_unneeded|=1|(1LL<<dops[i].rt1);
5412 bc_unneeded&=~(1LL<<dops[i].rs1);
5413 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5414 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,CCREG);
5415 cc=get_reg(branch_regs[i].regmap,CCREG);
5416 assert(cc==HOST_CCREG);
5419 int rh=get_reg(branch_regs[i].regmap,RHASH);
5420 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5421 if(dops[i].rs1==31) {
5422 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5423 do_preload_rhtbl(ht);
5427 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
5428 #ifdef DESTRUCTIVE_WRITEBACK
5429 if((branch_regs[i].dirty>>rs)&1) {
5430 if(dops[i].rs1!=dops[i+1].rt1&&dops[i].rs1!=dops[i+1].rt2) {
5431 emit_loadreg(dops[i].rs1,rs);
5436 if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
5439 if(dops[i].rs1==31) {
5440 do_miniht_load(ht,rh);
5443 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5444 //if(adj) emit_addimm(cc,2*(cinfo[i].ccadj+2-adj),cc); // ??? - Shouldn't happen
5446 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), HOST_CCREG);
5447 add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
5448 if (dops[i+1].itype == RFE)
5449 // special case for RFE
5453 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
5455 if(dops[i].rs1==31) {
5456 do_miniht_jump(rs,rh,ht);
5463 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5464 if(dops[i].rt1!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
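// Assemble BEQ/BNE/BLEZ/BGTZ. Supports both out-of-order (delay slot
// assembled before the compare) and in-order assembly, inverting the
// condition when the target's register map does not match ours.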
5468 static void cjump_assemble(int i, const struct regstat *i_regs)
5470 const signed char *i_regmap = i_regs->regmap;
5473 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5474 assem_debug("match=%d\n",match);
5476 int unconditional=0,nop=0;
5478 int internal=internal_branch(cinfo[i].ba);
5479 if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
5480 if(!match) invert=1;
5481 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5482 if(i>(cinfo[i].ba-start)>>2) invert=1;
5485 invert=1; // because of near cond. branches
5489 s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5490 s2l=get_reg(branch_regs[i].regmap,dops[i].rs2);
5493 s1l=get_reg(i_regmap,dops[i].rs1);
5494 s2l=get_reg(i_regmap,dops[i].rs2);
5496 if(dops[i].rs1==0&&dops[i].rs2==0)
5498 if(dops[i].opcode&1) nop=1;
5499 else unconditional=1;
5500 //assert(dops[i].opcode!=5);
5501 //assert(dops[i].opcode!=7);
5502 //assert(dops[i].opcode!=0x15);
5503 //assert(dops[i].opcode!=0x17);
5505 else if(dops[i].rs1==0)
5510 else if(dops[i].rs2==0)
5516 // Out of order execution (delay slot first)
5518 address_generation(i+1,i_regs,regs[i].regmap_entry);
5519 ds_assemble(i+1,i_regs);
5521 uint64_t bc_unneeded=branch_regs[i].u;
5522 bc_unneeded&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
5524 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5525 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs2);
5526 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
5527 cc=get_reg(branch_regs[i].regmap,CCREG);
5528 assert(cc==HOST_CCREG);
5530 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5531 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?cinfo[i].ba:-1,unconditional);
5532 //assem_debug("cycle count (adj)\n");
5534 do_cc(i,branch_regs[i].regmap,&adj,cinfo[i].ba,TAKEN,0);
5535 if(i!=(cinfo[i].ba-start)>>2 || source[i+1]!=0) {
5536 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5537 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5539 assem_debug("branch: internal\n");
5541 assem_debug("branch: external\n");
5542 if (internal && dops[(cinfo[i].ba-start)>>2].is_ds) {
5543 ds_assemble_entry(i);
5546 add_to_linker(out,cinfo[i].ba,internal);
5549 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5550 if(((u_int)out)&7) emit_addnop(0);
5555 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
5558 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5561 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
5562 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5563 if(adj&&!invert) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5565 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5567 if(dops[i].opcode==4) // BEQ
5569 if(s2l>=0) emit_cmp(s1l,s2l);
5570 else emit_test(s1l,s1l);
5575 add_to_linker(out,cinfo[i].ba,internal);
5579 if(dops[i].opcode==5) // BNE
5581 if(s2l>=0) emit_cmp(s1l,s2l);
5582 else emit_test(s1l,s1l);
5587 add_to_linker(out,cinfo[i].ba,internal);
5591 if(dops[i].opcode==6) // BLEZ
5598 add_to_linker(out,cinfo[i].ba,internal);
5602 if(dops[i].opcode==7) // BGTZ
5609 add_to_linker(out,cinfo[i].ba,internal);
5614 if(taken) set_jump_target(taken, out);
5615 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5616 if (match && (!internal || !dops[(cinfo[i].ba-start)>>2].is_ds)) {
5618 emit_addimm(cc,-adj,cc);
5619 add_to_linker(out,cinfo[i].ba,internal);
5622 add_to_linker(out,cinfo[i].ba,internal*2);
5628 if(adj) emit_addimm(cc,-adj,cc);
5629 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5630 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5632 assem_debug("branch: internal\n");
5634 assem_debug("branch: external\n");
5635 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5636 ds_assemble_entry(i);
5639 add_to_linker(out,cinfo[i].ba,internal);
5643 set_jump_target(nottaken, out);
5646 if(nottaken1) set_jump_target(nottaken1, out);
5648 if(!invert) emit_addimm(cc,adj,cc);
5650 } // (!unconditional)
5654 // In-order execution (branch first)
5655 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
5656 if(!unconditional&&!nop) {
5657 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5659 if((dops[i].opcode&0x2f)==4) // BEQ
5661 if(s2l>=0) emit_cmp(s1l,s2l);
5662 else emit_test(s1l,s1l);
5666 if((dops[i].opcode&0x2f)==5) // BNE
5668 if(s2l>=0) emit_cmp(s1l,s2l);
5669 else emit_test(s1l,s1l);
5673 if((dops[i].opcode&0x2f)==6) // BLEZ
5679 if((dops[i].opcode&0x2f)==7) // BGTZ
5685 } // if(!unconditional)
5687 uint64_t ds_unneeded=branch_regs[i].u;
5688 ds_unneeded&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
5692 if(taken) set_jump_target(taken, out);
5693 assem_debug("1:\n");
5694 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5696 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5697 address_generation(i+1,&branch_regs[i],0);
5699 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5700 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5701 ds_assemble(i+1,&branch_regs[i]);
5702 cc=get_reg(branch_regs[i].regmap,CCREG);
5704 emit_loadreg(CCREG,cc=HOST_CCREG);
5705 // CHECK: Is the following instruction (fall thru) allocated ok?
5707 assert(cc==HOST_CCREG);
5708 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5709 do_cc(i,i_regmap,&adj,cinfo[i].ba,TAKEN,0);
5710 assem_debug("cycle count (adj)\n");
5711 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5712 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5714 assem_debug("branch: internal\n");
5716 assem_debug("branch: external\n");
5717 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5718 ds_assemble_entry(i);
5721 add_to_linker(out,cinfo[i].ba,internal);
5726 if(!unconditional) {
5727 if(nottaken1) set_jump_target(nottaken1, out);
5728 set_jump_target(nottaken, out);
5729 assem_debug("2:\n");
5730 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5732 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5733 address_generation(i+1,&branch_regs[i],0);
5735 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5736 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5737 ds_assemble(i+1,&branch_regs[i]);
5738 cc=get_reg(branch_regs[i].regmap,CCREG);
5740 // Cycle count isn't in a register, temporarily load it then write it out
5741 emit_loadreg(CCREG,HOST_CCREG);
5742 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), HOST_CCREG);
5745 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5746 emit_storereg(CCREG,HOST_CCREG);
5749 cc=get_reg(i_regmap,CCREG);
5750 assert(cc==HOST_CCREG);
5751 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
5754 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
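// Assemble the REGIMM branches (BLTZ/BGEZ and the BxxAL link variants),
// following the same out-of-order/in-order structure as cjump_assemble.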
5760 static void sjump_assemble(int i, const struct regstat *i_regs)
5762 const signed char *i_regmap = i_regs->regmap;
5765 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5766 assem_debug("smatch=%d ooo=%d\n", match, dops[i].ooo);
5768 int unconditional=0,nevertaken=0;
5770 int internal=internal_branch(cinfo[i].ba);
5771 if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
5772 if(!match) invert=1;
5773 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5774 if(i>(cinfo[i].ba-start)>>2) invert=1;
5777 invert=1; // because of near cond. branches
5780 //if(dops[i].opcode2>=0x10) return; // FIXME (BxxZAL)
5781 //assert(dops[i].opcode2<0x10||dops[i].rs1==0); // FIXME (BxxZAL)
5784 s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5787 s1l=get_reg(i_regmap,dops[i].rs1);
5791 if(dops[i].opcode2&1) unconditional=1;
5793 // These are never taken (r0 is never less than zero)
5794 //assert(dops[i].opcode2!=0);
5795 //assert(dops[i].opcode2!=2);
5796 //assert(dops[i].opcode2!=0x10);
5797 //assert(dops[i].opcode2!=0x12);
5801 // Out of order execution (delay slot first)
5803 address_generation(i+1,i_regs,regs[i].regmap_entry);
5804 ds_assemble(i+1,i_regs);
5806 uint64_t bc_unneeded=branch_regs[i].u;
5807 bc_unneeded&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
5809 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5810 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs1);
5811 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
5812 if(dops[i].rt1==31) {
5813 int rt,return_address;
5814 rt=get_reg(branch_regs[i].regmap,31);
5815 //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5817 // Save the PC even if the branch is not taken
5818 return_address=start+i*4+8;
5819 emit_movimm(return_address,rt); // PC into link register
5821 if(!nevertaken) emit_prefetch(hash_table_get(return_address));
5825 cc=get_reg(branch_regs[i].regmap,CCREG);
5826 assert(cc==HOST_CCREG);
5828 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5829 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?cinfo[i].ba:-1,unconditional);
5830 assem_debug("cycle count (adj)\n");
5832 do_cc(i,branch_regs[i].regmap,&adj,cinfo[i].ba,TAKEN,0);
5833 if(i!=(cinfo[i].ba-start)>>2 || source[i+1]!=0) {
5834 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5835 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5837 assem_debug("branch: internal\n");
5839 assem_debug("branch: external\n");
5840 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5841 ds_assemble_entry(i);
5844 add_to_linker(out,cinfo[i].ba,internal);
5847 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5848 if(((u_int)out)&7) emit_addnop(0);
5852 else if(nevertaken) {
5853 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
5856 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5859 void *nottaken = NULL;
5860 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5861 if(adj&&!invert) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5864 if ((dops[i].opcode2 & 1) == 0) // BLTZ/BLTZAL
5871 add_to_linker(out,cinfo[i].ba,internal);
5882 add_to_linker(out,cinfo[i].ba,internal);
5889 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5890 if (match && (!internal || !dops[(cinfo[i].ba - start) >> 2].is_ds)) {
5892 emit_addimm(cc,-adj,cc);
5893 add_to_linker(out,cinfo[i].ba,internal);
5896 add_to_linker(out,cinfo[i].ba,internal*2);
5902 if(adj) emit_addimm(cc,-adj,cc);
5903 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5904 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5906 assem_debug("branch: internal\n");
5908 assem_debug("branch: external\n");
5909 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5910 ds_assemble_entry(i);
5913 add_to_linker(out,cinfo[i].ba,internal);
5917 set_jump_target(nottaken, out);
5921 if(!invert) emit_addimm(cc,adj,cc);
5923 } // (!unconditional)
5927 // In-order execution (branch first)
5929 void *nottaken = NULL;
5930 if (!unconditional && !nevertaken) {
5932 emit_test(s1l, s1l);
5934 if (dops[i].rt1 == 31) {
5935 int rt, return_address;
5936 rt = get_reg(branch_regs[i].regmap,31);
5938 // Save the PC even if the branch is not taken
5939 return_address = start + i*4+8;
5940 emit_movimm(return_address, rt); // PC into link register
5942 emit_prefetch(hash_table_get(return_address));
5946 if (!unconditional && !nevertaken) {
5948 if (!(dops[i].opcode2 & 1)) // BLTZ/BLTZAL
5954 uint64_t ds_unneeded=branch_regs[i].u;
5955 ds_unneeded&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
5959 //assem_debug("1:\n");
5960 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5962 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5963 address_generation(i+1,&branch_regs[i],0);
5965 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5966 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5967 ds_assemble(i+1,&branch_regs[i]);
5968 cc=get_reg(branch_regs[i].regmap,CCREG);
5970 emit_loadreg(CCREG,cc=HOST_CCREG);
5971 // CHECK: Is the following instruction (fall thru) allocated ok?
5973 assert(cc==HOST_CCREG);
5974 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5975 do_cc(i,i_regmap,&adj,cinfo[i].ba,TAKEN,0);
5976 assem_debug("cycle count (adj)\n");
5977 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5978 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5980 assem_debug("branch: internal\n");
5982 assem_debug("branch: external\n");
5983 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5984 ds_assemble_entry(i);
5987 add_to_linker(out,cinfo[i].ba,internal);
5992 if(!unconditional) {
5995 set_jump_target(nottaken, out);
5997 assem_debug("1:\n");
5998 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5999 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
6000 address_generation(i+1,&branch_regs[i],0);
6002 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
6003 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
6004 ds_assemble(i+1,&branch_regs[i]);
6005 cc=get_reg(branch_regs[i].regmap,CCREG);
6007 // Cycle count isn't in a register, temporarily load it then write it out
6008 emit_loadreg(CCREG,HOST_CCREG);
6009 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), HOST_CCREG);
6012 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
6013 emit_storereg(CCREG,HOST_CCREG);
6016 cc=get_reg(i_regmap,CCREG);
6017 assert(cc==HOST_CCREG);
6018 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
6021 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
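// Sanity check that no guest register is allocated to two host registers.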
6027 static void check_regmap(signed char *regmap)
6031 for (i = 0; i < HOST_REGS; i++) {
6034 for (j = i + 1; j < HOST_REGS; j++)
6035 assert(regmap[i] != regmap[j]);
6041 #include <inttypes.h>
6042 static char insn[MAXBLOCK][10];
6044 #define set_mnemonic(i_, n_) \
6045 strcpy(insn[i_], n_)
6047 void print_regmap(const char *name, const signed char *regmap)
6051 fputs(name, stdout);
6052 for (i = 0; i < HOST_REGS; i++) {
6055 l = snprintf(buf, sizeof(buf), "$%d", regmap[i]);
6059 printf(" r%d=%s", i, buf);
6061 fputs("\n", stdout);
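// Debug (DISASM) pretty-printer for one decoded instruction.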
6065 void disassemble_inst(int i)
6067 if (dops[i].bt) printf("*"); else printf(" ");
6068 switch(dops[i].itype) {
6070 printf (" %x: %s %8x\n",start+i*4,insn[i],cinfo[i].ba);break;
6072 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2,i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):cinfo[i].ba);break;
6074 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6076 if (dops[i].opcode2 == 9 && dops[i].rt1 != 31)
6077 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1);
6079 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
6082 if(dops[i].opcode==0xf) //LUI
6083 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],dops[i].rt1,cinfo[i].imm&0xffff);
6085 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,cinfo[i].imm);
6089 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,cinfo[i].imm);
6093 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rs2,dops[i].rs1,cinfo[i].imm);
6097 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,dops[i].rs2);
6100 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2);
6103 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,cinfo[i].imm);
6106 if((dops[i].opcode2&0x1d)==0x10)
6107 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rt1);
6108 else if((dops[i].opcode2&0x1d)==0x11)
6109 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
6111 printf (" %x: %s\n",start+i*4,insn[i]);
6114 if(dops[i].opcode2==0)
6115 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC0
6116 else if(dops[i].opcode2==4)
6117 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC0
6118 else printf (" %x: %s\n",start+i*4,insn[i]);
6121 if(dops[i].opcode2<3)
6122 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC2
6123 else if(dops[i].opcode2>3)
6124 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC2
6125 else printf (" %x: %s\n",start+i*4,insn[i]);
6128 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,cinfo[i].imm);
6131 printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6134 //printf (" %s %8x\n",insn[i],source[i]);
6135 printf (" %x: %s\n",start+i*4,insn[i]);
6137 #ifndef REGMAP_PRINT
6140 printf("D: %x WD: %x U: %"PRIx64" hC: %x hWC: %x hLC: %x\n",
6141 regs[i].dirty, regs[i].wasdirty, unneeded_reg[i],
6142 regs[i].isconst, regs[i].wasconst, regs[i].loadedconst);
6143 print_regmap("pre: ", regmap_pre[i]);
6144 print_regmap("entry: ", regs[i].regmap_entry);
6145 print_regmap("map: ", regs[i].regmap);
6146 if (dops[i].is_jump) {
6147 print_regmap("bentry:", branch_regs[i].regmap_entry);
6148 print_regmap("bmap: ", branch_regs[i].regmap);
6152 #define set_mnemonic(i_, n_)
6153 static void disassemble_inst(int i) {}
6156 #define DRC_TEST_VAL 0x74657374
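// Startup self-test: emit two trivial blocks into the translation cache and
// run them to verify that generated code is actually executable.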
6158 static noinline void new_dynarec_test(void)
6160 int (*testfunc)(void);
6165 // check structure linkage
6166 if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
6168 SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
6171 SysPrintf("(%p) testing if we can run recompiled code @%p...\n",
6172 new_dynarec_test, out);
6173 ((volatile u_int *)NDRC_WRITE_OFFSET(out))[0]++; // make the cache dirty
6175 for (i = 0; i < ARRAY_SIZE(ret); i++) {
6176 out = ndrc->translation_cache;
6177 beginning = start_block();
6178 emit_movimm(DRC_TEST_VAL + i, 0); // test
6181 end_block(beginning);
6182 testfunc = beginning;
6183 ret[i] = testfunc();
6186 if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
6187 SysPrintf("test passed.\n");
6189 SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
6190 out = ndrc->translation_cache;
6193 // clear the state completely, instead of just marking
6194 // things invalid like invalidate_all_pages() does
6195 void new_dynarec_clear_full(void)
6198 out = ndrc->translation_cache;
6199 memset(invalid_code,1,sizeof(invalid_code));
6200 memset(hash_table,0xff,sizeof(hash_table));
6201 memset(mini_ht,-1,sizeof(mini_ht));
6202 memset(shadow,0,sizeof(shadow));
6204 expirep = EXPIRITY_OFFSET;
6205 pending_exception=0;
6208 inv_code_start=inv_code_end=~0;
6211 for (n = 0; n < ARRAY_SIZE(blocks); n++)
6212 blocks_clear(&blocks[n]);
6213 for (n = 0; n < ARRAY_SIZE(jumps); n++) {
6217 stat_clear(stat_blocks);
6218 stat_clear(stat_links);
6220 cycle_multiplier_old = Config.cycle_multiplier;
6221 new_dynarec_hacks_old = new_dynarec_hacks;
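// Allocate or map the translation cache (platform specific) and reset all
// dynarec state.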
6224 void new_dynarec_init(void)
6226 SysPrintf("Init new dynarec, ndrc size %x\n", (int)sizeof(*ndrc));
6231 #ifdef BASE_ADDR_DYNAMIC
6233 sceBlock = getVMBlock(); //sceKernelAllocMemBlockForVM("code", sizeof(*ndrc));
6235 SysPrintf("sceKernelAllocMemBlockForVM failed: %x\n", sceBlock);
6236 int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
6238 SysPrintf("sceKernelGetMemBlockBase failed: %x\n", ret);
6239 sceKernelOpenVMDomain();
6240 sceClibPrintf("translation_cache = 0x%08lx\n ", (long)ndrc->translation_cache);
6241 #elif defined(_MSC_VER)
6242 ndrc = VirtualAlloc(NULL, sizeof(*ndrc), MEM_COMMIT | MEM_RESERVE,
6243 PAGE_EXECUTE_READWRITE);
6244 #elif defined(HAVE_LIBNX)
6245 Result rc = jitCreate(&g_jit, sizeof(*ndrc));
6247 SysPrintf("jitCreate failed: %08x\n", rc);
6248 SysPrintf("jitCreate: RX: %p RW: %p type: %d\n", g_jit.rx_addr, g_jit.rw_addr, g_jit.type);
6249 jitTransitionToWritable(&g_jit);
6250 ndrc = g_jit.rx_addr;
6251 ndrc_write_ofs = (char *)g_jit.rw_addr - (char *)ndrc;
6252 memset(NDRC_WRITE_OFFSET(&ndrc->tramp), 0, sizeof(ndrc->tramp));
6254 uintptr_t desired_addr = 0;
6255 int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
6256 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
6260 desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
6262 #ifdef TC_WRITE_OFFSET
6263 // mostly for testing
6264 fd = open("/dev/shm/pcsxr", O_CREAT | O_RDWR, 0600);
6265 ftruncate(fd, sizeof(*ndrc));
6266 void *mw = mmap(NULL, sizeof(*ndrc), PROT_READ | PROT_WRITE,
6267 (flags = MAP_SHARED), fd, 0);
6268 assert(mw != MAP_FAILED);
6269 prot = PROT_READ | PROT_EXEC;
6271 ndrc = mmap((void *)desired_addr, sizeof(*ndrc), prot, flags, fd, 0);
6272 if (ndrc == MAP_FAILED) {
6273 SysPrintf("mmap() failed: %s\n", strerror(errno));
6276 #ifdef TC_WRITE_OFFSET
6277 ndrc_write_ofs = (char *)mw - (char *)ndrc;
6281 #ifndef NO_WRITE_EXEC
6282 // not all systems allow execute in data segment by default
6283 // size must be 4K aligned for 3DS?
6284 if (mprotect(ndrc, sizeof(*ndrc),
6285 PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
6286 SysPrintf("mprotect() failed: %s\n", strerror(errno));
6289 out = ndrc->translation_cache;
6290 new_dynarec_clear_full();
6292 // Copy this into local area so we don't have to put it in every literal pool
6293 invc_ptr=invalid_code;
6297 ram_offset = (uintptr_t)psxM - 0x80000000;
6299 SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
6300 SysPrintf("Mapped (RAM/scrp/ROM/LUTs/TC):\n");
6301 SysPrintf("%p/%p/%p/%p/%p\n", psxM, psxH, psxR, mem_rtab, out);
6304 void new_dynarec_cleanup(void)
6307 #ifdef BASE_ADDR_DYNAMIC
6309 // sceBlock is managed by retroarch's bootstrap code
6310 //sceKernelFreeMemBlock(sceBlock);
6312 #elif defined(HAVE_LIBNX)
6316 if (munmap(ndrc, sizeof(*ndrc)) < 0)
6317 SysPrintf("munmap() failed\n");
6321 for (n = 0; n < ARRAY_SIZE(blocks); n++)
6322 blocks_clear(&blocks[n]);
6323 for (n = 0; n < ARRAY_SIZE(jumps); n++) {
6327 stat_clear(stat_blocks);
6328 stat_clear(stat_links);
6329 new_dynarec_print_stats();
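// Translate a guest PC into a host pointer into RAM or BIOS and set *limit
// to the end of that region; addresses outside these regions are not
// handled here.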
6332 static u_int *get_source_start(u_int addr, u_int *limit)
6334 if (addr < 0x00800000
6335 || (0x80000000 <= addr && addr < 0x80800000)
6336 || (0xa0000000 <= addr && addr < 0xa0800000))
6338 // used for BIOS calls mostly?
6339 *limit = (addr & 0xa0600000) + 0x00200000;
6340 return (u_int *)(psxM + (addr & 0x1fffff));
6342 else if (!Config.HLE && (
6343 /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
6344 (0xbfc00000 <= addr && addr < 0xbfc80000)))
6346 // BIOS. The multiplier should be much higher as it's uncached 8bit mem,
6347 // but timings in PCSX are too tied to the interpreter's 2-per-insn assumption
6348 if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
6349 cycle_multiplier_active = 200;
6351 *limit = (addr & 0xfff00000) | 0x80000;
6352 return (u_int *)((u_char *)psxR + (addr&0x7ffff));
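// Scan forward from addr (at most 0x1000 bytes) looking for a jr $ra,
// used to guess where a function ends.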
6357 static u_int scan_for_ret(u_int addr)
6362 mem = get_source_start(addr, &limit);
6366 if (limit > addr + 0x1000)
6367 limit = addr + 0x1000;
6368 for (; addr < limit; addr += 4, mem++) {
6369 if (*mem == 0x03e00008) // jr $ra
6375 struct savestate_block {
6380 static int addr_cmp(const void *p1_, const void *p2_)
6382 const struct savestate_block *p1 = p1_, *p2 = p2_;
6383 return p1->addr - p2->addr;
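// Savestate support: store the start addresses (and speculated register
// flags) of currently clean blocks so they can be precompiled on load.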
6386 int new_dynarec_save_blocks(void *save, int size)
6388 struct savestate_block *sblocks = save;
6389 int maxcount = size / sizeof(sblocks[0]);
6390 struct savestate_block tmp_blocks[1024];
6391 struct block_info *block;
6392 int p, s, d, o, bcnt;
6396 for (p = 0; p < ARRAY_SIZE(blocks); p++) {
6398 for (block = blocks[p]; block != NULL; block = block->next) {
6399 if (block->is_dirty)
6401 tmp_blocks[bcnt].addr = block->start;
6402 tmp_blocks[bcnt].regflags = block->reg_sv_flags;
6407 qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
6409 addr = tmp_blocks[0].addr;
6410 for (s = d = 0; s < bcnt; s++) {
6411 if (tmp_blocks[s].addr < addr)
6413 if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
6414 tmp_blocks[d++] = tmp_blocks[s];
6415 addr = scan_for_ret(tmp_blocks[s].addr);
6418 if (o + d > maxcount)
6420 memcpy(&sblocks[o], tmp_blocks, d * sizeof(sblocks[0]));
6424 return o * sizeof(sblocks[0]);
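// Savestate support: re-mark still-matching blocks as clean and precompile
// the saved block list, temporarily faking GPR values so that address
// speculation has something to work with.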
6427 void new_dynarec_load_blocks(const void *save, int size)
6429 const struct savestate_block *sblocks = save;
6430 int count = size / sizeof(sblocks[0]);
6431 struct block_info *block;
6432 u_int regs_save[32];
6437 // restore clean blocks, if any
6438 for (page = 0, b = i = 0; page < ARRAY_SIZE(blocks); page++) {
6439 for (block = blocks[page]; block != NULL; block = block->next, b++) {
6440 if (!block->is_dirty)
6442 assert(block->source && block->copy);
6443 if (memcmp(block->source, block->copy, block->len))
6446 // see try_restore_block
6447 block->is_dirty = 0;
6448 mark_invalid_code(block->start, block->len, 0);
6452 inv_debug("load_blocks: %d/%d clean blocks\n", i, b);
6454 // change GPRs for speculation to at least partially work..
6455 memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
6456 for (i = 1; i < 32; i++)
6457 psxRegs.GPR.r[i] = 0x80000000;
6459 for (b = 0; b < count; b++) {
6460 for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
6462 psxRegs.GPR.r[i] = 0x1f800000;
6465 ndrc_get_addr_ht(sblocks[b].addr);
6467 for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
6469 psxRegs.GPR.r[i] = 0x80000000;
6473 memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
6476 void new_dynarec_print_stats(void)
6479 printf("cc %3d,%3d,%3d lu%6d,%3d,%3d c%3d inv%3d,%3d tc_offs %zu b %u,%u\n",
6480 stat_bc_pre, stat_bc_direct, stat_bc_restore,
6481 stat_ht_lookups, stat_jump_in_lookups, stat_restore_tries,
6482 stat_restore_compares, stat_inv_addr_calls, stat_inv_hits,
6483 out - ndrc->translation_cache, stat_blocks, stat_links);
6484 stat_bc_direct = stat_bc_pre = stat_bc_restore =
6485 stat_ht_lookups = stat_jump_in_lookups = stat_restore_tries =
6486 stat_restore_compares = stat_inv_addr_calls = stat_inv_hits = 0;
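// Game-specific compatibility hacks applied to the decoded block
// (skipped when NDHACK_NO_COMPAT_HACKS is enabled).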
6490 static int apply_hacks(void)
6493 if (HACK_ENABLED(NDHACK_NO_COMPAT_HACKS))
6495 /* special hack(s) */
6496 for (i = 0; i < slen - 4; i++)
6498 // lui a4, 0xf200; jal <rcnt_read>; addu a0, 2; slti v0, 28224
6499 if (source[i] == 0x3c04f200 && dops[i+1].itype == UJUMP
6500 && source[i+2] == 0x34840002 && dops[i+3].opcode == 0x0a
6501 && cinfo[i+3].imm == 0x6e40 && dops[i+3].rs1 == 2)
6503 SysPrintf("PE2 hack @%08x\n", start + (i+3)*4);
6504 dops[i + 3].itype = NOP;
6508 if (i > 10 && source[i-1] == 0 && source[i-2] == 0x03e00008
6509 && source[i-4] == 0x8fbf0018 && source[i-6] == 0x00c0f809
6510 && dops[i-7].itype == STORE)
6513 if (dops[i].itype == IMM16)
6515 // swl r2, 15(r6); swr r2, 12(r6); sw r6, *; jalr r6
6516 if (dops[i].itype == STORELR && dops[i].rs1 == 6
6517 && dops[i-1].itype == STORELR && dops[i-1].rs1 == 6)
6519 SysPrintf("F1 hack from %08x, old dst %08x\n", start, hack_addr);
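// Does op read ld_rt in a way that is affected by the load delay slot?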
6527 static int is_ld_use_hazard(int ld_rt, const struct decoded_insn *op)
6529 return ld_rt != 0 && (ld_rt == op->rs1 || ld_rt == op->rs2)
6530 && op->itype != LOADLR && op->itype != CJUMP && op->itype != SJUMP;
6533 static void force_intcall(int i)
6535 memset(&dops[i], 0, sizeof(dops[i]));
6536 dops[i].itype = INTCALL;
6537 dops[i].rs1 = CCREG;
6538 dops[i].is_exception = 1;
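// Decode a single MIPS instruction into dops[i]/cinfo[i]: mnemonic, itype,
// opcode fields, source/target registers, immediates and GTE register masks.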
6542 static void disassemble_one(int i, u_int src)
6544 unsigned int type, op, op2, op3;
6545 memset(&dops[i], 0, sizeof(dops[i]));
6546 memset(&cinfo[i], 0, sizeof(cinfo[i]));
6549 dops[i].opcode = op = src >> 26;
6552 set_mnemonic(i, "???");
6555 case 0x00: set_mnemonic(i, "special");
6559 case 0x00: set_mnemonic(i, "SLL"); type=SHIFTIMM; break;
6560 case 0x02: set_mnemonic(i, "SRL"); type=SHIFTIMM; break;
6561 case 0x03: set_mnemonic(i, "SRA"); type=SHIFTIMM; break;
6562 case 0x04: set_mnemonic(i, "SLLV"); type=SHIFT; break;
6563 case 0x06: set_mnemonic(i, "SRLV"); type=SHIFT; break;
6564 case 0x07: set_mnemonic(i, "SRAV"); type=SHIFT; break;
6565 case 0x08: set_mnemonic(i, "JR"); type=RJUMP; break;
6566 case 0x09: set_mnemonic(i, "JALR"); type=RJUMP; break;
6567 case 0x0C: set_mnemonic(i, "SYSCALL"); type=SYSCALL; break;
6568 case 0x0D: set_mnemonic(i, "BREAK"); type=SYSCALL; break;
6569 case 0x10: set_mnemonic(i, "MFHI"); type=MOV; break;
6570 case 0x11: set_mnemonic(i, "MTHI"); type=MOV; break;
6571 case 0x12: set_mnemonic(i, "MFLO"); type=MOV; break;
6572 case 0x13: set_mnemonic(i, "MTLO"); type=MOV; break;
6573 case 0x18: set_mnemonic(i, "MULT"); type=MULTDIV; break;
6574 case 0x19: set_mnemonic(i, "MULTU"); type=MULTDIV; break;
6575 case 0x1A: set_mnemonic(i, "DIV"); type=MULTDIV; break;
6576 case 0x1B: set_mnemonic(i, "DIVU"); type=MULTDIV; break;
6577 case 0x20: set_mnemonic(i, "ADD"); type=ALU; break;
6578 case 0x21: set_mnemonic(i, "ADDU"); type=ALU; break;
6579 case 0x22: set_mnemonic(i, "SUB"); type=ALU; break;
6580 case 0x23: set_mnemonic(i, "SUBU"); type=ALU; break;
6581 case 0x24: set_mnemonic(i, "AND"); type=ALU; break;
6582 case 0x25: set_mnemonic(i, "OR"); type=ALU; break;
6583 case 0x26: set_mnemonic(i, "XOR"); type=ALU; break;
6584 case 0x27: set_mnemonic(i, "NOR"); type=ALU; break;
6585 case 0x2A: set_mnemonic(i, "SLT"); type=ALU; break;
6586 case 0x2B: set_mnemonic(i, "SLTU"); type=ALU; break;
6589 case 0x01: set_mnemonic(i, "regimm");
6591 op2 = (src >> 16) & 0x1f;
6594 case 0x10: set_mnemonic(i, "BLTZAL"); break;
6595 case 0x11: set_mnemonic(i, "BGEZAL"); break;
6598 set_mnemonic(i, "BGEZ");
6600 set_mnemonic(i, "BLTZ");
6603 case 0x02: set_mnemonic(i, "J"); type=UJUMP; break;
6604 case 0x03: set_mnemonic(i, "JAL"); type=UJUMP; break;
6605 case 0x04: set_mnemonic(i, "BEQ"); type=CJUMP; break;
6606 case 0x05: set_mnemonic(i, "BNE"); type=CJUMP; break;
6607 case 0x06: set_mnemonic(i, "BLEZ"); type=CJUMP; break;
6608 case 0x07: set_mnemonic(i, "BGTZ"); type=CJUMP; break;
6609 case 0x08: set_mnemonic(i, "ADDI"); type=IMM16; break;
6610 case 0x09: set_mnemonic(i, "ADDIU"); type=IMM16; break;
6611 case 0x0A: set_mnemonic(i, "SLTI"); type=IMM16; break;
6612 case 0x0B: set_mnemonic(i, "SLTIU"); type=IMM16; break;
6613 case 0x0C: set_mnemonic(i, "ANDI"); type=IMM16; break;
6614 case 0x0D: set_mnemonic(i, "ORI"); type=IMM16; break;
6615 case 0x0E: set_mnemonic(i, "XORI"); type=IMM16; break;
6616 case 0x0F: set_mnemonic(i, "LUI"); type=IMM16; break;
6617 case 0x10: set_mnemonic(i, "COP0");
6618 op2 = (src >> 21) & 0x1f;
6623 case 0x01: case 0x02: case 0x06: case 0x08: type = INTCALL; break;
6624 case 0x10: set_mnemonic(i, "RFE"); type=RFE; break;
6625 default: type = OTHER; break;
6633 set_mnemonic(i, "MFC0");
6634 rd = (src >> 11) & 0x1F;
6635 if (!(0x00000417u & (1u << rd)))
6638 case 0x04: set_mnemonic(i, "MTC0"); type=COP0; break;
6640 case 0x06: type = INTCALL; break;
6641 default: type = OTHER; break;
6644 case 0x11: set_mnemonic(i, "COP1");
6645 op2 = (src >> 21) & 0x1f;
6647 case 0x12: set_mnemonic(i, "COP2");
6648 op2 = (src >> 21) & 0x1f;
6651 if (gte_handlers[src & 0x3f] != NULL) {
6653 if (gte_regnames[src & 0x3f] != NULL)
6654 strcpy(insn[i], gte_regnames[src & 0x3f]);
6656 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", src & 0x3f);
6663 case 0x00: set_mnemonic(i, "MFC2"); type=COP2; break;
6664 case 0x02: set_mnemonic(i, "CFC2"); type=COP2; break;
6665 case 0x04: set_mnemonic(i, "MTC2"); type=COP2; break;
6666 case 0x06: set_mnemonic(i, "CTC2"); type=COP2; break;
6669 case 0x13: set_mnemonic(i, "COP3");
6670 op2 = (src >> 21) & 0x1f;
6672 case 0x20: set_mnemonic(i, "LB"); type=LOAD; break;
6673 case 0x21: set_mnemonic(i, "LH"); type=LOAD; break;
6674 case 0x22: set_mnemonic(i, "LWL"); type=LOADLR; break;
6675 case 0x23: set_mnemonic(i, "LW"); type=LOAD; break;
6676 case 0x24: set_mnemonic(i, "LBU"); type=LOAD; break;
6677 case 0x25: set_mnemonic(i, "LHU"); type=LOAD; break;
6678 case 0x26: set_mnemonic(i, "LWR"); type=LOADLR; break;
6679 case 0x28: set_mnemonic(i, "SB"); type=STORE; break;
6680 case 0x29: set_mnemonic(i, "SH"); type=STORE; break;
6681 case 0x2A: set_mnemonic(i, "SWL"); type=STORELR; break;
6682 case 0x2B: set_mnemonic(i, "SW"); type=STORE; break;
6683 case 0x2E: set_mnemonic(i, "SWR"); type=STORELR; break;
6684 case 0x32: set_mnemonic(i, "LWC2"); type=C2LS; break;
6685 case 0x3A: set_mnemonic(i, "SWC2"); type=C2LS; break;
6687 if (Config.HLE && (src & 0x03ffffff) < ARRAY_SIZE(psxHLEt)) {
6688 set_mnemonic(i, "HLECALL");
6695 if (type == INTCALL)
6696 SysPrintf("NI %08x @%08x (%08x)\n", src, start + i*4, start);
6698 dops[i].opcode2=op2;
6699 /* Get registers/immediates */
6701 gte_rs[i]=gte_rt[i]=0;
6708 dops[i].rs1 = (src >> 21) & 0x1f;
6709 dops[i].rt1 = (src >> 16) & 0x1f;
6710 cinfo[i].imm = (short)src;
6714 dops[i].rs1 = (src >> 21) & 0x1f;
6715 dops[i].rs2 = (src >> 16) & 0x1f;
6716 cinfo[i].imm = (short)src;
6719 // LWL/LWR only load part of the register,
6720 // therefore the target register must be treated as a source too
6721 dops[i].rs1 = (src >> 21) & 0x1f;
6722 dops[i].rs2 = (src >> 16) & 0x1f;
6723 dops[i].rt1 = (src >> 16) & 0x1f;
6724 cinfo[i].imm = (short)src;
6727 if (op==0x0f) dops[i].rs1=0; // LUI instruction has no source register
6728 else dops[i].rs1 = (src >> 21) & 0x1f;
6730 dops[i].rt1 = (src >> 16) & 0x1f;
6731 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
6732 cinfo[i].imm = (unsigned short)src;
6734 cinfo[i].imm = (short)src;
6738 // The JAL instruction writes to r31.
6745 dops[i].rs1 = (src >> 21) & 0x1f;
6746 // The JALR instruction writes to rd.
6748 dops[i].rt1 = (src >> 11) & 0x1f;
6753 dops[i].rs1 = (src >> 21) & 0x1f;
6754 dops[i].rs2 = (src >> 16) & 0x1f;
6755 if(op&2) { // BGTZ/BLEZ
6760 dops[i].rs1 = (src >> 21) & 0x1f;
6761 dops[i].rs2 = CCREG;
6762 if (op2 == 0x10 || op2 == 0x11) { // BxxAL
6764 // NOTE: If the branch is not taken, r31 is still overwritten
6768 dops[i].rs1=(src>>21)&0x1f; // source
6769 dops[i].rs2=(src>>16)&0x1f; // subtract amount
6770 dops[i].rt1=(src>>11)&0x1f; // destination
6773 dops[i].rs1=(src>>21)&0x1f; // source
6774 dops[i].rs2=(src>>16)&0x1f; // divisor
6779 if(op2==0x10) dops[i].rs1=HIREG; // MFHI
6780 if(op2==0x11) dops[i].rt1=HIREG; // MTHI
6781 if(op2==0x12) dops[i].rs1=LOREG; // MFLO
6782 if(op2==0x13) dops[i].rt1=LOREG; // MTLO
6783 if((op2&0x1d)==0x10) dops[i].rt1=(src>>11)&0x1f; // MFxx
6784 if((op2&0x1d)==0x11) dops[i].rs1=(src>>21)&0x1f; // MTxx
6787 dops[i].rs1=(src>>16)&0x1f; // target of shift
6788 dops[i].rs2=(src>>21)&0x1f; // shift amount
6789 dops[i].rt1=(src>>11)&0x1f; // destination
6792 dops[i].rs1=(src>>16)&0x1f;
6794 dops[i].rt1=(src>>11)&0x1f;
6795 cinfo[i].imm=(src>>6)&0x1f;
6798 if(op2==0) dops[i].rt1=(src>>16)&0x1F; // MFC0
6799 if(op2==4) dops[i].rs1=(src>>16)&0x1F; // MTC0
6800 if(op2==4&&((src>>11)&0x1e)==12) dops[i].rs2=CCREG;
6803 if(op2<3) dops[i].rt1=(src>>16)&0x1F; // MFC2/CFC2
6804 if(op2>3) dops[i].rs1=(src>>16)&0x1F; // MTC2/CTC2
6805 int gr=(src>>11)&0x1F;
6808 case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
6809 case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
6810 case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
6811 case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
6815 dops[i].rs1=(src>>21)&0x1F;
6816 cinfo[i].imm=(short)src;
6817 if(op==0x32) gte_rt[i]=1ll<<((src>>16)&0x1F); // LWC2
6818 else gte_rs[i]=1ll<<((src>>16)&0x1F); // SWC2
6821 gte_rs[i]=gte_reg_reads[src&0x3f];
6822 gte_rt[i]=gte_reg_writes[src&0x3f];
6823 gte_rt[i]|=1ll<<63; // every op changes flags
6824 if((src&0x3f)==GTE_MVMVA) {
6825 int v = (src >> 15) & 3;
6826 gte_rs[i]&=~0xe3fll;
6827 if(v==3) gte_rs[i]|=0xe00ll;
6828 else gte_rs[i]|=3ll<<(v*2);
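// Pass 1: decode instructions and branch targets until the block ends,
// divert rare messy cases (branches/exceptions/load delays in delay slots)
// to the interpreter, and decide where to stop compiling.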
6841 static noinline void pass1_disassemble(u_int pagelimit)
6843 int i, j, done = 0, ni_count = 0;
6845 for (i = 0; !done; i++)
6847 int force_j_to_interpreter = 0;
6848 unsigned int type, op, op2;
6850 disassemble_one(i, source[i]);
6851 type = dops[i].itype;
6852 op = dops[i].opcode;
6853 op2 = dops[i].opcode2;
6855 /* Calculate branch target addresses */
6857 cinfo[i].ba=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
6858 else if(type==CJUMP&&dops[i].rs1==dops[i].rs2&&(op&1))
6859 cinfo[i].ba=start+i*4+8; // Ignore never taken branch
6860 else if(type==SJUMP&&dops[i].rs1==0&&!(op2&1))
6861 cinfo[i].ba=start+i*4+8; // Ignore never taken branch
6862 else if(type==CJUMP||type==SJUMP)
6863 cinfo[i].ba=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
6865 /* simplify always (not)taken branches */
6866 if (type == CJUMP && dops[i].rs1 == dops[i].rs2) {
6867 dops[i].rs1 = dops[i].rs2 = 0;
6869 dops[i].itype = type = UJUMP;
6870 dops[i].rs2 = CCREG;
6873 else if (type == SJUMP && dops[i].rs1 == 0 && (op2 & 1))
6874 dops[i].itype = type = UJUMP;
6876 dops[i].is_jump = type == RJUMP || type == UJUMP || type == CJUMP || type == SJUMP;
6877 dops[i].is_ujump = type == RJUMP || type == UJUMP;
6878 dops[i].is_load = type == LOAD || type == LOADLR || op == 0x32; // LWC2
6879 dops[i].is_delay_load = (dops[i].is_load || (source[i] & 0xf3d00000) == 0x40000000); // MFC/CFC
6880 dops[i].is_store = type == STORE || type == STORELR || op == 0x3a; // SWC2
6881 dops[i].is_exception = type == SYSCALL || type == HLECALL || type == INTCALL;
6882 dops[i].may_except = dops[i].is_exception || (type == ALU && (op2 == 0x20 || op2 == 0x22)) || op == 8;
6884 if (((op & 0x37) == 0x21 || op == 0x25) // LH/SH/LHU
6885 && ((cinfo[i].imm & 1) || Config.PreciseExceptions))
6886 dops[i].may_except = 1;
6887 if (((op & 0x37) == 0x23 || (op & 0x37) == 0x32) // LW/SW/LWC2/SWC2
6888 && ((cinfo[i].imm & 3) || Config.PreciseExceptions))
6889 dops[i].may_except = 1;
6891 /* rare messy cases to just pass over to the interpreter */
6892 if (i > 0 && dops[i-1].is_jump) {
6894 // branch in delay slot?
6895 if (dops[i].is_jump) {
6896 // don't handle first branch and call interpreter if it's hit
6897 SysPrintf("branch in DS @%08x (%08x)\n", start + i*4, start);
6898 force_j_to_interpreter = 1;
6900 // load delay detection through a branch
6901 else if (dops[i].is_delay_load && dops[i].rt1 != 0) {
6902 const struct decoded_insn *dop = NULL;
6904 if (cinfo[i-1].ba != -1) {
6905 t = (cinfo[i-1].ba - start) / 4;
6906 if (t < 0 || t > i) {
6908 u_int *mem = get_source_start(cinfo[i-1].ba, &limit);
6910 disassemble_one(MAXBLOCK - 1, mem[0]);
6911 dop = &dops[MAXBLOCK - 1];
6917 if ((dop && is_ld_use_hazard(dops[i].rt1, dop))
6918 || (!dop && Config.PreciseExceptions)) {
6919 // jump target wants DS result - potential load delay effect
6920 SysPrintf("load delay in DS @%08x (%08x)\n", start + i*4, start);
6921 force_j_to_interpreter = 1;
6922 if (0 <= t && t < i)
6923 dops[t + 1].bt = 1; // expected return from interpreter
6925 else if(i>=2&&dops[i-2].rt1==2&&dops[i].rt1==2&&dops[i].rs1!=2&&dops[i].rs2!=2&&dops[i-1].rs1!=2&&dops[i-1].rs2!=2&&
6926 !(i>=3&&dops[i-3].is_jump)) {
6927 // v0 overwrite like this is a sign of trouble, bail out
6928 SysPrintf("v0 overwrite @%08x (%08x)\n", start + i*4, start);
6929 force_j_to_interpreter = 1;
6933 else if (i > 0 && dops[i-1].is_delay_load
6934 && is_ld_use_hazard(dops[i-1].rt1, &dops[i])
6935 && (i < 2 || !dops[i-2].is_ujump)) {
6936 SysPrintf("load delay @%08x (%08x)\n", start + i*4, start);
6937 for (j = i - 1; j > 0 && dops[j-1].is_delay_load; j--)
6938 if (dops[j-1].rt1 != dops[i-1].rt1)
6940 force_j_to_interpreter = 1;
6942 if (force_j_to_interpreter) {
6945 i = j; // don't compile the problematic branch/load/etc
6947 if (dops[i].is_exception && i > 0 && dops[i-1].is_jump) {
6948 SysPrintf("exception in DS @%08x (%08x)\n", start + i*4, start);
6953 if (i >= 2 && (source[i-2] & 0xffe0f800) == 0x40806000) // MTC0 $12
6955 if (i >= 1 && (source[i-1] & 0xffe0f800) == 0x40806800) // MTC0 $13
6958 /* Is this the end of the block? */
6959 if (i > 0 && dops[i-1].is_ujump) {
6960 if (dops[i-1].rt1 == 0) { // not jal
6961 int found_bbranch = 0, t = (cinfo[i-1].ba - start) / 4;
6962 if ((u_int)(t - i) < 64 && start + (t+64)*4 < pagelimit) {
6963 // scan for a branch back to i+1
6964 for (j = t; j < t + 64; j++) {
6965 int tmpop = source[j] >> 26;
6966 if (tmpop == 1 || ((tmpop & ~3) == 4)) {
6967 int t2 = j + 1 + (int)(signed short)source[j];
6969 //printf("blk expand %08x<-%08x\n", start + (i+1)*4, start + j*4);
6980 if(stop_after_jal) done=1;
6982 if((source[i+1]&0xfc00003f)==0x0d) done=1;
6984 // Don't recompile stuff that's already compiled
6985 if(check_addr(start+i*4+4)) done=1;
6986 // Don't get too close to the limit
6987 if(i>MAXBLOCK/2) done=1;
6989 if (dops[i].itype == HLECALL)
6991 else if (dops[i].itype == INTCALL)
6993 else if (dops[i].is_exception)
6994 done = stop_after_jal ? 1 : 2;
6996 // Does the block continue due to a branch?
6999 if(cinfo[j].ba==start+i*4) done=j=0; // Branch into delay slot
7000 if(cinfo[j].ba==start+i*4+4) done=j=0;
7001 if(cinfo[j].ba==start+i*4+8) done=j=0;
7004 //assert(i<MAXBLOCK-1);
7005 if(start+i*4==pagelimit-4) done=1;
7006 assert(start+i*4<pagelimit);
7007 if (i==MAXBLOCK-1) done=1;
7008 // Stop if we're compiling junk
7009 if (dops[i].itype == INTCALL && (++ni_count > 8 || dops[i].opcode == 0x11)) {
7010 done=stop_after_jal=1;
7011 SysPrintf("Disabled speculative precompilation\n");
7014 while (i > 0 && dops[i-1].is_jump)
7017 assert(!dops[i-1].is_jump);
7021 // Basic liveness analysis for MIPS registers
7022 static noinline void pass2_unneeded_regs(int istart,int iend,int r)
7025 uint64_t u,gte_u,b,gte_b;
7026 uint64_t temp_u,temp_gte_u=0;
7027 uint64_t gte_u_unknown=0;
7028 if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
7032 gte_u=gte_u_unknown;
7034 //u=unneeded_reg[iend+1];
7036 gte_u=gte_unneeded[iend+1];
7039 for (i=iend;i>=istart;i--)
7041 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
7044 // If subroutine call, flag return address as a possible branch target
7045 if(dops[i].rt1==31 && i<slen-2) dops[i+2].bt=1;
7047 if(cinfo[i].ba<start || cinfo[i].ba>=(start+slen*4))
7049 // Branch out of this block, flush all regs
7051 gte_u=gte_u_unknown;
7052 branch_unneeded_reg[i]=u;
7053 // Merge in delay slot
7054 u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7055 u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7058 gte_u&=~gte_rs[i+1];
7062 // Internal branch, flag target
7063 dops[(cinfo[i].ba-start)>>2].bt=1;
7064 if(cinfo[i].ba<=start+i*4) {
7066 if(dops[i].is_ujump)
7068 // Unconditional branch
7072 // Conditional branch (not taken case)
7073 temp_u=unneeded_reg[i+2];
7074 temp_gte_u&=gte_unneeded[i+2];
7076 // Merge in delay slot
7077 temp_u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7078 temp_u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7080 temp_gte_u|=gte_rt[i+1];
7081 temp_gte_u&=~gte_rs[i+1];
7082 temp_u|=(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2);
7083 temp_u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7085 temp_gte_u|=gte_rt[i];
7086 temp_gte_u&=~gte_rs[i];
7087 unneeded_reg[i]=temp_u;
7088 gte_unneeded[i]=temp_gte_u;
7089 // Only go three levels deep. This recursion can take an
7090 // excessive amount of time if there are a lot of nested loops.
7092 pass2_unneeded_regs((cinfo[i].ba-start)>>2,i-1,r+1);
7094 unneeded_reg[(cinfo[i].ba-start)>>2]=1;
7095 gte_unneeded[(cinfo[i].ba-start)>>2]=gte_u_unknown;
7098 if (dops[i].is_ujump)
7100 // Unconditional branch
7101 u=unneeded_reg[(cinfo[i].ba-start)>>2];
7102 gte_u=gte_unneeded[(cinfo[i].ba-start)>>2];
7103 branch_unneeded_reg[i]=u;
7104 // Merge in delay slot
7105 u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7106 u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7109 gte_u&=~gte_rs[i+1];
7111 // Conditional branch
7112 b=unneeded_reg[(cinfo[i].ba-start)>>2];
7113 gte_b=gte_unneeded[(cinfo[i].ba-start)>>2];
7114 branch_unneeded_reg[i]=b;
7115 // Branch delay slot
7116 b|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7117 b&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7120 gte_b&=~gte_rs[i+1];
7124 branch_unneeded_reg[i]&=unneeded_reg[i+2];
7126 branch_unneeded_reg[i]=1;
7133 // Written registers are unneeded
7134 u|=1LL<<dops[i].rt1;
7135 u|=1LL<<dops[i].rt2;
7137 // Accessed registers are needed
7138 u&=~(1LL<<dops[i].rs1);
7139 u&=~(1LL<<dops[i].rs2);
7141 if(gte_rs[i]&&dops[i].rt1&&(unneeded_reg[i+1]&(1ll<<dops[i].rt1)))
7142 gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
7143 if (dops[i].may_except || dops[i].itype == RFE)
7145 // SYSCALL instruction, etc or conditional exception
7148 // Source-target dependencies
7149 // R0 is always unneeded
7153 gte_unneeded[i]=gte_u;
7155 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
7158 for(r=1;r<=CCREG;r++) {
7159 if((unneeded_reg[i]>>r)&1) {
7160 if(r==HIREG) printf(" HI");
7161 else if(r==LOREG) printf(" LO");
7162 else printf(" r%d",r);
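// Pass 3: forward scan over the block allocating host registers for each
// instruction, tracking constness/dirtiness and building the regmap_entry
// needed at branch targets.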
7170 static noinline void pass3_register_alloc(u_int addr)
7172 struct regstat current; // Current register allocations/status
7173 clear_all_regs(current.regmap_entry);
7174 clear_all_regs(current.regmap);
7175 current.wasdirty = current.dirty = 0;
7176 current.u = unneeded_reg[0];
7177 alloc_reg(&current, 0, CCREG);
7178 dirty_reg(&current, CCREG);
7179 current.wasconst = 0;
7180 current.isconst = 0;
7181 current.loadedconst = 0;
7182 current.noevict = 0;
7183 //current.waswritten = 0;
7190 // First instruction is delay slot
7201 for(hr=0;hr<HOST_REGS;hr++)
7203 // Is this really necessary?
7204 if(current.regmap[hr]==0) current.regmap[hr]=-1;
7207 //current.waswritten=0;
7210 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
7211 regs[i].wasconst=current.isconst;
7212 regs[i].wasdirty=current.dirty;
7216 regs[i].loadedconst=0;
7217 if (!dops[i].is_jump) {
7219 current.u=unneeded_reg[i+1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7226 current.u=branch_unneeded_reg[i]&~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7227 current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7230 SysPrintf("oops, branch at end of block with no delay slot @%08x\n", start + i*4);
7236 ds=0; // Skip delay slot, already allocated as part of branch
7237 // ...but we need to alloc it in case something jumps here
7239 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
7241 current.u=branch_unneeded_reg[i-1];
7243 current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7245 struct regstat temp;
7246 memcpy(&temp,&current,sizeof(current));
7247 temp.wasdirty=temp.dirty;
7248 // TODO: Take into account unconditional branches, as below
7249 delayslot_alloc(&temp,i);
7250 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
7251 regs[i].wasdirty=temp.wasdirty;
7252 regs[i].dirty=temp.dirty;
7256 // Create entry (branch target) regmap
7257 for(hr=0;hr<HOST_REGS;hr++)
7259 int r=temp.regmap[hr];
7261 if(r!=regmap_pre[i][hr]) {
7262 regs[i].regmap_entry[hr]=-1;
7267 if((current.u>>r)&1) {
7268 regs[i].regmap_entry[hr]=-1;
7269 regs[i].regmap[hr]=-1;
7270 //Don't clear regs in the delay slot as the branch might need them
7271 //current.regmap[hr]=-1;
7273 regs[i].regmap_entry[hr]=r;
7276 // First instruction expects CCREG to be allocated
7277 if(i==0&&hr==HOST_CCREG)
7278 regs[i].regmap_entry[hr]=CCREG;
7280 regs[i].regmap_entry[hr]=-1;
7284 else { // Not delay slot
7285 current.noevict = 0;
7286 switch(dops[i].itype) {
7288 //current.isconst=0; // DEBUG
7289 //current.wasconst=0; // DEBUG
7290 //regs[i].wasconst=0; // DEBUG
7291 clear_const(&current,dops[i].rt1);
7292 alloc_cc(&current,i);
7293 dirty_reg(&current,CCREG);
7294 if (dops[i].rt1==31) {
7295 alloc_reg(&current,i,31);
7296 dirty_reg(&current,31);
7297 //assert(dops[i+1].rs1!=31&&dops[i+1].rs2!=31);
7298 //assert(dops[i+1].rt1!=dops[i].rt1);
7300 alloc_reg(&current,i,PTEMP);
7304 delayslot_alloc(&current,i+1);
7305 //current.isconst=0; // DEBUG
7309 //current.isconst=0;
7310 //current.wasconst=0;
7311 //regs[i].wasconst=0;
7312 clear_const(&current,dops[i].rs1);
7313 clear_const(&current,dops[i].rt1);
7314 alloc_cc(&current,i);
7315 dirty_reg(&current,CCREG);
7316 if (!ds_writes_rjump_rs(i)) {
7317 alloc_reg(&current,i,dops[i].rs1);
7318 if (dops[i].rt1!=0) {
7319 alloc_reg(&current,i,dops[i].rt1);
7320 dirty_reg(&current,dops[i].rt1);
7322 alloc_reg(&current,i,PTEMP);
7326 if(dops[i].rs1==31) { // JALR
7327 alloc_reg(&current,i,RHASH);
7328 alloc_reg(&current,i,RHTBL);
7331 delayslot_alloc(&current,i+1);
7333 // The delay slot overwrites our source register,
7334 // allocate a temporary register to hold the old value.
7338 delayslot_alloc(&current,i+1);
7340 alloc_reg(&current,i,RTEMP);
7342 //current.isconst=0; // DEBUG
7347 //current.isconst=0;
7348 //current.wasconst=0;
7349 //regs[i].wasconst=0;
7350 clear_const(&current,dops[i].rs1);
7351 clear_const(&current,dops[i].rs2);
7352 if((dops[i].opcode&0x3E)==4) // BEQ/BNE
7354 alloc_cc(&current,i);
7355 dirty_reg(&current,CCREG);
7356 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7357 if(dops[i].rs2) alloc_reg(&current,i,dops[i].rs2);
7358 if((dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2))||
7359 (dops[i].rs2&&(dops[i].rs2==dops[i+1].rt1||dops[i].rs2==dops[i+1].rt2))) {
7360 // The delay slot overwrites one of our conditions.
7361 // Allocate the branch condition registers instead.
7365 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7366 if(dops[i].rs2) alloc_reg(&current,i,dops[i].rs2);
7371 delayslot_alloc(&current,i+1);
7375 if((dops[i].opcode&0x3E)==6) // BLEZ/BGTZ
7377 alloc_cc(&current,i);
7378 dirty_reg(&current,CCREG);
7379 alloc_reg(&current,i,dops[i].rs1);
7380 if(dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2)) {
7381 // The delay slot overwrites one of our conditions.
7382 // Allocate the branch condition registers instead.
7386 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7391 delayslot_alloc(&current,i+1);
7395 // Don't alloc the delay slot yet because we might not execute it
7396 if((dops[i].opcode&0x3E)==0x14) // BEQL/BNEL
7401 alloc_cc(&current,i);
7402 dirty_reg(&current,CCREG);
7403 alloc_reg(&current,i,dops[i].rs1);
7404 alloc_reg(&current,i,dops[i].rs2);
7407 if((dops[i].opcode&0x3E)==0x16) // BLEZL/BGTZL
7412 alloc_cc(&current,i);
7413 dirty_reg(&current,CCREG);
7414 alloc_reg(&current,i,dops[i].rs1);
7417 //current.isconst=0;
7420 clear_const(&current,dops[i].rs1);
7421 clear_const(&current,dops[i].rt1);
7423 alloc_cc(&current,i);
7424 dirty_reg(&current,CCREG);
7425 alloc_reg(&current,i,dops[i].rs1);
7426 if (dops[i].rt1 == 31) { // BLTZAL/BGEZAL
7427 alloc_reg(&current,i,31);
7428 dirty_reg(&current,31);
7431 (dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2)) // The delay slot overwrites the branch condition.
7432 ||(dops[i].rt1 == 31 && dops[i].rs1 == 31) // overwrites its own condition
7433 ||(dops[i].rt1==31&&(dops[i+1].rs1==31||dops[i+1].rs2==31||dops[i+1].rt1==31||dops[i+1].rt2==31))) { // DS touches $ra
7434 // Allocate the branch condition registers instead.
7438 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7443 delayslot_alloc(&current,i+1);
7447 //current.isconst=0;
7450 imm16_alloc(&current,i);
7454 load_alloc(&current,i);
7458 store_alloc(&current,i);
7461 alu_alloc(&current,i);
7464 shift_alloc(&current,i);
7467 multdiv_alloc(&current,i);
7470 shiftimm_alloc(&current,i);
7473 mov_alloc(&current,i);
7476 cop0_alloc(&current,i);
7479 rfe_alloc(&current,i);
7482 cop2_alloc(&current,i);
7485 c2ls_alloc(&current,i);
7488 c2op_alloc(&current,i);
7493 syscall_alloc(&current,i);
7497 // Create entry (branch target) regmap
7498 for(hr=0;hr<HOST_REGS;hr++)
7501 r=current.regmap[hr];
7503 if(r!=regmap_pre[i][hr]) {
7504 // TODO: delay slot (?)
7505 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
7506 if(or<0||r>=TEMPREG){
7507 regs[i].regmap_entry[hr]=-1;
7511 // Just move it to a different register
7512 regs[i].regmap_entry[hr]=r;
7513 // If it was dirty before, it's still dirty
7514 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r);
7521 regs[i].regmap_entry[hr]=0;
7526 if((current.u>>r)&1) {
7527 regs[i].regmap_entry[hr]=-1;
7528 //regs[i].regmap[hr]=-1;
7529 current.regmap[hr]=-1;
7531 regs[i].regmap_entry[hr]=r;
7535 // Branches expect CCREG to be allocated at the target
7536 if(regmap_pre[i][hr]==CCREG)
7537 regs[i].regmap_entry[hr]=CCREG;
7539 regs[i].regmap_entry[hr]=-1;
7542 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
7545 #if 0 // see do_store_smc_check()
7546 if(i>0&&(dops[i-1].itype==STORE||dops[i-1].itype==STORELR||(dops[i-1].itype==C2LS&&dops[i-1].opcode==0x3a))&&(u_int)cinfo[i-1].imm<0x800)
7547 current.waswritten|=1<<dops[i-1].rs1;
7548 current.waswritten&=~(1<<dops[i].rt1);
7549 current.waswritten&=~(1<<dops[i].rt2);
7550 if((dops[i].itype==STORE||dops[i].itype==STORELR||(dops[i].itype==C2LS&&dops[i].opcode==0x3a))&&(u_int)cinfo[i].imm>=0x800)
7551 current.waswritten&=~(1<<dops[i].rs1);
7554 /* Branch post-alloc */
7557 current.wasdirty=current.dirty;
7558 switch(dops[i-1].itype) {
7560 memcpy(&branch_regs[i-1],&current,sizeof(current));
7561 branch_regs[i-1].isconst=0;
7562 branch_regs[i-1].wasconst=0;
7563 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
7564 alloc_cc(&branch_regs[i-1],i-1);
7565 dirty_reg(&branch_regs[i-1],CCREG);
7566 if(dops[i-1].rt1==31) { // JAL
7567 alloc_reg(&branch_regs[i-1],i-1,31);
7568 dirty_reg(&branch_regs[i-1],31);
7570 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7571 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7574 memcpy(&branch_regs[i-1],&current,sizeof(current));
7575 branch_regs[i-1].isconst=0;
7576 branch_regs[i-1].wasconst=0;
7577 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
7578 alloc_cc(&branch_regs[i-1],i-1);
7579 dirty_reg(&branch_regs[i-1],CCREG);
7580 alloc_reg(&branch_regs[i-1],i-1,dops[i-1].rs1);
7581 if(dops[i-1].rt1!=0) { // JALR
7582 alloc_reg(&branch_regs[i-1],i-1,dops[i-1].rt1);
7583 dirty_reg(&branch_regs[i-1],dops[i-1].rt1);
7586 if(dops[i-1].rs1==31) { // JALR
7587 alloc_reg(&branch_regs[i-1],i-1,RHASH);
7588 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
7591 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7592 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7595 if((dops[i-1].opcode&0x3E)==4) // BEQ/BNE
7597 alloc_cc(&current,i-1);
7598 dirty_reg(&current,CCREG);
7599 if((dops[i-1].rs1&&(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2))||
7600 (dops[i-1].rs2&&(dops[i-1].rs2==dops[i].rt1||dops[i-1].rs2==dops[i].rt2))) {
7601 // The delay slot overwrote one of our conditions
7602 // Delay slot goes after the test (in order)
7603 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7605 delayslot_alloc(&current,i);
7610 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
7611 // Alloc the branch condition registers
7612 if(dops[i-1].rs1) alloc_reg(&current,i-1,dops[i-1].rs1);
7613 if(dops[i-1].rs2) alloc_reg(&current,i-1,dops[i-1].rs2);
7615 memcpy(&branch_regs[i-1],&current,sizeof(current));
7616 branch_regs[i-1].isconst=0;
7617 branch_regs[i-1].wasconst=0;
7618 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7619 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7622 if((dops[i-1].opcode&0x3E)==6) // BLEZ/BGTZ
7624 alloc_cc(&current,i-1);
7625 dirty_reg(&current,CCREG);
7626 if(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2) {
7627 // The delay slot overwrote the branch condition
7628 // Delay slot goes after the test (in order)
7629 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7631 delayslot_alloc(&current,i);
7636 current.u=branch_unneeded_reg[i-1]&~(1LL<<dops[i-1].rs1);
7637 // Alloc the branch condition register
7638 alloc_reg(&current,i-1,dops[i-1].rs1);
7640 memcpy(&branch_regs[i-1],&current,sizeof(current));
7641 branch_regs[i-1].isconst=0;
7642 branch_regs[i-1].wasconst=0;
7643 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7644 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7649 alloc_cc(&current,i-1);
7650 dirty_reg(&current,CCREG);
7651 if(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2) {
7652 // The delay slot overwrote the branch condition
7653 // Delay slot goes after the test (in order)
7654 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7656 delayslot_alloc(&current,i);
7661 current.u=branch_unneeded_reg[i-1]&~(1LL<<dops[i-1].rs1);
7662 // Alloc the branch condition register
7663 alloc_reg(&current,i-1,dops[i-1].rs1);
7665 memcpy(&branch_regs[i-1],&current,sizeof(current));
7666 branch_regs[i-1].isconst=0;
7667 branch_regs[i-1].wasconst=0;
7668 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7669 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7674 if (dops[i-1].is_ujump)
7676 if(dops[i-1].rt1==31) // JAL/JALR
7678 // Subroutine call will return here, don't alloc any registers
7680 clear_all_regs(current.regmap);
7681 alloc_reg(&current,i,CCREG);
7682 dirty_reg(&current,CCREG);
7686 // Internal branch will jump here, match registers to caller
7688 clear_all_regs(current.regmap);
7689 alloc_reg(&current,i,CCREG);
7690 dirty_reg(&current,CCREG);
7693 if(cinfo[j].ba==start+i*4+4) {
7694 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
7695 current.dirty=branch_regs[j].dirty;
7700 if(cinfo[j].ba==start+i*4+4) {
7701 for(hr=0;hr<HOST_REGS;hr++) {
7702 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
7703 current.regmap[hr]=-1;
7705 current.dirty&=branch_regs[j].dirty;
7714 // Count cycles in between branches
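// cinfo[i].ccadj caches the CLOCK_ADJUST()-scaled cycle count accumulated in
// 'cc' since the last branch; the checks below add extra cycles to 'cc' for
// GTE ops and back-to-back stores unless the no-stalls hack is active.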
7715 cinfo[i].ccadj = CLOCK_ADJUST(cc);
7716 if (i > 0 && (dops[i-1].is_jump || dops[i].is_exception))
7720 #if !defined(DRC_DBG)
7721 else if(dops[i].itype==C2OP&&gte_cycletab[source[i]&0x3f]>2)
7723 // this should really be removed since the real stalls have been implemented,
7724 // but doing so causes sizeable perf regression against the older version
7725 u_int gtec = gte_cycletab[source[i] & 0x3f];
7726 cc += HACK_ENABLED(NDHACK_NO_STALLS) ? gtec/2 : 2;
7728 else if(i>1&&dops[i].itype==STORE&&dops[i-1].itype==STORE&&dops[i-2].itype==STORE&&!dops[i].bt)
7732 else if(dops[i].itype==C2LS)
7734 // same as with C2OP
7735 cc += HACK_ENABLED(NDHACK_NO_STALLS) ? 4 : 2;
7743 if(!dops[i].is_ds) {
7744 regs[i].dirty=current.dirty;
7745 regs[i].isconst=current.isconst;
7746 memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
7748 for(hr=0;hr<HOST_REGS;hr++) {
7749 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
7750 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
7751 regs[i].wasconst&=~(1<<hr);
7755 //regs[i].waswritten=current.waswritten;
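// Pass 4 scans the block backwards, building for each instruction a mask 'nr'
// of host registers whose contents are still needed (branch targets, delay
// slot operands, source registers, CCREG at branch/exception points). Host
// registers not in the mask are deallocated from regs[]/branch_regs[] so later
// passes can reuse them.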
7759 static noinline void pass4_cull_unused_regs(void)
7761 u_int last_needed_regs[4] = {0,0,0,0};
7765 for (i=slen-1;i>=0;i--)
7768 __builtin_prefetch(regs[i-2].regmap);
7771 if(cinfo[i].ba<start || cinfo[i].ba>=(start+slen*4))
7773 // Branch out of this block, don't need anything
7779 // Need whatever matches the target
7781 int t=(cinfo[i].ba-start)>>2;
7782 for(hr=0;hr<HOST_REGS;hr++)
7784 if(regs[i].regmap_entry[hr]>=0) {
7785 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
7789 // Conditional branch may need registers for following instructions
7790 if (!dops[i].is_ujump)
7793 nr |= last_needed_regs[(i+2) & 3];
7794 for(hr=0;hr<HOST_REGS;hr++)
7796 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
7797 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
7801 // Don't need stuff which is overwritten
7802 //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7803 //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7804 // Merge in delay slot
7805 if (dops[i+1].rt1) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt1);
7806 if (dops[i+1].rt2) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt2);
7807 nr |= get_regm(regmap_pre[i], dops[i+1].rs1);
7808 nr |= get_regm(regmap_pre[i], dops[i+1].rs2);
7809 nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs1);
7810 nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs2);
7811 if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store)) {
7812 nr |= get_regm(regmap_pre[i], ROREG);
7813 nr |= get_regm(regs[i].regmap_entry, ROREG);
7815 if (dops[i+1].is_store) {
7816 nr |= get_regm(regmap_pre[i], INVCP);
7817 nr |= get_regm(regs[i].regmap_entry, INVCP);
7820 else if (dops[i].is_exception)
7822 // SYSCALL instruction, etc
7828 for(hr=0;hr<HOST_REGS;hr++) {
7829 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
7830 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
7831 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7832 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7836 // Overwritten registers are not needed
7837 if (dops[i].rt1) nr &= ~get_regm(regs[i].regmap, dops[i].rt1);
7838 if (dops[i].rt2) nr &= ~get_regm(regs[i].regmap, dops[i].rt2);
7839 nr &= ~get_regm(regs[i].regmap, FTEMP);
7840 // Source registers are needed
7841 nr |= get_regm(regmap_pre[i], dops[i].rs1);
7842 nr |= get_regm(regmap_pre[i], dops[i].rs2);
7843 nr |= get_regm(regs[i].regmap_entry, dops[i].rs1);
7844 nr |= get_regm(regs[i].regmap_entry, dops[i].rs2);
7845 if (ram_offset && (dops[i].is_load || dops[i].is_store)) {
7846 nr |= get_regm(regmap_pre[i], ROREG);
7847 nr |= get_regm(regs[i].regmap_entry, ROREG);
7849 if (dops[i].is_store) {
7850 nr |= get_regm(regmap_pre[i], INVCP);
7851 nr |= get_regm(regs[i].regmap_entry, INVCP);
7854 if (i > 0 && !dops[i].bt && regs[i].wasdirty)
7855 for(hr=0;hr<HOST_REGS;hr++)
7857 // Don't store a register immediately after writing it,
7858 // may prevent dual-issue.
7859 // But do so if this is a branch target, otherwise we
7860 // might have to load the register before the branch.
7861 if((regs[i].wasdirty>>hr)&1) {
7862 if((regmap_pre[i][hr]>0&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
7863 if(dops[i-1].rt1==regmap_pre[i][hr]) nr|=1<<hr;
7864 if(dops[i-1].rt2==regmap_pre[i][hr]) nr|=1<<hr;
7866 if((regs[i].regmap_entry[hr]>0&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
7867 if(dops[i-1].rt1==regs[i].regmap_entry[hr]) nr|=1<<hr;
7868 if(dops[i-1].rt2==regs[i].regmap_entry[hr]) nr|=1<<hr;
7872 // Cycle count is needed at branches. Assume it is needed at the target too.
7873 if (i == 0 || dops[i].bt || dops[i].may_except || dops[i].itype == CJUMP) {
7874 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7875 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7878 last_needed_regs[i & 3] = nr;
7880 // Deallocate unneeded registers
7881 for(hr=0;hr<HOST_REGS;hr++)
7884 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
7887 int map1 = 0, map2 = 0, temp = 0; // or -1 ??
7888 if (dops[i+1].is_load || dops[i+1].is_store)
7890 if (dops[i+1].is_store)
7892 if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR || dops[i+1].itype==C2LS)
7894 if(regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
7895 regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
7896 regs[i].regmap[hr]!=dops[i+1].rt1 && regs[i].regmap[hr]!=dops[i+1].rt2 &&
7897 regs[i].regmap[hr]!=dops[i+1].rs1 && regs[i].regmap[hr]!=dops[i+1].rs2 &&
7898 regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=PTEMP &&
7899 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
7900 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
7901 regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2)
7903 regs[i].regmap[hr]=-1;
7904 regs[i].isconst&=~(1<<hr);
7905 regs[i].dirty&=~(1<<hr);
7906 regs[i+1].wasdirty&=~(1<<hr);
7907 if(branch_regs[i].regmap[hr]!=dops[i].rs1 && branch_regs[i].regmap[hr]!=dops[i].rs2 &&
7908 branch_regs[i].regmap[hr]!=dops[i].rt1 && branch_regs[i].regmap[hr]!=dops[i].rt2 &&
7909 branch_regs[i].regmap[hr]!=dops[i+1].rt1 && branch_regs[i].regmap[hr]!=dops[i+1].rt2 &&
7910 branch_regs[i].regmap[hr]!=dops[i+1].rs1 && branch_regs[i].regmap[hr]!=dops[i+1].rs2 &&
7911 branch_regs[i].regmap[hr]!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
7912 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
7913 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
7914 branch_regs[i].regmap[hr]!=map1 && branch_regs[i].regmap[hr]!=map2)
7916 branch_regs[i].regmap[hr]=-1;
7917 branch_regs[i].regmap_entry[hr]=-1;
7918 if (!dops[i].is_ujump)
7921 regmap_pre[i+2][hr]=-1;
7922 regs[i+2].wasconst&=~(1<<hr);
7933 int map1 = -1, map2 = -1, temp=-1;
7934 if (dops[i].is_load || dops[i].is_store)
7936 if (dops[i].is_store)
7938 if (dops[i].itype==LOADLR || dops[i].itype==STORELR || dops[i].itype==C2LS)
7940 if(regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
7941 regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
7942 regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2 &&
7943 //(dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG)
7944 regs[i].regmap[hr] != CCREG)
7946 if(i<slen-1&&!dops[i].is_ds) {
7947 assert(regs[i].regmap[hr]<64);
7948 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]>0)
7949 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
7951 SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
7952 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
7954 regmap_pre[i+1][hr]=-1;
7955 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
7956 regs[i+1].wasconst&=~(1<<hr);
7958 regs[i].regmap[hr]=-1;
7959 regs[i].isconst&=~(1<<hr);
7960 regs[i].dirty&=~(1<<hr);
7961 regs[i+1].wasdirty&=~(1<<hr);
7970 // If a register is allocated during a loop, try to allocate it for the
7971 // entire loop, if possible. This avoids loading/storing registers
7972 // inside of the loop.
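// Concretely, for each backward (loop) branch the code tries to extend an
// existing host register mapping (tracked in f_regmap) from the branch back up
// to the loop entry, so the value stays register-resident across iterations
// instead of being reloaded on every pass through the loop.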
7973 static noinline void pass5a_preallocate1(void)
7976 signed char f_regmap[HOST_REGS];
7977 clear_all_regs(f_regmap);
7978 for(i=0;i<slen-1;i++)
7980 if(dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
7982 if(cinfo[i].ba>=start && cinfo[i].ba<(start+i*4))
7983 if(dops[i+1].itype==NOP||dops[i+1].itype==MOV||dops[i+1].itype==ALU
7984 ||dops[i+1].itype==SHIFTIMM||dops[i+1].itype==IMM16||dops[i+1].itype==LOAD
7985 ||dops[i+1].itype==STORE||dops[i+1].itype==STORELR
7986 ||dops[i+1].itype==SHIFT
7987 ||dops[i+1].itype==COP2||dops[i+1].itype==C2LS||dops[i+1].itype==C2OP)
7989 int t=(cinfo[i].ba-start)>>2;
7990 if(t > 0 && !dops[t-1].is_jump) // loop_preload can't handle jumps into delay slots
7991 if(t<2||(dops[t-2].itype!=UJUMP&&dops[t-2].itype!=RJUMP)||dops[t-2].rt1!=31) // call/ret assumes no registers allocated
7992 for(hr=0;hr<HOST_REGS;hr++)
7994 if(regs[i].regmap[hr]>=0) {
7995 if(f_regmap[hr]!=regs[i].regmap[hr]) {
7996 // dealloc old register
7998 for(n=0;n<HOST_REGS;n++)
8000 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8002 // and alloc new one
8003 f_regmap[hr]=regs[i].regmap[hr];
8006 if(branch_regs[i].regmap[hr]>=0) {
8007 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
8008 // dealloc old register
8010 for(n=0;n<HOST_REGS;n++)
8012 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
8014 // and alloc new one
8015 f_regmap[hr]=branch_regs[i].regmap[hr];
8019 if(count_free_regs(regs[i].regmap)<=cinfo[i+1].min_free_regs)
8020 f_regmap[hr]=branch_regs[i].regmap[hr];
8022 if(count_free_regs(branch_regs[i].regmap)<=cinfo[i+1].min_free_regs)
8023 f_regmap[hr]=branch_regs[i].regmap[hr];
8025 // Avoid dirty->clean transition
8026 #ifdef DESTRUCTIVE_WRITEBACK
8027 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
8029 // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
8030 // case above, however it's always a good idea. We can't hoist the
8031 // load if the register was already allocated, so there's no point
8032 // wasting time analyzing most of these cases. It only "succeeds"
8033 // when the mapping was different and the load can be replaced with
8034 // a mov, which is of negligible benefit. So such cases are
8036 if(f_regmap[hr]>0) {
8037 if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
8041 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,cinfo[i].ba,start+j*4,hr,r);
8042 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
8044 if(regs[j].regmap[hr]==f_regmap[hr]&&f_regmap[hr]<TEMPREG) {
8045 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,cinfo[i].ba,start+j*4,hr,r);
8047 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
8048 if(get_reg(regs[i].regmap,f_regmap[hr])>=0) break;
8049 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
8051 while(k>1&&regs[k-1].regmap[hr]==-1) {
8052 if(count_free_regs(regs[k-1].regmap)<=cinfo[k-1].min_free_regs) {
8053 //printf("no free regs for store %x\n",start+(k-1)*4);
8056 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
8057 //printf("no-match due to different register\n");
8060 if (dops[k-2].is_jump) {
8061 //printf("no-match due to branch\n");
8064 // call/ret fast path assumes no registers allocated
8065 if(k>2&&(dops[k-3].itype==UJUMP||dops[k-3].itype==RJUMP)&&dops[k-3].rt1==31) {
8070 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
8071 //printf("Extend r%d, %x ->\n",hr,start+k*4);
8073 regs[k].regmap_entry[hr]=f_regmap[hr];
8074 regs[k].regmap[hr]=f_regmap[hr];
8075 regmap_pre[k+1][hr]=f_regmap[hr];
8076 regs[k].wasdirty&=~(1<<hr);
8077 regs[k].dirty&=~(1<<hr);
8078 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
8079 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
8080 regs[k].wasconst&=~(1<<hr);
8081 regs[k].isconst&=~(1<<hr);
8086 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
8089 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
8090 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
8091 //printf("OK fill %x (r%d)\n",start+i*4,hr);
8092 regs[i].regmap_entry[hr]=f_regmap[hr];
8093 regs[i].regmap[hr]=f_regmap[hr];
8094 regs[i].wasdirty&=~(1<<hr);
8095 regs[i].dirty&=~(1<<hr);
8096 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
8097 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
8098 regs[i].wasconst&=~(1<<hr);
8099 regs[i].isconst&=~(1<<hr);
8100 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
8101 branch_regs[i].wasdirty&=~(1<<hr);
8102 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
8103 branch_regs[i].regmap[hr]=f_regmap[hr];
8104 branch_regs[i].dirty&=~(1<<hr);
8105 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
8106 branch_regs[i].wasconst&=~(1<<hr);
8107 branch_regs[i].isconst&=~(1<<hr);
8108 if (!dops[i].is_ujump) {
8109 regmap_pre[i+2][hr]=f_regmap[hr];
8110 regs[i+2].wasdirty&=~(1<<hr);
8111 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
8116 // Alloc register clean at beginning of loop,
8117 // but may dirty it in pass 6
8118 regs[k].regmap_entry[hr]=f_regmap[hr];
8119 regs[k].regmap[hr]=f_regmap[hr];
8120 regs[k].dirty&=~(1<<hr);
8121 regs[k].wasconst&=~(1<<hr);
8122 regs[k].isconst&=~(1<<hr);
8123 if (dops[k].is_jump) {
8124 branch_regs[k].regmap_entry[hr]=f_regmap[hr];
8125 branch_regs[k].regmap[hr]=f_regmap[hr];
8126 branch_regs[k].dirty&=~(1<<hr);
8127 branch_regs[k].wasconst&=~(1<<hr);
8128 branch_regs[k].isconst&=~(1<<hr);
8129 if (!dops[k].is_ujump) {
8130 regmap_pre[k+2][hr]=f_regmap[hr];
8131 regs[k+2].wasdirty&=~(1<<hr);
8136 regmap_pre[k+1][hr]=f_regmap[hr];
8137 regs[k+1].wasdirty&=~(1<<hr);
8140 if(regs[j].regmap[hr]==f_regmap[hr])
8141 regs[j].regmap_entry[hr]=f_regmap[hr];
8145 if(regs[j].regmap[hr]>=0)
8147 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
8148 //printf("no-match due to different register\n");
8151 if (dops[j].is_ujump)
8153 // Stop on unconditional branch
8156 if(dops[j].itype==CJUMP||dops[j].itype==SJUMP)
8159 if(count_free_regs(regs[j].regmap)<=cinfo[j+1].min_free_regs)
8162 if(count_free_regs(branch_regs[j].regmap)<=cinfo[j+1].min_free_regs)
8165 if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
8166 //printf("no-match due to different register (branch)\n");
8170 if(count_free_regs(regs[j].regmap)<=cinfo[j].min_free_regs) {
8171 //printf("No free regs for store %x\n",start+j*4);
8174 assert(f_regmap[hr]<64);
8181 // Non branch or undetermined branch target
8182 for(hr=0;hr<HOST_REGS;hr++)
8184 if(hr!=EXCLUDE_REG) {
8185 if(regs[i].regmap[hr]>=0) {
8186 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8187 // dealloc old register
8189 for(n=0;n<HOST_REGS;n++)
8191 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8193 // and alloc new one
8194 f_regmap[hr]=regs[i].regmap[hr];
8199 // Try to restore cycle count at branch targets
8201 for(j=i;j<slen-1;j++) {
8202 if(regs[j].regmap[HOST_CCREG]!=-1) break;
8203 if(count_free_regs(regs[j].regmap)<=cinfo[j].min_free_regs) {
8204 //printf("no free regs for store %x\n",start+j*4);
8208 if(regs[j].regmap[HOST_CCREG]==CCREG) {
8210 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
8212 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8213 regs[k].regmap[HOST_CCREG]=CCREG;
8214 regmap_pre[k+1][HOST_CCREG]=CCREG;
8215 regs[k+1].wasdirty|=1<<HOST_CCREG;
8216 regs[k].dirty|=1<<HOST_CCREG;
8217 regs[k].wasconst&=~(1<<HOST_CCREG);
8218 regs[k].isconst&=~(1<<HOST_CCREG);
8221 regs[j].regmap_entry[HOST_CCREG]=CCREG;
8223 // Work backwards from the branch target
8224 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
8226 //printf("Extend backwards\n");
8229 while(regs[k-1].regmap[HOST_CCREG]==-1) {
8230 if(count_free_regs(regs[k-1].regmap)<=cinfo[k-1].min_free_regs) {
8231 //printf("no free regs for store %x\n",start+(k-1)*4);
8236 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
8237 //printf("Extend CC, %x ->\n",start+k*4);
8239 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8240 regs[k].regmap[HOST_CCREG]=CCREG;
8241 regmap_pre[k+1][HOST_CCREG]=CCREG;
8242 regs[k+1].wasdirty|=1<<HOST_CCREG;
8243 regs[k].dirty|=1<<HOST_CCREG;
8244 regs[k].wasconst&=~(1<<HOST_CCREG);
8245 regs[k].isconst&=~(1<<HOST_CCREG);
8250 //printf("Fail Extend CC, %x ->\n",start+k*4);
8254 if(dops[i].itype!=STORE&&dops[i].itype!=STORELR&&dops[i].itype!=SHIFT&&
8255 dops[i].itype!=NOP&&dops[i].itype!=MOV&&dops[i].itype!=ALU&&dops[i].itype!=SHIFTIMM&&
8256 dops[i].itype!=IMM16&&dops[i].itype!=LOAD)
8258 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
8264 // This allocates registers (if possible) one instruction prior
8265 // to use, which can avoid a load-use penalty on certain CPUs.
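// For example, if the next instruction reads a source register that is not yet
// mapped, the mapping is pulled into this instruction's regmap (and mirrored in
// regmap_entry/regmap_pre) so the load is issued one instruction earlier.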
8266 static noinline void pass5b_preallocate2(void)
8269 for(i=0;i<slen-1;i++)
8271 if (!i || !dops[i-1].is_jump)
8275 int j, can_steal = 1;
8276 for (j = i; j < i + 2; j++) {
8278 if (cinfo[j].min_free_regs == 0)
8280 for (hr = 0; hr < HOST_REGS; hr++)
8281 if (hr != EXCLUDE_REG && regs[j].regmap[hr] < 0)
8283 if (free_regs <= cinfo[j].min_free_regs) {
8290 if(dops[i].itype==ALU||dops[i].itype==MOV||dops[i].itype==LOAD||dops[i].itype==SHIFTIMM||dops[i].itype==IMM16
8291 ||(dops[i].itype==COP2&&dops[i].opcode2<3))
8294 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rs1))>=0)
8296 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8298 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8299 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8300 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8301 regs[i].isconst&=~(1<<hr);
8302 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8303 constmap[i][hr]=constmap[i+1][hr];
8304 regs[i+1].wasdirty&=~(1<<hr);
8305 regs[i].dirty&=~(1<<hr);
8310 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rs2))>=0)
8312 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8314 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8315 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8316 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8317 regs[i].isconst&=~(1<<hr);
8318 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8319 constmap[i][hr]=constmap[i+1][hr];
8320 regs[i+1].wasdirty&=~(1<<hr);
8321 regs[i].dirty&=~(1<<hr);
8325 // Preload target address for load instruction (non-constant)
8326 if(dops[i+1].itype==LOAD&&dops[i+1].rs1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8327 if((hr=get_reg_w(regs[i+1].regmap, dops[i+1].rt1))>=0)
8329 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8331 regs[i].regmap[hr]=dops[i+1].rs1;
8332 regmap_pre[i+1][hr]=dops[i+1].rs1;
8333 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8334 regs[i].isconst&=~(1<<hr);
8335 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8336 constmap[i][hr]=constmap[i+1][hr];
8337 regs[i+1].wasdirty&=~(1<<hr);
8338 regs[i].dirty&=~(1<<hr);
8342 // Load source into target register
8343 if(dops[i+1].use_lt1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8344 if((hr=get_reg_w(regs[i+1].regmap, dops[i+1].rt1))>=0)
8346 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8348 regs[i].regmap[hr]=dops[i+1].rs1;
8349 regmap_pre[i+1][hr]=dops[i+1].rs1;
8350 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8351 regs[i].isconst&=~(1<<hr);
8352 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8353 constmap[i][hr]=constmap[i+1][hr];
8354 regs[i+1].wasdirty&=~(1<<hr);
8355 regs[i].dirty&=~(1<<hr);
8359 // Address for store instruction (non-constant)
8360 if (dops[i+1].is_store) { // SB/SH/SW/SWC2
8361 if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8362 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
8363 if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
8365 regs[i+1].regmap[hr]=AGEN1+((i+1)&1);
8366 regs[i+1].isconst&=~(1<<hr);
8367 regs[i+1].dirty&=~(1<<hr);
8368 regs[i+2].wasdirty&=~(1<<hr);
8371 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8373 regs[i].regmap[hr]=dops[i+1].rs1;
8374 regmap_pre[i+1][hr]=dops[i+1].rs1;
8375 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8376 regs[i].isconst&=~(1<<hr);
8377 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8378 constmap[i][hr]=constmap[i+1][hr];
8379 regs[i+1].wasdirty&=~(1<<hr);
8380 regs[i].dirty&=~(1<<hr);
8384 if (dops[i+1].itype == LOADLR || dops[i+1].opcode == 0x32) { // LWC2
8385 if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8387 hr=get_reg(regs[i+1].regmap,FTEMP);
8389 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8391 regs[i].regmap[hr]=dops[i+1].rs1;
8392 regmap_pre[i+1][hr]=dops[i+1].rs1;
8393 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8394 regs[i].isconst&=~(1<<hr);
8395 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8396 constmap[i][hr]=constmap[i+1][hr];
8397 regs[i+1].wasdirty&=~(1<<hr);
8398 regs[i].dirty&=~(1<<hr);
8400 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
8402 // move it to another register
8403 regs[i+1].regmap[hr]=-1;
8404 regmap_pre[i+2][hr]=-1;
8405 regs[i+1].regmap[nr]=FTEMP;
8406 regmap_pre[i+2][nr]=FTEMP;
8407 regs[i].regmap[nr]=dops[i+1].rs1;
8408 regmap_pre[i+1][nr]=dops[i+1].rs1;
8409 regs[i+1].regmap_entry[nr]=dops[i+1].rs1;
8410 regs[i].isconst&=~(1<<nr);
8411 regs[i+1].isconst&=~(1<<nr);
8412 regs[i].dirty&=~(1<<nr);
8413 regs[i+1].wasdirty&=~(1<<nr);
8414 regs[i+1].dirty&=~(1<<nr);
8415 regs[i+2].wasdirty&=~(1<<nr);
8419 if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR/*||dops[i+1].itype==C2LS*/) {
8421 if(dops[i+1].itype==LOAD)
8422 hr=get_reg_w(regs[i+1].regmap, dops[i+1].rt1);
8423 if (dops[i+1].itype == LOADLR || dops[i+1].opcode == 0x32) // LWC2
8424 hr=get_reg(regs[i+1].regmap,FTEMP);
8425 if (dops[i+1].is_store) {
8426 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
8427 if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
8429 if(hr>=0&&regs[i].regmap[hr]<0) {
8430 int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
8431 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
8432 regs[i].regmap[hr]=AGEN1+((i+1)&1);
8433 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
8434 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
8435 regs[i].isconst&=~(1<<hr);
8436 regs[i+1].wasdirty&=~(1<<hr);
8437 regs[i].dirty&=~(1<<hr);
8447 // Write back dirty registers as soon as we will no longer modify them,
8448 // so that we don't end up with lots of writes at the branches.
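// Roughly: working backwards, will_dirty marks host regs that are certain to be
// dirtied again before the next flush point (no use writing them back yet),
// while wont_dirty marks regs that will not be dirtied again, so they can be
// written back early and marked clean.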
8449 static noinline void pass6_clean_registers(int istart, int iend, int wr)
8451 static u_int wont_dirty[MAXBLOCK];
8452 static u_int will_dirty[MAXBLOCK];
8455 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
8456 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
8458 will_dirty_i=will_dirty_next=0;
8459 wont_dirty_i=wont_dirty_next=0;
8461 will_dirty_i=will_dirty_next=will_dirty[iend+1];
8462 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
8464 for (i=iend;i>=istart;i--)
8466 signed char rregmap_i[RRMAP_SIZE];
8467 u_int hr_candirty = 0;
8468 assert(HOST_REGS < 32);
8469 make_rregs(regs[i].regmap, rregmap_i, &hr_candirty);
8470 __builtin_prefetch(regs[i-1].regmap);
8473 signed char branch_rregmap_i[RRMAP_SIZE];
8474 u_int branch_hr_candirty = 0;
8475 make_rregs(branch_regs[i].regmap, branch_rregmap_i, &branch_hr_candirty);
8476 if(cinfo[i].ba<start || cinfo[i].ba>=(start+slen*4))
8478 // Branch out of this block, flush all regs
8480 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8481 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8482 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8483 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8484 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8485 will_dirty_i &= branch_hr_candirty;
8486 if (dops[i].is_ujump)
8488 // Unconditional branch
8490 // Merge in delay slot (will dirty)
8491 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8492 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8493 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8494 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8495 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8496 will_dirty_i &= hr_candirty;
8500 // Conditional branch
8501 wont_dirty_i = wont_dirty_next;
8502 // Merge in delay slot (will dirty)
8503 // (the original code had no explanation why these 2 are commented out)
8504 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8505 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8506 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8507 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8508 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8509 will_dirty_i &= hr_candirty;
8511 // Merge in delay slot (wont dirty)
8512 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8513 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8514 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8515 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8516 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8517 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8518 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8519 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8520 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8521 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8522 wont_dirty_i &= ~(1u << 31);
8524 #ifndef DESTRUCTIVE_WRITEBACK
8525 branch_regs[i].dirty&=wont_dirty_i;
8527 branch_regs[i].dirty|=will_dirty_i;
8533 if(cinfo[i].ba<=start+i*4) {
8535 if (dops[i].is_ujump)
8537 // Unconditional branch
8540 // Merge in delay slot (will dirty)
8541 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8542 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8543 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8544 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8545 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8546 temp_will_dirty &= branch_hr_candirty;
8547 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8548 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8549 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8550 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8551 temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8552 temp_will_dirty &= hr_candirty;
8554 // Conditional branch (not taken case)
8555 temp_will_dirty=will_dirty_next;
8556 temp_wont_dirty=wont_dirty_next;
8557 // Merge in delay slot (will dirty)
8558 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8559 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8560 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8561 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8562 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8563 temp_will_dirty &= branch_hr_candirty;
8564 //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8565 //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8566 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8567 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8568 temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8569 temp_will_dirty &= hr_candirty;
8571 // Merge in delay slot (wont dirty)
8572 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8573 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8574 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8575 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8576 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8577 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8578 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8579 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8580 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8581 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8582 temp_wont_dirty &= ~(1u << 31);
8583 // Deal with changed mappings
8585 for(r=0;r<HOST_REGS;r++) {
8586 if(r!=EXCLUDE_REG) {
8587 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
8588 temp_will_dirty&=~(1<<r);
8589 temp_wont_dirty&=~(1<<r);
8590 if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
8591 temp_will_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8592 temp_wont_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8594 temp_will_dirty|=1<<r;
8595 temp_wont_dirty|=1<<r;
8602 will_dirty[i]=temp_will_dirty;
8603 wont_dirty[i]=temp_wont_dirty;
8604 pass6_clean_registers((cinfo[i].ba-start)>>2,i-1,0);
8606 // Limit recursion. It can take an excessive amount
8607 // of time if there are a lot of nested loops.
8608 will_dirty[(cinfo[i].ba-start)>>2]=0;
8609 wont_dirty[(cinfo[i].ba-start)>>2]=-1;
8614 if (dops[i].is_ujump)
8616 // Unconditional branch
8619 //if(cinfo[i].ba>start+i*4) { // Disable recursion (for debugging)
8620 for(r=0;r<HOST_REGS;r++) {
8621 if(r!=EXCLUDE_REG) {
8622 if(branch_regs[i].regmap[r]==regs[(cinfo[i].ba-start)>>2].regmap_entry[r]) {
8623 will_dirty_i|=will_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8624 wont_dirty_i|=wont_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8626 if(branch_regs[i].regmap[r]>=0) {
8627 will_dirty_i|=((unneeded_reg[(cinfo[i].ba-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
8628 wont_dirty_i|=((unneeded_reg[(cinfo[i].ba-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
8633 // Merge in delay slot
8634 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8635 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8636 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8637 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8638 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8639 will_dirty_i &= branch_hr_candirty;
8640 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8641 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8642 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8643 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8644 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8645 will_dirty_i &= hr_candirty;
8647 // Conditional branch
8648 will_dirty_i=will_dirty_next;
8649 wont_dirty_i=wont_dirty_next;
8650 //if(cinfo[i].ba>start+i*4) // Disable recursion (for debugging)
8651 for(r=0;r<HOST_REGS;r++) {
8652 if(r!=EXCLUDE_REG) {
8653 signed char target_reg=branch_regs[i].regmap[r];
8654 if(target_reg==regs[(cinfo[i].ba-start)>>2].regmap_entry[r]) {
8655 will_dirty_i&=will_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8656 wont_dirty_i|=wont_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8658 else if(target_reg>=0) {
8659 will_dirty_i&=((unneeded_reg[(cinfo[i].ba-start)>>2]>>target_reg)&1)<<r;
8660 wont_dirty_i|=((unneeded_reg[(cinfo[i].ba-start)>>2]>>target_reg)&1)<<r;
8664 // Merge in delay slot
8665 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8666 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8667 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8668 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8669 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8670 will_dirty_i &= branch_hr_candirty;
8671 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8672 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8673 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8674 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8675 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8676 will_dirty_i &= hr_candirty;
8678 // Merge in delay slot (won't dirty)
8679 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8680 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8681 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8682 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8683 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8684 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8685 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8686 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8687 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8688 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8689 wont_dirty_i &= ~(1u << 31);
8691 #ifndef DESTRUCTIVE_WRITEBACK
8692 branch_regs[i].dirty&=wont_dirty_i;
8694 branch_regs[i].dirty|=will_dirty_i;
8699 else if (dops[i].is_exception)
8701 // SYSCALL instruction, etc
8705 will_dirty_next=will_dirty_i;
8706 wont_dirty_next=wont_dirty_i;
8707 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8708 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8709 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8710 will_dirty_i &= hr_candirty;
8711 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8712 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8713 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8714 wont_dirty_i &= ~(1u << 31);
8715 if (i > istart && !dops[i].is_jump) {
8716 // Don't store a register immediately after writing it,
8717 // may prevent dual-issue.
8718 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt1) & 31);
8719 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt2) & 31);
8722 will_dirty[i]=will_dirty_i;
8723 wont_dirty[i]=wont_dirty_i;
8724 // Mark registers that won't be dirtied as not dirty
8726 regs[i].dirty|=will_dirty_i;
8727 #ifndef DESTRUCTIVE_WRITEBACK
8728 regs[i].dirty&=wont_dirty_i;
8731 if (i < iend-1 && !dops[i].is_ujump) {
8732 for(r=0;r<HOST_REGS;r++) {
8733 if(r!=EXCLUDE_REG) {
8734 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
8735 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
8736 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
8744 for(r=0;r<HOST_REGS;r++) {
8745 if(r!=EXCLUDE_REG) {
8746 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
8747 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
8748 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
8755 // Deal with changed mappings
8756 temp_will_dirty=will_dirty_i;
8757 temp_wont_dirty=wont_dirty_i;
8758 for(r=0;r<HOST_REGS;r++) {
8759 if(r!=EXCLUDE_REG) {
8761 if(regs[i].regmap[r]==regmap_pre[i][r]) {
8763 #ifndef DESTRUCTIVE_WRITEBACK
8764 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
8766 regs[i].wasdirty|=will_dirty_i&(1<<r);
8769 else if(regmap_pre[i][r]>=0&&(nr=get_rreg(rregmap_i,regmap_pre[i][r]))>=0) {
8770 // Register moved to a different register
8771 will_dirty_i&=~(1<<r);
8772 wont_dirty_i&=~(1<<r);
8773 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
8774 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
8776 #ifndef DESTRUCTIVE_WRITEBACK
8777 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
8779 regs[i].wasdirty|=will_dirty_i&(1<<r);
8783 will_dirty_i&=~(1<<r);
8784 wont_dirty_i&=~(1<<r);
8785 if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
8786 will_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8787 wont_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8790 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
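// Pass 10 reclaims translation cache space: 'expirep' sweeps a window ahead of
// the output pointer and, for each step, either removes blocks whose code lies
// in the expiring range or unlinks jumps pointing into it (which of the two
// appears to be selected by 'phase'); the mini hash table is reset whenever
// blocks were actually removed.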
8798 static noinline void pass10_expire_blocks(void)
8800 u_int step = MAX_OUTPUT_BLOCK_SIZE / PAGE_COUNT / 2;
8801 // not sizeof(ndrc->translation_cache) due to vita hack
8802 u_int step_mask = ((1u << TARGET_SIZE_2) - 1u) & ~(step - 1u);
8803 u_int end = (out - ndrc->translation_cache + EXPIRITY_OFFSET) & step_mask;
8804 u_int base_shift = __builtin_ctz(MAX_OUTPUT_BLOCK_SIZE);
8807 for (; expirep != end; expirep = ((expirep + step) & step_mask))
8809 u_int base_offs = expirep & ~(MAX_OUTPUT_BLOCK_SIZE - 1);
8810 u_int block_i = expirep / step & (PAGE_COUNT - 1);
8811 u_int phase = (expirep >> (base_shift - 1)) & 1u;
8812 if (!(expirep & (MAX_OUTPUT_BLOCK_SIZE / 2 - 1))) {
8813 inv_debug("EXP: base_offs %x/%lx phase %u\n", base_offs,
8814 (long)(out - ndrc->translation_cache), phase);
8818 hit = blocks_remove_matching_addrs(&blocks[block_i], base_offs, base_shift);
8822 memset(mini_ht, -1, sizeof(mini_ht));
8827 unlink_jumps_tc_range(jumps[block_i], base_offs, base_shift);
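// Allocate a block_info descriptor with a trailing jump_in[] array of
// 'jump_in_count' entries; the descriptor is then linked into the per-page
// 'blocks' list (see below).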
8831 static struct block_info *new_block_info(u_int start, u_int len,
8832 const void *source, const void *copy, u_char *beginning, u_short jump_in_count)
8834 struct block_info **b_pptr;
8835 struct block_info *block;
8836 u_int page = get_page(start);
8838 block = malloc(sizeof(*block) + jump_in_count * sizeof(block->jump_in[0]));
8840 assert(jump_in_count > 0);
8841 block->source = source;
8843 block->start = start;
8845 block->reg_sv_flags = 0;
8846 block->tc_offs = beginning - ndrc->translation_cache;
8847 //block->tc_len = out - beginning;
8848 block->is_dirty = 0;
8849 block->inv_near_misses = 0;
8850 block->jump_in_cnt = jump_in_count;
8852 // insert sorted by start mirror-unmasked vaddr
8853 for (b_pptr = &blocks[page]; ; b_pptr = &((*b_pptr)->next)) {
8854 if (*b_pptr == NULL || (*b_pptr)->start >= start) {
8855 block->next = *b_pptr;
8860 stat_inc(stat_blocks);
8864 static int new_recompile_block(u_int addr)
8866 u_int pagelimit = 0;
8867 u_int state_rflags = 0;
8870 assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
8873 if (addr != hack_addr) {
8874 SysPrintf("game crash @%08x, ra=%08x\n", addr, psxRegs.GPR.n.ra);
8880 // this is just for speculation
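// (a set bit means the guest register currently looks like a 0x1f80xxxx
// scratchpad/IO pointer; the flags end up in the block's reg_sv_flags and seed
// value speculation for the new block)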
8881 for (i = 1; i < 32; i++) {
8882 if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
8883 state_rflags |= 1 << i;
8887 new_dynarec_did_compile=1;
8888 if (Config.HLE && start == 0x80001000) // hlecall
8890 // XXX: is this enough? Maybe check hleSoftCall?
8891 void *beginning = start_block();
8893 emit_movimm(start,0);
8894 emit_writeword(0,&pcaddr);
8895 emit_far_jump(new_dyna_leave);
8897 end_block(beginning);
8898 struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
8899 block->jump_in[0].vaddr = start;
8900 block->jump_in[0].addr = beginning;
8903 else if (f1_hack && hack_addr == 0) {
8904 void *beginning = start_block();
8905 emit_movimm(start, 0);
8906 emit_writeword(0, &hack_addr);
8907 emit_readword(&psxRegs.GPR.n.sp, 0);
8908 emit_readptr(&mem_rtab, 1);
8909 emit_shrimm(0, 12, 2);
8910 emit_readptr_dualindexedx_ptrlen(1, 2, 1);
8911 emit_addimm(0, 0x18, 0);
8912 emit_adds_ptr(1, 1, 1);
8913 emit_ldr_dualindexed(1, 0, 0);
8914 emit_writeword(0, &psxRegs.GPR.r[26]); // lw k0, 0x18(sp)
8915 emit_far_call(ndrc_get_addr_ht);
8916 emit_jmpreg(0); // jr k0
8918 end_block(beginning);
8920 struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
8921 block->jump_in[0].vaddr = start;
8922 block->jump_in[0].addr = beginning;
8923 SysPrintf("F1 hack to %08x\n", start);
8927 cycle_multiplier_active = Config.cycle_multiplier_override && Config.cycle_multiplier == CYCLE_MULT_DEFAULT
8928 ? Config.cycle_multiplier_override : Config.cycle_multiplier;
8930 source = get_source_start(start, &pagelimit);
8931 if (source == NULL) {
8932 if (addr != hack_addr) {
8933 SysPrintf("Compile at bogus memory address: %08x\n", addr);
8940 /* Pass 1: disassemble */
8941 /* Pass 2: register dependencies, branch targets */
8942 /* Pass 3: register allocation */
8943 /* Pass 4: branch dependencies */
8944 /* Pass 5: pre-alloc */
8945 /* Pass 6: optimize clean/dirty state */
8946 /* Pass 7: flag 32-bit registers */
8947 /* Pass 8: assembly */
8948 /* Pass 9: linker */
8949 /* Pass 10: garbage collection / free memory */
8951 /* Pass 1 disassembly */
8953 pass1_disassemble(pagelimit);
8955 int clear_hack_addr = apply_hacks();
8957 /* Pass 2 - Register dependencies and branch targets */
8959 pass2_unneeded_regs(0,slen-1,0);
8961 /* Pass 3 - Register allocation */
8963 pass3_register_alloc(addr);
8965 /* Pass 4 - Cull unused host registers */
8967 pass4_cull_unused_regs();
8969 /* Pass 5 - Pre-allocate registers */
8971 pass5a_preallocate1();
8972 pass5b_preallocate2();
8974 /* Pass 6 - Optimize clean/dirty state */
8975 pass6_clean_registers(0, slen-1, 1);
8978 for (i=slen-1;i>=0;i--)
8980 if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)
8982 // Conditional branch
8983 if((source[i]>>16)!=0x1000&&i<slen-2) {
8984 // Mark this address as a branch target since it may be called
8985 // upon return from interrupt
8991 /* Pass 8 - Assembly */
8992 linkcount=0;stubcount=0;
8995 void *beginning=start_block();
8996 void *instr_addr0_override = NULL;
8999 if (start == 0x80030000) {
9000 // nasty hack for the fastbios thing
9001 // override block entry to this code
9002 instr_addr0_override = out;
9003 emit_movimm(start,0);
9004 // abuse io address var as a flag that we
9005 // have already returned here once
9006 emit_readword(&address,1);
9007 emit_writeword(0,&pcaddr);
9008 emit_writeword(0,&address);
9011 emit_jeq(out + 4*2);
9012 emit_far_jump(new_dyna_leave);
9014 emit_jne(new_dyna_leave);
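// Either way the stub stores 'start' to pcaddr; on the first pass (when the
// saved 'address' value does not yet match) it exits through new_dyna_leave,
// and on later entries it falls through into the normally compiled code below.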
9019 __builtin_prefetch(regs[i+1].regmap);
9020 check_regmap(regmap_pre[i]);
9021 check_regmap(regs[i].regmap_entry);
9022 check_regmap(regs[i].regmap);
9023 //if(ds) printf("ds: ");
9024 disassemble_inst(i);
9026 ds=0; // Skip delay slot
9027 if(dops[i].bt) assem_debug("OOPS - branch into delay slot\n");
9028 instr_addr[i] = NULL;
9030 speculate_register_values(i);
9031 #ifndef DESTRUCTIVE_WRITEBACK
9032 if (i < 2 || !dops[i-2].is_ujump)
9034 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
9036 if((dops[i].itype==CJUMP||dops[i].itype==SJUMP)) {
9037 dirty_pre=branch_regs[i].dirty;
9039 dirty_pre=regs[i].dirty;
9043 if (i < 2 || !dops[i-2].is_ujump)
9045 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
9046 loop_preload(regmap_pre[i],regs[i].regmap_entry);
9048 // branch target entry point
9049 instr_addr[i] = out;
9050 assem_debug("<->\n");
9051 drc_dbg_emit_do_cmp(i, cinfo[i].ccadj);
9052 if (clear_hack_addr) {
9054 emit_writeword(0, &hack_addr);
9055 clear_hack_addr = 0;
9059 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
9060 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
9061 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i].rs1,dops[i].rs2);
9062 address_generation(i,&regs[i],regs[i].regmap_entry);
9063 load_consts(regmap_pre[i],regs[i].regmap,i);
9066 // Load the delay slot registers if necessary
9067 if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2&&(dops[i+1].rs1!=dops[i].rt1||dops[i].rt1==0))
9068 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
9069 if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2&&(dops[i+1].rs2!=dops[i].rt1||dops[i].rt1==0))
9070 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
9071 if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store))
9072 load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
9073 if (dops[i+1].is_store)
9074 load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
9078 // Preload registers for following instruction
9079 if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2)
9080 if(dops[i+1].rs1!=dops[i].rt1&&dops[i+1].rs1!=dops[i].rt2)
9081 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
9082 if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2)
9083 if(dops[i+1].rs2!=dops[i].rt1&&dops[i+1].rs2!=dops[i].rt2)
9084 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
9086 // TODO: if(is_ooo(i)) address_generation(i+1);
9087 if (!dops[i].is_jump || dops[i].itype == CJUMP)
9088 load_reg(regs[i].regmap_entry,regs[i].regmap,CCREG);
9089 if (ram_offset && (dops[i].is_load || dops[i].is_store))
9090 load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
9091 if (dops[i].is_store)
9092 load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
9094 ds = assemble(i, &regs[i], cinfo[i].ccadj);
9096 if (dops[i].is_ujump)
9099 literal_pool_jumpover(256);
9104 if (slen > 0 && dops[slen-1].itype == INTCALL) {
9105 // no ending needed for this block since INTCALL never returns
9107 // If the block did not end with an unconditional branch,
9108 // add a jump to the next instruction.
9110 if (!dops[i-2].is_ujump) {
9111 assert(!dops[i-1].is_jump);
9113 if(dops[i-2].itype!=CJUMP&&dops[i-2].itype!=SJUMP) {
9114 store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
9115 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
9116 emit_loadreg(CCREG,HOST_CCREG);
9117 emit_addimm(HOST_CCREG, cinfo[i-1].ccadj + CLOCK_ADJUST(1), HOST_CCREG);
9121 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
9122 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
9124 add_to_linker(out,start+i*4,0);
9131 assert(!dops[i-1].is_jump);
9132 store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
9133 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
9134 emit_loadreg(CCREG,HOST_CCREG);
9135 emit_addimm(HOST_CCREG, cinfo[i-1].ccadj + CLOCK_ADJUST(1), HOST_CCREG);
9136 add_to_linker(out,start+i*4,0);
9141 for(i = 0; i < stubcount; i++)
9143 switch(stubs[i].type)
9150 do_readstub(i);break;
9154 do_writestub(i);break;
9158 do_invstub(i);break;
9160 do_unalignedwritestub(i);break;
9162 do_overflowstub(i); break;
9163 case ALIGNMENT_STUB:
9164 do_alignmentstub(i); break;
9170 if (instr_addr0_override)
9171 instr_addr[0] = instr_addr0_override;
9174 /* check for improper expiration */
9175 for (i = 0; i < ARRAY_SIZE(jumps); i++) {
9179 for (j = 0; j < jumps[i]->count; j++)
9180 assert(jumps[i]->e[j].stub < beginning || (u_char *)jumps[i]->e[j].stub > out);
9184 /* Pass 9 - Linker */
9185 for(i=0;i<linkcount;i++)
9187 assem_debug("%p -> %8x\n",link_addr[i].addr,link_addr[i].target);
9189 if (!link_addr[i].internal)
9192 void *addr = check_addr(link_addr[i].target);
9193 emit_extjump(link_addr[i].addr, link_addr[i].target);
9195 set_jump_target(link_addr[i].addr, addr);
9196 ndrc_add_jump_out(link_addr[i].target,stub);
9199 set_jump_target(link_addr[i].addr, stub);
9204 int target=(link_addr[i].target-start)>>2;
9205 assert(target>=0&&target<slen);
9206 assert(instr_addr[target]);
9207 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
9208 //set_jump_target_fillslot(link_addr[i].addr,instr_addr[target],link_addr[i].ext>>1);
9210 set_jump_target(link_addr[i].addr, instr_addr[target]);
9215 u_int source_len = slen*4;
9216 if (dops[slen-1].itype == INTCALL && source_len > 4)
9217 // no need to treat the last instruction as compiled
9218 // as the interpreter fully handles it
9221 if ((u_char *)copy + source_len > (u_char *)shadow + sizeof(shadow))
9224 // External Branch Targets (jump_in)
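// Every instruction flagged as a branch target (dops[i].bt) that was actually
// assembled becomes an externally reachable entry point: it is recorded in the
// block's jump_in[] table, and the first entry is also added to the hash table
// below.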
9225 int jump_in_count = 1;
9226 assert(instr_addr[0]);
9227 for (i = 1; i < slen; i++)
9229 if (dops[i].bt && instr_addr[i])
9233 struct block_info *block =
9234 new_block_info(start, slen * 4, source, copy, beginning, jump_in_count);
9235 block->reg_sv_flags = state_rflags;
9238 for (i = 0; i < slen; i++)
9240 if ((i == 0 || dops[i].bt) && instr_addr[i])
9242 assem_debug("%p (%d) <- %8x\n", instr_addr[i], i, start + i*4);
9243 u_int vaddr = start + i*4;
9249 entry = instr_addr[i];
9251 emit_jmp(instr_addr[i]);
9253 block->jump_in[jump_in_i].vaddr = vaddr;
9254 block->jump_in[jump_in_i].addr = entry;
9258 assert(jump_in_i == jump_in_count);
9259 hash_table_add(block->jump_in[0].vaddr, block->jump_in[0].addr);
9260 // Write out the literal pool if necessary
9262 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
9264 if(((u_int)out)&7) emit_addnop(13);
9266 assert(out - (u_char *)beginning < MAX_OUTPUT_BLOCK_SIZE);
9267 //printf("shadow buffer: %p-%p\n",copy,(u_char *)copy+slen*4);
9268 memcpy(copy, source, source_len);
9271 end_block(beginning);
9273 // If we're within 256K of the end of the buffer,
9274 // start over from the beginning. (Is 256K enough?)
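// (this 256K margin matches MAX_OUTPUT_BLOCK_SIZE, the per-block size bound
// asserted a few lines above)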
9275 if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
9276 out = ndrc->translation_cache;
9278 // Trap writes to any of the pages we compiled
9279 mark_invalid_code(start, slen*4, 0);
9281 /* Pass 10 - Free memory by expiring oldest blocks */
9283 pass10_expire_blocks();
9288 stat_inc(stat_bc_direct);
9292 // vim:shiftwidth=2:expandtab