1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2011 Ari64 *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
22 #include <stdint.h> // for uint64_t
27 #include <libkern/OSCacheControl.h>
30 #include <3ds_utils.h>
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h"
39 #include "../psxinterpreter.h"
41 #include "emu_if.h" // emulator interface
42 #include "linkage_offsets.h"
43 #include "compiler_features.h"
44 #include "arm_features.h"
47 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
50 #define min(a, b) ((b) < (a) ? (b) : (a))
53 #define max(a, b) ((b) > (a) ? (b) : (a))
58 //#define REGMAP_PRINT // with DISASM only
63 #define assem_debug printf
65 #define assem_debug(...)
67 //#define inv_debug printf
68 #define inv_debug(...)
71 #include "assem_x86.h"
74 #include "assem_x64.h"
77 #include "assem_arm.h"
80 #include "assem_arm64.h"
83 #define RAM_SIZE 0x200000
85 #define MAX_OUTPUT_BLOCK_SIZE 262144
86 #define EXPIRITY_OFFSET (MAX_OUTPUT_BLOCK_SIZE * 2)
87 #define PAGE_COUNT 1024
89 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
90 #define INVALIDATE_USE_COND_CALL
94 // apparently Vita has a 16MB limit, so either we cut tc in half,
95 // or use this hack (it's a hack because tc size was designed to be a power of 2)
96 #define TC_REDUCE_BYTES 4096
98 #define TC_REDUCE_BYTES 0
103 struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
104 const void *f[2048 / sizeof(void *)];
109 u_char translation_cache[(1 << TARGET_SIZE_2) - TC_REDUCE_BYTES];
110 struct ndrc_tramp tramp;
113 #ifdef BASE_ADDR_DYNAMIC
114 static struct ndrc_mem *ndrc;
116 static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
117 static struct ndrc_mem *ndrc = &ndrc_;
119 #ifdef TC_WRITE_OFFSET
121 # include <sys/types.h>
122 # include <sys/stat.h>
126 static long ndrc_write_ofs;
127 #define NDRC_WRITE_OFFSET(x) (void *)((char *)(x) + ndrc_write_ofs)
129 #define NDRC_WRITE_OFFSET(x) (x)
152 // regmap_pre[i] - regs before [i] insn starts; dirty things here that
153 // don't match .regmap will be written back
154 // [i].regmap_entry - regs that must be set up if someone jumps here
155 // [i].regmap - regs [i] insn will read/(over)write
156 // branch_regs[i].* - same as above but for branches, takes delay slot into account
159 signed char regmap_entry[HOST_REGS];
160 signed char regmap[HOST_REGS];
163 u_int wasconst; // before; for example 'lw r2, (r2)' wasconst is true
164 u_int isconst; // ... but isconst is false when r2 is known (hr)
165 u_int loadedconst; // host regs that have constants loaded
166 u_int noevict; // can't evict this hr (alloced by current op)
167 //u_int waswritten; // MIPS regs that were used as store base before
198 struct block_info *next;
201 u_int start; // vaddr of the block start
202 u_int len; // of the whole block source
207 u_char inv_near_misses;
225 static struct decoded_insn
228 u_char opcode; // bits 31-26
229 u_char opcode2; // (depends on opcode)
242 u_char is_delay_load:1; // is_load + MFC/CFC
243 u_char is_exception:1; // unconditional, also interp. fallback
244 u_char may_except:1; // might generate an exception
245 u_char ls_type:2; // load/store type (ls_width_type)
249 LS_8 = 0, LS_16, LS_32, LS_LR
252 static struct compile_info
257 signed char min_free_regs;
259 signed char reserved[2];
263 static char invalid_code[0x100000];
264 static struct ht_entry hash_table[65536];
265 static struct block_info *blocks[PAGE_COUNT];
266 static struct jump_info *jumps[PAGE_COUNT];
268 static u_int *source;
269 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
270 static uint64_t gte_rt[MAXBLOCK];
271 static uint64_t gte_unneeded[MAXBLOCK];
272 static u_int smrv[32]; // speculated MIPS register values
273 static u_int smrv_strong; // mask of regs that are likely to have correct values
274 static u_int smrv_weak; // same, but somewhat less likely
275 static u_int smrv_strong_next; // same, but after current insn executes
276 static u_int smrv_weak_next;
277 static uint64_t unneeded_reg[MAXBLOCK];
278 static uint64_t branch_unneeded_reg[MAXBLOCK];
279 // see 'struct regstat' for a description
280 static signed char regmap_pre[MAXBLOCK][HOST_REGS];
281 // contains 'real' consts at [i] insn, but may differ from what's actually
282 // loaded in host reg as 'final' value is always loaded, see get_final_value()
283 static uint32_t current_constmap[HOST_REGS];
284 static uint32_t constmap[MAXBLOCK][HOST_REGS];
285 static struct regstat regs[MAXBLOCK];
286 static struct regstat branch_regs[MAXBLOCK];
288 static void *instr_addr[MAXBLOCK];
289 static struct link_entry link_addr[MAXBLOCK];
290 static int linkcount;
291 static struct code_stub stubs[MAXBLOCK*3];
292 static int stubcount;
293 static u_int literals[1024][2];
294 static int literalcount;
295 static int is_delayslot;
296 static char shadow[1048576] __attribute__((aligned(16)));
298 static u_int expirep;
299 static u_int stop_after_jal;
300 static u_int f1_hack;
302 static int stat_bc_direct;
303 static int stat_bc_pre;
304 static int stat_bc_restore;
305 static int stat_ht_lookups;
306 static int stat_jump_in_lookups;
307 static int stat_restore_tries;
308 static int stat_restore_compares;
309 static int stat_inv_addr_calls;
310 static int stat_inv_hits;
311 static int stat_blocks;
312 static int stat_links;
313 #define stat_inc(s) s++
314 #define stat_dec(s) s--
315 #define stat_clear(s) s = 0
319 #define stat_clear(s)
322 int new_dynarec_hacks;
323 int new_dynarec_hacks_pergame;
324 int new_dynarec_hacks_old;
325 int new_dynarec_did_compile;
327 #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
329 extern int cycle_count; // ... until end of the timeslice, counts -N -> 0 (CCREG)
330 extern int last_count; // last absolute target, often = next_interupt
332 extern int pending_exception;
333 extern int branch_target;
334 extern uintptr_t ram_offset;
335 extern uintptr_t mini_ht[32][2];
337 /* registers that may be allocated */
339 #define LOREG 32 // lo
340 #define HIREG 33 // hi
341 //#define FSREG 34 // FPU status (FCSR)
342 //#define CSREG 35 // Coprocessor status
343 #define CCREG 36 // Cycle count
344 #define INVCP 37 // Pointer to invalid_code
345 //#define MMREG 38 // Pointer to memory_map
346 #define ROREG 39 // ram offset (if psxM != 0x80000000)
348 #define FTEMP 40 // Load/store temporary register (was fpu)
349 #define PTEMP 41 // Prefetch temporary register
350 //#define TLREG 42 // TLB mapping offset
351 #define RHASH 43 // Return address hash
352 #define RHTBL 44 // Return address hash table address
353 #define RTEMP 45 // JR/JALR address register
355 #define AGEN1 46 // Address generation temporary register (pass5b_preallocate2)
356 //#define AGEN2 47 // Address generation temporary register
358 /* instruction types */
359 #define NOP 0 // No operation
360 #define LOAD 1 // Load
361 #define STORE 2 // Store
362 #define LOADLR 3 // Unaligned load
363 #define STORELR 4 // Unaligned store
364 #define MOV 5 // Move (hi/lo only)
365 #define ALU 6 // Arithmetic/logic
366 #define MULTDIV 7 // Multiply/divide
367 #define SHIFT 8 // Shift by register
368 #define SHIFTIMM 9 // Shift by immediate
369 #define IMM16 10 // 16-bit immediate
370 #define RJUMP 11 // Unconditional jump to register
371 #define UJUMP 12 // Unconditional jump
372 #define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
373 #define SJUMP 14 // Conditional branch (regimm format)
374 #define COP0 15 // Coprocessor 0
376 #define SYSCALL 22 // SYSCALL,BREAK
377 #define OTHER 23 // Other/unknown - do nothing
378 #define HLECALL 26 // PCSX fake opcodes for HLE
379 #define COP2 27 // Coprocessor 2 move
380 #define C2LS 28 // Coprocessor 2 load/store
381 #define C2OP 29 // Coprocessor 2 operation
382 #define INTCALL 30 // Call interpreter to handle rare corner cases
388 #define DJT_1 (void *)1l // no function, just a label in assem_debug log
389 #define DJT_2 (void *)2l
394 void jump_syscall (u_int u0, u_int u1, u_int pc);
395 void jump_syscall_ds(u_int u0, u_int u1, u_int pc);
396 void jump_break (u_int u0, u_int u1, u_int pc);
397 void jump_break_ds(u_int u0, u_int u1, u_int pc);
398 void jump_overflow (u_int u0, u_int u1, u_int pc);
399 void jump_overflow_ds(u_int u0, u_int u1, u_int pc);
400 void jump_addrerror (u_int cause, u_int addr, u_int pc);
401 void jump_addrerror_ds(u_int cause, u_int addr, u_int pc);
402 void jump_to_new_pc(void);
403 void call_gteStall(void);
404 void new_dyna_leave(void);
406 void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile);
407 void *ndrc_get_addr_ht(u_int vaddr);
408 void ndrc_add_jump_out(u_int vaddr, void *src);
409 void ndrc_write_invalidate_one(u_int addr);
410 static void ndrc_write_invalidate_many(u_int addr, u_int end);
412 static int new_recompile_block(u_int addr);
413 static void invalidate_block(struct block_info *block);
414 static void exception_assemble(int i, const struct regstat *i_regs, int ccadj_);
416 // Needed by assembler
417 static void wb_register(signed char r, const signed char regmap[], u_int dirty);
418 static void wb_dirtys(const signed char i_regmap[], u_int i_dirty);
419 static void wb_needed_dirtys(const signed char i_regmap[], u_int i_dirty, int addr);
420 static void load_all_regs(const signed char i_regmap[]);
421 static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[]);
422 static void load_regs_entry(int t);
423 static void load_all_consts(const signed char regmap[], u_int dirty, int i);
424 static u_int get_host_reglist(const signed char *regmap);
426 static int get_final_value(int hr, int i, u_int *value);
427 static void add_stub(enum stub_type type, void *addr, void *retaddr,
428 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
429 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
430 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
431 static void add_to_linker(void *addr, u_int target, int ext);
432 static void *get_direct_memhandler(void *table, u_int addr,
433 enum stub_type type, uintptr_t *addr_host);
434 static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
435 static void pass_args(int a0, int a1);
436 static void emit_far_jump(const void *f);
437 static void emit_far_call(const void *f);
440 #include <psp2/kernel/sysmem.h>
442 // note: this interacts with RetroArch's Vita bootstrap code: bootstrap/vita/sbrk.c
443 extern int getVMBlock();
444 int _newlib_vm_size_user = sizeof(*ndrc);
447 static void mprotect_w_x(void *start, void *end, int is_x)
451 // *Open* enables write on all memory that was
452 // allocated by sceKernelAllocMemBlockForVM()?
454 sceKernelCloseVMDomain();
456 sceKernelOpenVMDomain();
457 #elif defined(HAVE_LIBNX)
459 // check to avoid the full flush in jitTransitionToExecutable()
460 if (g_jit.type != JitType_CodeMemory) {
462 rc = jitTransitionToExecutable(&g_jit);
464 rc = jitTransitionToWritable(&g_jit);
466 ;//SysPrintf("jitTransition %d %08x\n", is_x, rc);
468 #elif defined(TC_WRITE_OFFSET)
469 // separated rx and rw areas are always available
471 u_long mstart = (u_long)start & ~4095ul;
472 u_long mend = (u_long)end;
473 if (mprotect((void *)mstart, mend - mstart,
474 PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
475 SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
480 static void start_tcache_write(void *start, void *end)
482 mprotect_w_x(start, end, 0);
485 static void end_tcache_write(void *start, void *end)
487 #if defined(__arm__) || defined(__aarch64__)
488 size_t len = (char *)end - (char *)start;
489 #if defined(__BLACKBERRY_QNX__)
490 msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
491 #elif defined(__MACH__)
492 sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
494 sceKernelSyncVMDomain(sceBlock, start, len);
496 ctr_flush_invalidate_cache();
497 #elif defined(HAVE_LIBNX)
498 if (g_jit.type == JitType_CodeMemory) {
499 armDCacheClean(start, len);
500 armICacheInvalidate((char *)start - ndrc_write_ofs, len);
501 // as of v4.2.1 libnx lacks isb
502 __asm__ volatile("isb" ::: "memory");
504 #elif defined(__aarch64__)
505 // as of 2021, __clear_cache() is still broken on arm64
506 // so here is a custom one :(
507 clear_cache_arm64(start, end);
509 __clear_cache(start, end);
514 mprotect_w_x(start, end, 1);
517 static void *start_block(void)
519 u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
520 if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
521 end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
522 start_tcache_write(NDRC_WRITE_OFFSET(out), NDRC_WRITE_OFFSET(end));
526 static void end_block(void *start)
528 end_tcache_write(NDRC_WRITE_OFFSET(start), NDRC_WRITE_OFFSET(out));
531 #ifdef NDRC_CACHE_FLUSH_ALL
533 static int needs_clear_cache;
535 static void mark_clear_cache(void *target)
537 if (!needs_clear_cache) {
538 start_tcache_write(NDRC_WRITE_OFFSET(ndrc), NDRC_WRITE_OFFSET(ndrc + 1));
539 needs_clear_cache = 1;
543 static void do_clear_cache(void)
545 if (needs_clear_cache) {
546 end_tcache_write(NDRC_WRITE_OFFSET(ndrc), NDRC_WRITE_OFFSET(ndrc + 1));
547 needs_clear_cache = 0;
553 // also takes care of w^x mappings when patching code
554 static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
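// Layout note: one bit per 4KB page of the translation cache, so each u_int
// element covers 32 * 4KB = 128KB (1 << 17), matching the i*131072 + j*4096
// walk in do_clear_cache() below.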
556 static void mark_clear_cache(void *target)
558 uintptr_t offset = (u_char *)target - ndrc->translation_cache;
559 u_int mask = 1u << ((offset >> 12) & 31);
560 if (!(needs_clear_cache[offset >> 17] & mask)) {
561 char *start = (char *)NDRC_WRITE_OFFSET((uintptr_t)target & ~4095l);
562 start_tcache_write(start, start + 4095);
563 needs_clear_cache[offset >> 17] |= mask;
567 // Clearing the cache is rather slow on ARM Linux, so mark the areas
568 // that need to be cleared, and then only clear these areas once.
569 static void do_clear_cache(void)
572 for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
574 u_int bitmap = needs_clear_cache[i];
577 for (j = 0; j < 32; j++)
580 if (!(bitmap & (1u << j)))
583 start = ndrc->translation_cache + i*131072 + j*4096;
585 for (j++; j < 32; j++) {
586 if (!(bitmap & (1u << j)))
590 end_tcache_write(NDRC_WRITE_OFFSET(start), NDRC_WRITE_OFFSET(end));
592 needs_clear_cache[i] = 0;
596 #endif // NDRC_CACHE_FLUSH_ALL
598 #define NO_CYCLE_PENALTY_THR 12
600 int cycle_multiplier_old;
601 static int cycle_multiplier_active;
603 static int CLOCK_ADJUST(int x)
605 int m = cycle_multiplier_active;
606 int s = (x >> 31) | 1;
607 return (x * m + s * 50) / 100;
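// Example: with cycle_multiplier_active == 150 (i.e. 1.5x), CLOCK_ADJUST(3) =
// (3*150 + 50)/100 = 5 - the guest cycle count scaled and rounded half away
// from zero; 's' makes the +/-50 rounding bias follow the sign of x.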
610 static int ds_writes_rjump_rs(int i)
612 return dops[i].rs1 != 0
613 && (dops[i].rs1 == dops[i+1].rt1 || dops[i].rs1 == dops[i+1].rt2
614 || dops[i].rs1 == dops[i].rt1); // overwrites itself - same effect
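// Example: "jr $t1; lw $t1, 0($sp)" - the delay slot overwrites the register
// holding the jump target, so the target must be read before the slot executes.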
617 // psx addr mirror masking (for invalidation)
618 static u_int pmmask(u_int vaddr)
620 vaddr &= ~0xe0000000;
621 if (vaddr < 0x01000000)
622 vaddr &= ~0x00e00000; // RAM mirrors
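// Example: 0x803ff000 (kseg0) and 0xa05ff000 (kseg1, RAM mirror) both collapse
// to 0x001ff000, so a write through any mirror hits the same invalidation page.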
626 static u_int get_page(u_int vaddr)
628 u_int page = pmmask(vaddr) >> 12;
629 if (page >= PAGE_COUNT / 2)
630 page = PAGE_COUNT / 2 + (page & (PAGE_COUNT / 2 - 1));
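// With PAGE_COUNT == 1024 this gives RAM a 1:1 mapping (pages 0..511, one per
// 4KB page), while BIOS/scratchpad/other addresses are folded into pages
// 512..1023 and may therefore share a page with unrelated blocks.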
634 // get a page for looking for a block that has vaddr
635 // (needed because the block may start in previous page)
636 static u_int get_page_prev(u_int vaddr)
638 assert(MAXBLOCK <= (1 << 12));
639 u_int page = get_page(vaddr);
645 static struct ht_entry *hash_table_get(u_int vaddr)
647 return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
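// The hash folds the upper halfword into the lower one to pick one of 65536
// two-entry bins; hash_table_add() below keeps the most recent translation in
// slot 0 and demotes the previous one to slot 1.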
650 static void hash_table_add(u_int vaddr, void *tcaddr)
652 struct ht_entry *ht_bin = hash_table_get(vaddr);
654 ht_bin->vaddr[1] = ht_bin->vaddr[0];
655 ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
656 ht_bin->vaddr[0] = vaddr;
657 ht_bin->tcaddr[0] = tcaddr;
660 static void hash_table_remove(int vaddr)
662 //printf("remove hash: %x\n",vaddr);
663 struct ht_entry *ht_bin = hash_table_get(vaddr);
664 if (ht_bin->vaddr[1] == vaddr) {
665 ht_bin->vaddr[1] = -1;
666 ht_bin->tcaddr[1] = NULL;
668 if (ht_bin->vaddr[0] == vaddr) {
669 ht_bin->vaddr[0] = ht_bin->vaddr[1];
670 ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
671 ht_bin->vaddr[1] = -1;
672 ht_bin->tcaddr[1] = NULL;
676 static void mark_invalid_code(u_int vaddr, u_int len, char invalid)
678 u_int vaddr_m = vaddr & 0x1fffffff;
680 for (i = vaddr_m & ~0xfff; i < vaddr_m + len; i += 0x1000) {
681 // ram mirrors, but should not hurt bios
682 for (j = 0; j < 0x800000; j += 0x200000) {
683 invalid_code[(i|j) >> 12] =
684 invalid_code[(i|j|0x80000000u) >> 12] =
685 invalid_code[(i|j|0xa0000000u) >> 12] = invalid;
688 if (!invalid && vaddr + len > inv_code_start && vaddr <= inv_code_end)
689 inv_code_start = inv_code_end = ~0;
692 static int doesnt_expire_soon(u_char *tcaddr)
694 u_int diff = (u_int)(tcaddr - out) & ((1u << TARGET_SIZE_2) - 1u);
695 return diff > EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE;
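// The translation cache is used as a ring buffer: 'diff' is how far ahead of
// the current write pointer 'out' the block lives (mod the cache size). Blocks
// within EXPIRITY_OFFSET + MAX_OUTPUT_BLOCK_SIZE of 'out' are about to be
// reclaimed by expiry, so their addresses are not handed out.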
698 static unused void check_for_block_changes(u_int start, u_int end)
700 u_int start_page = get_page_prev(start);
701 u_int end_page = get_page(end - 1);
704 for (page = start_page; page <= end_page; page++) {
705 struct block_info *block;
706 for (block = blocks[page]; block != NULL; block = block->next) {
709 if (memcmp(block->source, block->copy, block->len)) {
710 printf("bad block %08x-%08x %016llx %016llx @%08x\n",
711 block->start, block->start + block->len,
712 *(long long *)block->source, *(long long *)block->copy, psxRegs.pc);
720 static void *try_restore_block(u_int vaddr, u_int start_page, u_int end_page)
722 void *found_clean = NULL;
725 stat_inc(stat_restore_tries);
726 for (page = start_page; page <= end_page; page++) {
727 struct block_info *block;
728 for (block = blocks[page]; block != NULL; block = block->next) {
729 if (vaddr < block->start)
731 if (!block->is_dirty || vaddr >= block->start + block->len)
733 for (i = 0; i < block->jump_in_cnt; i++)
734 if (block->jump_in[i].vaddr == vaddr)
736 if (i == block->jump_in_cnt)
738 assert(block->source && block->copy);
739 stat_inc(stat_restore_compares);
740 if (memcmp(block->source, block->copy, block->len))
743 block->is_dirty = block->inv_near_misses = 0;
744 found_clean = block->jump_in[i].addr;
745 hash_table_add(vaddr, found_clean);
746 mark_invalid_code(block->start, block->len, 0);
747 stat_inc(stat_bc_restore);
748 inv_debug("INV: restored %08x %p (%d)\n", vaddr, found_clean, block->jump_in_cnt);
755 // this doesn't normally happen
756 static noinline u_int generate_exception(u_int pc)
758 //if (execBreakCheck(&psxRegs, pc))
759 // return psxRegs.pc;
761 // generate an address or bus error
762 psxRegs.CP0.n.Cause &= 0x300;
763 psxRegs.CP0.n.EPC = pc;
765 psxRegs.CP0.n.Cause |= R3000E_AdEL << 2;
766 psxRegs.CP0.n.BadVAddr = pc;
771 psxRegs.CP0.n.Cause |= R3000E_IBE << 2;
772 return (psxRegs.pc = 0x80000080);
775 // Get address from virtual address
776 // This is called from the recompiled JR/JALR instructions
777 static void noinline *get_addr(u_int vaddr, int can_compile)
779 u_int start_page = get_page_prev(vaddr);
780 u_int i, page, end_page = get_page(vaddr);
781 void *found_clean = NULL;
783 stat_inc(stat_jump_in_lookups);
784 for (page = start_page; page <= end_page; page++) {
785 const struct block_info *block;
786 for (block = blocks[page]; block != NULL; block = block->next) {
787 if (vaddr < block->start)
789 if (block->is_dirty || vaddr >= block->start + block->len)
791 for (i = 0; i < block->jump_in_cnt; i++)
792 if (block->jump_in[i].vaddr == vaddr)
794 if (i == block->jump_in_cnt)
796 found_clean = block->jump_in[i].addr;
797 hash_table_add(vaddr, found_clean);
801 found_clean = try_restore_block(vaddr, start_page, end_page);
808 int r = new_recompile_block(vaddr);
810 return ndrc_get_addr_ht(vaddr);
812 return ndrc_get_addr_ht(generate_exception(vaddr));
815 // Look up address in hash table first
816 void *ndrc_get_addr_ht_param(u_int vaddr, int can_compile)
818 //check_for_block_changes(vaddr, vaddr + MAXBLOCK);
819 const struct ht_entry *ht_bin = hash_table_get(vaddr);
820 u_int vaddr_a = vaddr & ~3;
821 stat_inc(stat_ht_lookups);
822 if (ht_bin->vaddr[0] == vaddr_a) return ht_bin->tcaddr[0];
823 if (ht_bin->vaddr[1] == vaddr_a) return ht_bin->tcaddr[1];
824 return get_addr(vaddr, can_compile);
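// Lookup order: the 2-way hash table above is the fast path; get_addr() then
// walks the per-page block lists, tries try_restore_block(), and finally either
// compiles the block or raises a fetch exception for a bogus address.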
827 void *ndrc_get_addr_ht(u_int vaddr)
829 return ndrc_get_addr_ht_param(vaddr, 1);
832 static void clear_all_regs(signed char regmap[])
834 memset(regmap, -1, sizeof(regmap[0]) * HOST_REGS);
837 // get_reg: get allocated host reg from mips reg
838 // returns -1 if no such mips reg was allocated
839 #if defined(__arm__) && defined(HAVE_ARMV6) && HOST_REGS == 13 && EXCLUDE_REG == 11
841 extern signed char get_reg(const signed char regmap[], signed char r);
845 static signed char get_reg(const signed char regmap[], signed char r)
848 for (hr = 0; hr < HOST_REGS; hr++) {
849 if (hr == EXCLUDE_REG)
859 // get reg suitable for writing
860 static signed char get_reg_w(const signed char regmap[], signed char r)
862 return r == 0 ? -1 : get_reg(regmap, r);
865 // get reg as mask bit (1 << hr)
866 static u_int get_regm(const signed char regmap[], signed char r)
868 return (1u << (get_reg(regmap, r) & 31)) & ~(1u << 31);
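// Trick: when get_reg() returns -1 (not allocated), -1 & 31 == 31 selects bit
// 31, which the final mask then clears - so the result is 0 without a branch.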
871 static signed char get_reg_temp(const signed char regmap[])
874 for (hr = 0; hr < HOST_REGS; hr++) {
875 if (hr == EXCLUDE_REG)
877 if (regmap[hr] == (signed char)-1)
883 // Find a register that is available for two consecutive cycles
884 static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
887 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
891 // reverse reg map: mips -> host
892 #define RRMAP_SIZE 64
893 static void make_rregs(const signed char regmap[], signed char rrmap[RRMAP_SIZE],
894 u_int *regs_can_change)
896 u_int r, hr, hr_can_change = 0;
897 memset(rrmap, -1, RRMAP_SIZE);
898 for (hr = 0; hr < HOST_REGS; )
901 rrmap[r & (RRMAP_SIZE - 1)] = hr;
902 // only add mips $1-$31+$lo, others shifted out
903 hr_can_change |= (uint64_t)1 << (hr + ((r - 1) & 32));
905 if (hr == EXCLUDE_REG)
908 hr_can_change |= 1u << (rrmap[33] & 31);
909 hr_can_change |= 1u << (rrmap[CCREG] & 31);
910 hr_can_change &= ~(1u << 31);
911 *regs_can_change = hr_can_change;
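// regs_can_change ends up as a mask of the host regs that currently hold guest
// $1..$31, lo/hi or CCREG - i.e. values that executing guest code could change,
// as opposed to temporaries and constants.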
914 // same as get_reg, but takes rrmap
915 static signed char get_rreg(signed char rrmap[RRMAP_SIZE], signed char r)
917 assert(0 <= r && r < RRMAP_SIZE);
921 static int count_free_regs(const signed char regmap[])
925 for(hr=0;hr<HOST_REGS;hr++)
927 if(hr!=EXCLUDE_REG) {
928 if(regmap[hr]<0) count++;
934 static void dirty_reg(struct regstat *cur, signed char reg)
938 hr = get_reg(cur->regmap, reg);
943 static void set_const(struct regstat *cur, signed char reg, uint32_t value)
947 hr = get_reg(cur->regmap, reg);
949 cur->isconst |= 1<<hr;
950 current_constmap[hr] = value;
954 static void clear_const(struct regstat *cur, signed char reg)
958 hr = get_reg(cur->regmap, reg);
960 cur->isconst &= ~(1<<hr);
963 static int is_const(const struct regstat *cur, signed char reg)
966 if (reg < 0) return 0;
968 hr = get_reg(cur->regmap, reg);
970 return (cur->isconst>>hr)&1;
974 static uint32_t get_const(const struct regstat *cur, signed char reg)
978 hr = get_reg(cur->regmap, reg);
980 return current_constmap[hr];
982 SysPrintf("Unknown constant in r%d\n", reg);
986 // Least soon needed registers
987 // Look at the next ten instructions and see which registers
988 // will be used. Try not to reallocate these.
989 static void lsn(u_char hsn[], int i)
999 if (dops[i+j].is_ujump)
1001 // Don't go past an unconditional jump
1008 if(dops[i+j].rs1) hsn[dops[i+j].rs1]=j;
1009 if(dops[i+j].rs2) hsn[dops[i+j].rs2]=j;
1010 if(dops[i+j].rt1) hsn[dops[i+j].rt1]=j;
1011 if(dops[i+j].rt2) hsn[dops[i+j].rt2]=j;
1012 if(dops[i+j].itype==STORE || dops[i+j].itype==STORELR) {
1013 // Stores can allocate zero
1014 hsn[dops[i+j].rs1]=j;
1015 hsn[dops[i+j].rs2]=j;
1017 if (ram_offset && (dops[i+j].is_load || dops[i+j].is_store))
1019 // On some architectures stores need invc_ptr
1020 #if defined(HOST_IMM8)
1021 if (dops[i+j].is_store)
1024 if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
1032 if(cinfo[i+b].ba>=start && cinfo[i+b].ba<(start+slen*4))
1034 // Follow first branch
1035 int t=(cinfo[i+b].ba-start)>>2;
1036 j=7-b;if(t+j>=slen) j=slen-t-1;
1039 if(dops[t+j].rs1) if(hsn[dops[t+j].rs1]>j+b+2) hsn[dops[t+j].rs1]=j+b+2;
1040 if(dops[t+j].rs2) if(hsn[dops[t+j].rs2]>j+b+2) hsn[dops[t+j].rs2]=j+b+2;
1041 //if(dops[t+j].rt1) if(hsn[dops[t+j].rt1]>j+b+2) hsn[dops[t+j].rt1]=j+b+2;
1042 //if(dops[t+j].rt2) if(hsn[dops[t+j].rt2]>j+b+2) hsn[dops[t+j].rt2]=j+b+2;
1045 // TODO: preferred register based on backward branch
1047 // Delay slot should preferably not overwrite branch conditions or cycle count
1048 if (i > 0 && dops[i-1].is_jump) {
1049 if(dops[i-1].rs1) if(hsn[dops[i-1].rs1]>1) hsn[dops[i-1].rs1]=1;
1050 if(dops[i-1].rs2) if(hsn[dops[i-1].rs2]>1) hsn[dops[i-1].rs2]=1;
1052 // ...or hash tables
1056 // Coprocessor load/store needs FTEMP, even if not declared
1057 if(dops[i].itype==C2LS) {
1060 // Load/store L/R also uses FTEMP as a temporary register
1061 if (dops[i].itype == LOADLR || dops[i].itype == STORELR) {
1064 // Don't remove the miniht registers
1065 if(dops[i].itype==UJUMP||dops[i].itype==RJUMP)
1072 // We only want to allocate registers if we're going to use them again soon
1073 static int needed_again(int r, int i)
1079 if (i > 0 && dops[i-1].is_ujump)
1081 if(cinfo[i-1].ba<start || cinfo[i-1].ba>start+slen*4-4)
1082 return 0; // Don't need any registers if exiting the block
1090 if (dops[i+j].is_ujump)
1092 // Don't go past an unconditional jump
1096 if (dops[i+j].is_exception)
1103 if(dops[i+j].rs1==r) rn=j;
1104 if(dops[i+j].rs2==r) rn=j;
1105 if((unneeded_reg[i+j]>>r)&1) rn=10;
1106 if(i+j>=0&&(dops[i+j].itype==UJUMP||dops[i+j].itype==CJUMP||dops[i+j].itype==SJUMP))
1116 // Try to match register allocations at the end of a loop with those at the beginning of the loop
1118 static int loop_reg(int i, int r, int hr)
1127 if (dops[i+j].is_ujump)
1129 // Don't go past an unconditional jump
1136 if(dops[i-1].itype==UJUMP||dops[i-1].itype==CJUMP||dops[i-1].itype==SJUMP)
1142 if((unneeded_reg[i+k]>>r)&1) return hr;
1143 if(i+k>=0&&(dops[i+k].itype==UJUMP||dops[i+k].itype==CJUMP||dops[i+k].itype==SJUMP))
1145 if(cinfo[i+k].ba>=start && cinfo[i+k].ba<(start+i*4))
1147 int t=(cinfo[i+k].ba-start)>>2;
1148 int reg=get_reg(regs[t].regmap_entry,r);
1149 if(reg>=0) return reg;
1150 //reg=get_reg(regs[t+1].regmap_entry,r);
1151 //if(reg>=0) return reg;
1159 // Allocate every register, preserving source/target regs
1160 static void alloc_all(struct regstat *cur,int i)
1164 for(hr=0;hr<HOST_REGS;hr++) {
1165 if(hr!=EXCLUDE_REG) {
1166 if((cur->regmap[hr]!=dops[i].rs1)&&(cur->regmap[hr]!=dops[i].rs2)&&
1167 (cur->regmap[hr]!=dops[i].rt1)&&(cur->regmap[hr]!=dops[i].rt2))
1170 cur->dirty&=~(1<<hr);
1173 if(cur->regmap[hr]==0)
1176 cur->dirty&=~(1<<hr);
1183 static int host_tempreg_in_use;
1185 static void host_tempreg_acquire(void)
1187 assert(!host_tempreg_in_use);
1188 host_tempreg_in_use = 1;
1191 static void host_tempreg_release(void)
1193 host_tempreg_in_use = 0;
1196 static void host_tempreg_acquire(void) {}
1197 static void host_tempreg_release(void) {}
1201 extern void gen_interupt();
1202 extern void do_insn_cmp();
1203 #define FUNCNAME(f) { f, " " #f }
1204 static const struct {
1207 } function_names[] = {
1208 FUNCNAME(cc_interrupt),
1209 FUNCNAME(gen_interupt),
1210 FUNCNAME(ndrc_get_addr_ht),
1211 FUNCNAME(jump_handler_read8),
1212 FUNCNAME(jump_handler_read16),
1213 FUNCNAME(jump_handler_read32),
1214 FUNCNAME(jump_handler_write8),
1215 FUNCNAME(jump_handler_write16),
1216 FUNCNAME(jump_handler_write32),
1217 FUNCNAME(ndrc_write_invalidate_one),
1218 FUNCNAME(ndrc_write_invalidate_many),
1219 FUNCNAME(jump_to_new_pc),
1220 FUNCNAME(jump_break),
1221 FUNCNAME(jump_break_ds),
1222 FUNCNAME(jump_syscall),
1223 FUNCNAME(jump_syscall_ds),
1224 FUNCNAME(jump_overflow),
1225 FUNCNAME(jump_overflow_ds),
1226 FUNCNAME(jump_addrerror),
1227 FUNCNAME(jump_addrerror_ds),
1228 FUNCNAME(call_gteStall),
1229 FUNCNAME(new_dyna_leave),
1230 FUNCNAME(pcsx_mtc0),
1231 FUNCNAME(pcsx_mtc0_ds),
1234 FUNCNAME(do_memhandler_pre),
1235 FUNCNAME(do_memhandler_post),
1239 FUNCNAME(do_insn_cmp_arm64),
1241 FUNCNAME(do_insn_cmp),
1246 static const char *func_name(const void *a)
1249 for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
1250 if (function_names[i].addr == a)
1251 return function_names[i].name;
1255 static const char *fpofs_name(u_int ofs)
1257 u_int *p = (u_int *)&dynarec_local + ofs/sizeof(u_int);
1258 static char buf[64];
1260 #define ofscase(x) case LO_##x: return " ; " #x
1261 ofscase(next_interupt);
1262 ofscase(cycle_count);
1263 ofscase(last_count);
1264 ofscase(pending_exception);
1275 ofscase(ram_offset);
1279 if (psxRegs.GPR.r <= p && p < &psxRegs.GPR.r[32])
1280 snprintf(buf, sizeof(buf), " ; r%d", (int)(p - psxRegs.GPR.r));
1281 else if (psxRegs.CP0.r <= p && p < &psxRegs.CP0.r[32])
1282 snprintf(buf, sizeof(buf), " ; cp0 $%d", (int)(p - psxRegs.CP0.r));
1283 else if (psxRegs.CP2D.r <= p && p < &psxRegs.CP2D.r[32])
1284 snprintf(buf, sizeof(buf), " ; cp2d $%d", (int)(p - psxRegs.CP2D.r));
1285 else if (psxRegs.CP2C.r <= p && p < &psxRegs.CP2C.r[32])
1286 snprintf(buf, sizeof(buf), " ; cp2c $%d", (int)(p - psxRegs.CP2C.r));
1290 #define func_name(x) ""
1291 #define fpofs_name(x) ""
1295 #include "assem_x86.c"
1298 #include "assem_x64.c"
1301 #include "assem_arm.c"
1304 #include "assem_arm64.c"
1307 static void *get_trampoline(const void *f)
1309 struct ndrc_tramp *tramp = NDRC_WRITE_OFFSET(&ndrc->tramp);
1312 for (i = 0; i < ARRAY_SIZE(tramp->f); i++) {
1313 if (tramp->f[i] == f || tramp->f[i] == NULL)
1316 if (i == ARRAY_SIZE(tramp->f)) {
1317 SysPrintf("trampoline table is full, last func %p\n", f);
1320 if (tramp->f[i] == NULL) {
1321 start_tcache_write(&tramp->f[i], &tramp->f[i + 1]);
1323 end_tcache_write(&tramp->f[i], &tramp->f[i + 1]);
1325 // invalidate the RX mirror (unsure if necessary, but just in case...)
1326 armDCacheFlush(&ndrc->tramp.f[i], sizeof(ndrc->tramp.f[i]));
1329 return &ndrc->tramp.ops[i];
1332 static void emit_far_jump(const void *f)
1334 if (can_jump_or_call(f)) {
1339 f = get_trampoline(f);
1343 static void emit_far_call(const void *f)
1345 if (can_jump_or_call(f)) {
1350 f = get_trampoline(f);
1354 // Check if an address is already compiled
1355 // but don't return addresses which are about to expire from the cache
1356 static void *check_addr(u_int vaddr)
1358 struct ht_entry *ht_bin = hash_table_get(vaddr);
1360 for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
1361 if (ht_bin->vaddr[i] == vaddr)
1362 if (doesnt_expire_soon(ht_bin->tcaddr[i]))
1363 return ht_bin->tcaddr[i];
1366 // refactor to get_addr_nocompile?
1367 u_int start_page = get_page_prev(vaddr);
1368 u_int page, end_page = get_page(vaddr);
1370 stat_inc(stat_jump_in_lookups);
1371 for (page = start_page; page <= end_page; page++) {
1372 const struct block_info *block;
1373 for (block = blocks[page]; block != NULL; block = block->next) {
1374 if (vaddr < block->start)
1376 if (block->is_dirty || vaddr >= block->start + block->len)
1378 if (!doesnt_expire_soon(ndrc->translation_cache + block->tc_offs))
1380 for (i = 0; i < block->jump_in_cnt; i++)
1381 if (block->jump_in[i].vaddr == vaddr)
1383 if (i == block->jump_in_cnt)
1386 // Update existing entry with current address
1387 void *addr = block->jump_in[i].addr;
1388 if (ht_bin->vaddr[0] == vaddr) {
1389 ht_bin->tcaddr[0] = addr;
1392 if (ht_bin->vaddr[1] == vaddr) {
1393 ht_bin->tcaddr[1] = addr;
1396 // Insert into hash table with low priority.
1397 // Don't evict existing entries, as they are probably
1398 // addresses that are being accessed frequently.
1399 if (ht_bin->vaddr[0] == -1) {
1400 ht_bin->vaddr[0] = vaddr;
1401 ht_bin->tcaddr[0] = addr;
1403 else if (ht_bin->vaddr[1] == -1) {
1404 ht_bin->vaddr[1] = vaddr;
1405 ht_bin->tcaddr[1] = addr;
1413 static void blocks_clear(struct block_info **head)
1415 struct block_info *cur, *next;
1417 if ((cur = *head)) {
1427 static int blocks_remove_matching_addrs(struct block_info **head,
1428 u_int base_offs, int shift)
1430 struct block_info *next;
1433 if ((((*head)->tc_offs ^ base_offs) >> shift) == 0) {
1434 inv_debug("EXP: rm block %08x (tc_offs %x)\n", (*head)->start, (*head)->tc_offs);
1435 invalidate_block(*head);
1436 next = (*head)->next;
1439 stat_dec(stat_blocks);
1444 head = &((*head)->next);
1450 // This is called when we write to a compiled block (see do_invstub)
1451 static void unlink_jumps_vaddr_range(u_int start, u_int end)
1453 u_int page, start_page = get_page(start), end_page = get_page(end - 1);
1456 for (page = start_page; page <= end_page; page++) {
1457 struct jump_info *ji = jumps[page];
1460 for (i = 0; i < ji->count; ) {
1461 if (ji->e[i].target_vaddr < start || ji->e[i].target_vaddr >= end) {
1466 inv_debug("INV: rm link to %08x (tc_offs %zx)\n", ji->e[i].target_vaddr,
1467 (u_char *)ji->e[i].stub - ndrc->translation_cache);
1468 void *host_addr = find_extjump_insn(ji->e[i].stub);
1469 mark_clear_cache(host_addr);
1470 set_jump_target(host_addr, ji->e[i].stub); // point back to dyna_linker stub
1472 stat_dec(stat_links);
1474 if (i < ji->count) {
1475 ji->e[i] = ji->e[ji->count];
1483 static void unlink_jumps_tc_range(struct jump_info *ji, u_int base_offs, int shift)
1488 for (i = 0; i < ji->count; ) {
1489 u_int tc_offs = (u_char *)ji->e[i].stub - ndrc->translation_cache;
1490 if (((tc_offs ^ base_offs) >> shift) != 0) {
1495 inv_debug("EXP: rm link to %08x (tc_offs %x)\n", ji->e[i].target_vaddr, tc_offs);
1496 stat_dec(stat_links);
1498 if (i < ji->count) {
1499 ji->e[i] = ji->e[ji->count];
1506 static void invalidate_block(struct block_info *block)
1510 block->is_dirty = 1;
1511 unlink_jumps_vaddr_range(block->start, block->start + block->len);
1512 for (i = 0; i < block->jump_in_cnt; i++)
1513 hash_table_remove(block->jump_in[i].vaddr);
1516 static int invalidate_range(u_int start, u_int end,
1517 u32 *inv_start_ret, u32 *inv_end_ret)
1519 struct block_info *last_block = NULL;
1520 u_int start_page = get_page_prev(start);
1521 u_int end_page = get_page(end - 1);
1522 u_int start_m = pmmask(start);
1523 u_int end_m = pmmask(end - 1);
1524 u_int inv_start, inv_end;
1525 u_int blk_start_m, blk_end_m;
1529 // additional area without code (to supplement invalid_code[]), [start, end)
1530 // avoids excessive ndrc_write_invalidate*() calls
1531 inv_start = start_m & ~0xfff;
1532 inv_end = end_m | 0xfff;
1534 for (page = start_page; page <= end_page; page++) {
1535 struct block_info *block;
1536 for (block = blocks[page]; block != NULL; block = block->next) {
1537 if (block->is_dirty)
1540 blk_end_m = pmmask(block->start + block->len);
1541 if (blk_end_m <= start_m) {
1542 inv_start = max(inv_start, blk_end_m);
1545 blk_start_m = pmmask(block->start);
1546 if (end_m <= blk_start_m) {
1547 inv_end = min(inv_end, blk_start_m - 1);
1550 if (!block->source) // "hack" block - leave it alone
1554 invalidate_block(block);
1555 stat_inc(stat_inv_hits);
1559 if (!hit && last_block && last_block->source) {
1560 // could be some leftover unused block, uselessly trapping writes
1561 last_block->inv_near_misses++;
1562 if (last_block->inv_near_misses > 128) {
1563 invalidate_block(last_block);
1564 stat_inc(stat_inv_hits);
1571 memset(mini_ht, -1, sizeof(mini_ht));
1575 if (inv_start <= (start_m & ~0xfff) && inv_end >= (start_m | 0xfff))
1576 // the whole page is empty now
1577 mark_invalid_code(start, 1, 1);
1579 if (inv_start_ret) *inv_start_ret = inv_start | (start & 0xe0000000);
1580 if (inv_end_ret) *inv_end_ret = inv_end | (end & 0xe0000000);
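// The returned [inv_start, inv_end] range is known to contain no compiled code;
// the 0xe0000000 bits restore the caller's segment since pmmask() stripped it.
// ndrc_write_invalidate_many() stores it as inv_code_start/end so later writes
// inside the range can skip invalidation entirely.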
1584 void new_dynarec_invalidate_range(unsigned int start, unsigned int end)
1586 invalidate_range(start, end, NULL, NULL);
1589 static void ndrc_write_invalidate_many(u_int start, u_int end)
1591 // this check is done by the caller
1592 //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1593 int ret = invalidate_range(start, end, &inv_code_start, &inv_code_end);
1595 int invc = invalid_code[start >> 12];
1596 u_int len = end - start;
1598 printf("INV ADDR: %08x/%02x hit %d blocks\n", start, len, ret);
1600 printf("INV ADDR: %08x/%02x miss, inv %08x-%08x invc %d->%d\n", start, len,
1601 inv_code_start, inv_code_end, invc, invalid_code[start >> 12]);
1602 check_for_block_changes(start, end);
1604 stat_inc(stat_inv_addr_calls);
1608 void ndrc_write_invalidate_one(u_int addr)
1610 ndrc_write_invalidate_many(addr, addr + 4);
1613 // This is called when loading a save state.
1614 // Anything could have changed, so invalidate everything.
1615 void new_dynarec_invalidate_all_pages(void)
1617 struct block_info *block;
1619 for (page = 0; page < ARRAY_SIZE(blocks); page++) {
1620 for (block = blocks[page]; block != NULL; block = block->next) {
1621 if (block->is_dirty)
1623 if (!block->source) // hack block?
1625 invalidate_block(block);
1630 memset(mini_ht, -1, sizeof(mini_ht));
1635 // Add an entry to jump_out after making a link
1636 // src should point to the code emitted by emit_extjump()
1637 void ndrc_add_jump_out(u_int vaddr, void *src)
1639 inv_debug("ndrc_add_jump_out: %p -> %x\n", src, vaddr);
1640 u_int page = get_page(vaddr);
1641 struct jump_info *ji;
1643 stat_inc(stat_links);
1644 check_extjump2(src);
1647 ji = malloc(sizeof(*ji) + sizeof(ji->e[0]) * 16);
1651 else if (ji->count >= ji->alloc) {
1653 ji = realloc(ji, sizeof(*ji) + sizeof(ji->e[0]) * ji->alloc);
1656 ji->e[ji->count].target_vaddr = vaddr;
1657 ji->e[ji->count].stub = src;
1661 /* Register allocation */
1663 static void alloc_set(struct regstat *cur, int reg, int hr)
1665 cur->regmap[hr] = reg;
1666 cur->dirty &= ~(1u << hr);
1667 cur->isconst &= ~(1u << hr);
1668 cur->noevict |= 1u << hr;
1671 static void evict_alloc_reg(struct regstat *cur, int i, int reg, int preferred_hr)
1673 u_char hsn[MAXREG+1];
1675 memset(hsn, 10, sizeof(hsn));
1677 //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1679 // Don't evict the cycle count at entry points, otherwise the entry
1680 // stub will have to write it.
1681 if(dops[i].bt&&hsn[CCREG]>2) hsn[CCREG]=2;
1682 if (i>1 && hsn[CCREG] > 2 && dops[i-2].is_jump) hsn[CCREG]=2;
1685 // Alloc preferred register if available
1686 if (!((cur->noevict >> preferred_hr) & 1)
1687 && hsn[cur->regmap[preferred_hr]] == j)
1689 alloc_set(cur, reg, preferred_hr);
1692 for(r=1;r<=MAXREG;r++)
1694 if(hsn[r]==j&&r!=dops[i-1].rs1&&r!=dops[i-1].rs2&&r!=dops[i-1].rt1&&r!=dops[i-1].rt2) {
1695 for(hr=0;hr<HOST_REGS;hr++) {
1696 if (hr == EXCLUDE_REG || ((cur->noevict >> hr) & 1))
1698 if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1699 if(cur->regmap[hr]==r) {
1700 alloc_set(cur, reg, hr);
1711 for(r=1;r<=MAXREG;r++)
1714 for(hr=0;hr<HOST_REGS;hr++) {
1715 if (hr == EXCLUDE_REG || ((cur->noevict >> hr) & 1))
1717 if(cur->regmap[hr]==r) {
1718 alloc_set(cur, reg, hr);
1725 SysPrintf("This shouldn't happen (evict_alloc_reg)\n");
1729 // Note: registers are allocated clean (unmodified state)
1730 // if you intend to modify the register, you must call dirty_reg().
1731 static void alloc_reg(struct regstat *cur,int i,signed char reg)
1734 int preferred_reg = PREFERRED_REG_FIRST
1735 + reg % (PREFERRED_REG_LAST - PREFERRED_REG_FIRST + 1);
1736 if (reg == CCREG) preferred_reg = HOST_CCREG;
1737 if (reg == PTEMP || reg == FTEMP) preferred_reg = 12;
1738 assert(PREFERRED_REG_FIRST != EXCLUDE_REG && EXCLUDE_REG != HOST_REGS);
1741 // Don't allocate unused registers
1742 if((cur->u>>reg)&1) return;
1744 // see if it's already allocated
1745 if ((hr = get_reg(cur->regmap, reg)) >= 0) {
1746 cur->noevict |= 1u << hr;
1750 // Keep the same mapping if the register was already allocated in a loop
1751 preferred_reg = loop_reg(i,reg,preferred_reg);
1753 // Try to allocate the preferred register
1754 if (cur->regmap[preferred_reg] == -1) {
1755 alloc_set(cur, reg, preferred_reg);
1758 r=cur->regmap[preferred_reg];
1761 alloc_set(cur, reg, preferred_reg);
1765 // Clear any unneeded registers
1766 // We try to keep the mapping consistent, if possible, because it
1767 // makes branches easier (especially loops). So we try to allocate
1768 // first (see above) before removing old mappings. If this is not
1769 // possible then go ahead and clear out the registers that are no longer needed.
1771 for(hr=0;hr<HOST_REGS;hr++)
1776 if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
1780 // Try to allocate any available register, but prefer
1781 // registers that have not been used recently.
1783 for (hr = PREFERRED_REG_FIRST; ; ) {
1784 if (cur->regmap[hr] < 0) {
1785 int oldreg = regs[i-1].regmap[hr];
1786 if (oldreg < 0 || (oldreg != dops[i-1].rs1 && oldreg != dops[i-1].rs2
1787 && oldreg != dops[i-1].rt1 && oldreg != dops[i-1].rt2))
1789 alloc_set(cur, reg, hr);
1794 if (hr == EXCLUDE_REG)
1796 if (hr == HOST_REGS)
1798 if (hr == PREFERRED_REG_FIRST)
1803 // Try to allocate any available register
1804 for (hr = PREFERRED_REG_FIRST; ; ) {
1805 if (cur->regmap[hr] < 0) {
1806 alloc_set(cur, reg, hr);
1810 if (hr == EXCLUDE_REG)
1812 if (hr == HOST_REGS)
1814 if (hr == PREFERRED_REG_FIRST)
1818 // Ok, now we have to evict someone
1819 // Pick a register we hopefully won't need soon
1820 evict_alloc_reg(cur, i, reg, preferred_reg);
1823 // Allocate a temporary register. This is done without regard to
1824 // dirty status or whether the register we request is on the unneeded list
1825 // Note: This will only allocate one register, even if called multiple times
1826 static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
1830 // see if it's already allocated
1831 for (hr = 0; hr < HOST_REGS; hr++)
1833 if (hr != EXCLUDE_REG && cur->regmap[hr] == reg) {
1834 cur->noevict |= 1u << hr;
1839 // Try to allocate any available register
1840 for(hr=HOST_REGS-1;hr>=0;hr--) {
1841 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1842 alloc_set(cur, reg, hr);
1847 // Find an unneeded register
1848 for(hr=HOST_REGS-1;hr>=0;hr--)
1854 if(i==0||((unneeded_reg[i-1]>>r)&1)) {
1855 alloc_set(cur, reg, hr);
1862 // Ok, now we have to evict someone
1863 // Pick a register we hopefully won't need soon
1864 evict_alloc_reg(cur, i, reg, 0);
1867 static void mov_alloc(struct regstat *current,int i)
1869 if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) {
1870 alloc_cc(current,i); // for stalls
1871 dirty_reg(current,CCREG);
1874 // Note: Don't need to actually alloc the source registers
1875 //alloc_reg(current,i,dops[i].rs1);
1876 alloc_reg(current,i,dops[i].rt1);
1878 clear_const(current,dops[i].rs1);
1879 clear_const(current,dops[i].rt1);
1880 dirty_reg(current,dops[i].rt1);
1883 static void shiftimm_alloc(struct regstat *current,int i)
1885 if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
1888 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1889 else dops[i].use_lt1=!!dops[i].rs1;
1890 alloc_reg(current,i,dops[i].rt1);
1891 dirty_reg(current,dops[i].rt1);
1892 if(is_const(current,dops[i].rs1)) {
1893 int v=get_const(current,dops[i].rs1);
1894 if(dops[i].opcode2==0x00) set_const(current,dops[i].rt1,v<<cinfo[i].imm);
1895 if(dops[i].opcode2==0x02) set_const(current,dops[i].rt1,(u_int)v>>cinfo[i].imm);
1896 if(dops[i].opcode2==0x03) set_const(current,dops[i].rt1,v>>cinfo[i].imm);
1898 else clear_const(current,dops[i].rt1);
1903 clear_const(current,dops[i].rs1);
1904 clear_const(current,dops[i].rt1);
1907 if(dops[i].opcode2>=0x38&&dops[i].opcode2<=0x3b) // DSLL/DSRL/DSRA
1911 if(dops[i].opcode2==0x3c) // DSLL32
1915 if(dops[i].opcode2==0x3e) // DSRL32
1919 if(dops[i].opcode2==0x3f) // DSRA32
1925 static void shift_alloc(struct regstat *current,int i)
1928 if(dops[i].rs1) alloc_reg(current,i,dops[i].rs1);
1929 if(dops[i].rs2) alloc_reg(current,i,dops[i].rs2);
1930 alloc_reg(current,i,dops[i].rt1);
1931 if(dops[i].rt1==dops[i].rs2) {
1932 alloc_reg_temp(current,i,-1);
1933 cinfo[i].min_free_regs=1;
1935 clear_const(current,dops[i].rs1);
1936 clear_const(current,dops[i].rs2);
1937 clear_const(current,dops[i].rt1);
1938 dirty_reg(current,dops[i].rt1);
1942 static void alu_alloc(struct regstat *current,int i)
1944 if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
1946 if(dops[i].rs1&&dops[i].rs2) {
1947 alloc_reg(current,i,dops[i].rs1);
1948 alloc_reg(current,i,dops[i].rs2);
1951 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1952 if(dops[i].rs2&&needed_again(dops[i].rs2,i)) alloc_reg(current,i,dops[i].rs2);
1954 alloc_reg(current,i,dops[i].rt1);
1956 if (dops[i].may_except) {
1957 alloc_cc_optional(current, i); // for exceptions
1958 alloc_reg_temp(current, i, -1);
1959 cinfo[i].min_free_regs = 1;
1962 else if(dops[i].opcode2==0x2a||dops[i].opcode2==0x2b) { // SLT/SLTU
1964 alloc_reg(current,i,dops[i].rs1);
1965 alloc_reg(current,i,dops[i].rs2);
1966 alloc_reg(current,i,dops[i].rt1);
1969 else if(dops[i].opcode2>=0x24&&dops[i].opcode2<=0x27) { // AND/OR/XOR/NOR
1971 if(dops[i].rs1&&dops[i].rs2) {
1972 alloc_reg(current,i,dops[i].rs1);
1973 alloc_reg(current,i,dops[i].rs2);
1977 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1978 if(dops[i].rs2&&needed_again(dops[i].rs2,i)) alloc_reg(current,i,dops[i].rs2);
1980 alloc_reg(current,i,dops[i].rt1);
1983 clear_const(current,dops[i].rs1);
1984 clear_const(current,dops[i].rs2);
1985 clear_const(current,dops[i].rt1);
1986 dirty_reg(current,dops[i].rt1);
1989 static void imm16_alloc(struct regstat *current,int i)
1991 if(dops[i].rs1&&needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
1992 else dops[i].use_lt1=!!dops[i].rs1;
1993 if(dops[i].rt1) alloc_reg(current,i,dops[i].rt1);
1994 if(dops[i].opcode==0x0a||dops[i].opcode==0x0b) { // SLTI/SLTIU
1995 clear_const(current,dops[i].rs1);
1996 clear_const(current,dops[i].rt1);
1998 else if(dops[i].opcode>=0x0c&&dops[i].opcode<=0x0e) { // ANDI/ORI/XORI
1999 if(is_const(current,dops[i].rs1)) {
2000 int v=get_const(current,dops[i].rs1);
2001 if(dops[i].opcode==0x0c) set_const(current,dops[i].rt1,v&cinfo[i].imm);
2002 if(dops[i].opcode==0x0d) set_const(current,dops[i].rt1,v|cinfo[i].imm);
2003 if(dops[i].opcode==0x0e) set_const(current,dops[i].rt1,v^cinfo[i].imm);
2005 else clear_const(current,dops[i].rt1);
2007 else if(dops[i].opcode==0x08||dops[i].opcode==0x09) { // ADDI/ADDIU
2008 if(is_const(current,dops[i].rs1)) {
2009 int v=get_const(current,dops[i].rs1);
2010 set_const(current,dops[i].rt1,v+cinfo[i].imm);
2012 else clear_const(current,dops[i].rt1);
2013 if (dops[i].may_except) {
2014 alloc_cc_optional(current, i); // for exceptions
2015 alloc_reg_temp(current, i, -1);
2016 cinfo[i].min_free_regs = 1;
2020 set_const(current,dops[i].rt1,cinfo[i].imm<<16); // LUI
2022 dirty_reg(current,dops[i].rt1);
2025 static void load_alloc(struct regstat *current,int i)
2028 clear_const(current,dops[i].rt1);
2029 //if(dops[i].rs1!=dops[i].rt1&&needed_again(dops[i].rs1,i)) clear_const(current,dops[i].rs1); // Does this help or hurt?
2030 if(!dops[i].rs1) current->u&=~1LL; // Allow allocating r0 if it's the source register
2031 if (needed_again(dops[i].rs1, i))
2032 alloc_reg(current, i, dops[i].rs1);
2034 alloc_reg(current, i, ROREG);
2035 if (dops[i].may_except) {
2036 alloc_cc_optional(current, i); // for exceptions
2039 if(dops[i].rt1&&!((current->u>>dops[i].rt1)&1)) {
2040 alloc_reg(current,i,dops[i].rt1);
2041 assert(get_reg_w(current->regmap, dops[i].rt1)>=0);
2042 dirty_reg(current,dops[i].rt1);
2043 // LWL/LWR need a temporary register for the old value
2044 if(dops[i].opcode==0x22||dops[i].opcode==0x26)
2046 alloc_reg(current,i,FTEMP);
2052 // Load to r0 or unneeded register (dummy load)
2053 // but we still need a register to calculate the address
2054 if(dops[i].opcode==0x22||dops[i].opcode==0x26)
2055 alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
2059 alloc_reg_temp(current, i, -1);
2060 cinfo[i].min_free_regs = 1;
2064 // this may eat up to 7 registers
2065 static void store_alloc(struct regstat *current, int i)
2067 clear_const(current,dops[i].rs2);
2068 if(!(dops[i].rs2)) current->u&=~1LL; // Allow allocating r0 if necessary
2069 if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
2070 alloc_reg(current,i,dops[i].rs2);
2072 alloc_reg(current, i, ROREG);
2073 #if defined(HOST_IMM8)
2074 // On CPUs without 32-bit immediates we need a pointer to invalid_code
2075 alloc_reg(current, i, INVCP);
2077 if (dops[i].opcode == 0x2a || dops[i].opcode == 0x2e) { // SWL/SWR
2078 alloc_reg(current,i,FTEMP);
2080 if (dops[i].may_except)
2081 alloc_cc_optional(current, i); // for exceptions
2082 // We need a temporary register for address generation
2083 alloc_reg_temp(current,i,-1);
2084 cinfo[i].min_free_regs=1;
2087 static void c2ls_alloc(struct regstat *current, int i)
2089 clear_const(current,dops[i].rt1);
2090 if(needed_again(dops[i].rs1,i)) alloc_reg(current,i,dops[i].rs1);
2091 alloc_reg(current,i,FTEMP);
2093 alloc_reg(current, i, ROREG);
2094 #if defined(HOST_IMM8)
2095 // On CPUs without 32-bit immediates we need a pointer to invalid_code
2096 if (dops[i].opcode == 0x3a) // SWC2
2097 alloc_reg(current,i,INVCP);
2099 if (dops[i].may_except)
2100 alloc_cc_optional(current, i); // for exceptions
2101 // We need a temporary register for address generation
2102 alloc_reg_temp(current,i,-1);
2103 cinfo[i].min_free_regs=1;
2106 #ifndef multdiv_alloc
2107 static void multdiv_alloc(struct regstat *current,int i)
2113 clear_const(current,dops[i].rs1);
2114 clear_const(current,dops[i].rs2);
2115 alloc_cc(current,i); // for stalls
2116 dirty_reg(current,CCREG);
2117 current->u &= ~(1ull << HIREG);
2118 current->u &= ~(1ull << LOREG);
2119 alloc_reg(current, i, HIREG);
2120 alloc_reg(current, i, LOREG);
2121 dirty_reg(current, HIREG);
2122 dirty_reg(current, LOREG);
2123 if ((dops[i].opcode2 & 0x3e) == 0x1a || (dops[i].rs1 && dops[i].rs2)) // div(u)
2125 alloc_reg(current, i, dops[i].rs1);
2126 alloc_reg(current, i, dops[i].rs2);
2128 // else multiply by zero is zero
2132 static void cop0_alloc(struct regstat *current,int i)
2134 if(dops[i].opcode2==0) // MFC0
2137 clear_const(current,dops[i].rt1);
2138 alloc_reg(current,i,dops[i].rt1);
2139 dirty_reg(current,dops[i].rt1);
2142 else if(dops[i].opcode2==4) // MTC0
2144 if (((source[i]>>11)&0x1e) == 12) {
2145 alloc_cc(current, i);
2146 dirty_reg(current, CCREG);
2149 clear_const(current,dops[i].rs1);
2150 alloc_reg(current,i,dops[i].rs1);
2151 alloc_all(current,i);
2154 alloc_all(current,i); // FIXME: Keep r0
2156 alloc_reg(current,i,0);
2158 cinfo[i].min_free_regs = HOST_REGS;
2162 static void rfe_alloc(struct regstat *current, int i)
2164 alloc_all(current, i);
2165 cinfo[i].min_free_regs = HOST_REGS;
2168 static void cop2_alloc(struct regstat *current,int i)
2170 if (dops[i].opcode2 < 3) // MFC2/CFC2
2172 alloc_cc(current,i); // for stalls
2173 dirty_reg(current,CCREG);
2175 clear_const(current,dops[i].rt1);
2176 alloc_reg(current,i,dops[i].rt1);
2177 dirty_reg(current,dops[i].rt1);
2180 else if (dops[i].opcode2 > 3) // MTC2/CTC2
2183 clear_const(current,dops[i].rs1);
2184 alloc_reg(current,i,dops[i].rs1);
2188 alloc_reg(current,i,0);
2191 alloc_reg_temp(current,i,-1);
2192 cinfo[i].min_free_regs=1;
2195 static void c2op_alloc(struct regstat *current,int i)
2197 alloc_cc(current,i); // for stalls
2198 dirty_reg(current,CCREG);
2199 alloc_reg_temp(current,i,-1);
2202 static void syscall_alloc(struct regstat *current,int i)
2204 alloc_cc(current,i);
2205 dirty_reg(current,CCREG);
2206 alloc_all(current,i);
2207 cinfo[i].min_free_regs=HOST_REGS;
2211 static void delayslot_alloc(struct regstat *current,int i)
2213 switch(dops[i].itype) {
2221 imm16_alloc(current,i);
2225 load_alloc(current,i);
2229 store_alloc(current,i);
2232 alu_alloc(current,i);
2235 shift_alloc(current,i);
2238 multdiv_alloc(current,i);
2241 shiftimm_alloc(current,i);
2244 mov_alloc(current,i);
2247 cop0_alloc(current,i);
2250 rfe_alloc(current,i);
2253 cop2_alloc(current,i);
2256 c2ls_alloc(current,i);
2259 c2op_alloc(current,i);
2264 static void add_stub(enum stub_type type, void *addr, void *retaddr,
2265 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
2267 assert(stubcount < ARRAY_SIZE(stubs));
2268 stubs[stubcount].type = type;
2269 stubs[stubcount].addr = addr;
2270 stubs[stubcount].retaddr = retaddr;
2271 stubs[stubcount].a = a;
2272 stubs[stubcount].b = b;
2273 stubs[stubcount].c = c;
2274 stubs[stubcount].d = d;
2275 stubs[stubcount].e = e;
2279 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
2280 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
2282 add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
2285 // Write out a single register
2286 static void wb_register(signed char r, const signed char regmap[], u_int dirty)
2289 for(hr=0;hr<HOST_REGS;hr++) {
2290 if(hr!=EXCLUDE_REG) {
2293 assert(regmap[hr]<64);
2294 emit_storereg(r,hr);
2302 static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
2304 //if(dirty_pre==dirty) return;
2306 for (hr = 0; hr < HOST_REGS; hr++) {
2308 if (r < 1 || r > 33 || ((u >> r) & 1))
2310 if (((dirty_pre & ~dirty) >> hr) & 1)
2311 emit_storereg(r, hr);
2316 static void pass_args(int a0, int a1)
2320 emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
2322 else if(a0!=0&&a1==0) {
2324 if (a0>=0) emit_mov(a0,0);
2327 if(a0>=0&&a0!=0) emit_mov(a0,0);
2328 if(a1>=0&&a1!=1) emit_mov(a1,1);
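// Moves up to two values into host argument regs 0 and 1; the first branch goes
// through reg 2 as scratch so that writing reg 1 cannot clobber a value that is
// still needed for reg 0, and a negative register number means "nothing to move".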
2332 static void alu_assemble(int i, const struct regstat *i_regs, int ccadj_)
2334 if(dops[i].opcode2>=0x20&&dops[i].opcode2<=0x23) { // ADD/ADDU/SUB/SUBU
2335 int do_oflow = dops[i].may_except; // ADD/SUB with exceptions enabled
2336 if (dops[i].rt1 || do_oflow) {
2337 int do_exception_check = 0;
2338 signed char s1, s2, t, tmp;
2339 t = get_reg_w(i_regs->regmap, dops[i].rt1);
2340 tmp = get_reg_temp(i_regs->regmap);
2343 if (t < 0 && do_oflow)
2346 s1 = get_reg(i_regs->regmap, dops[i].rs1);
2347 s2 = get_reg(i_regs->regmap, dops[i].rs2);
2348 if (dops[i].rs1 && dops[i].rs2) {
2351 if (dops[i].opcode2 & 2) {
2353 emit_subs(s1, s2, tmp);
2354 do_exception_check = 1;
2361 emit_adds(s1, s2, tmp);
2362 do_exception_check = 1;
2368 else if(dops[i].rs1) {
2369 if(s1>=0) emit_mov(s1,t);
2370 else emit_loadreg(dops[i].rs1,t);
2372 else if(dops[i].rs2) {
2374 emit_loadreg(dops[i].rs2, t);
2377 if (dops[i].opcode2 & 2) {
2380 do_exception_check = 1;
2391 if (do_exception_check) {
2394 if (t >= 0 && tmp != t)
2396 add_stub_r(OVERFLOW_STUB, jaddr, out, i, 0, i_regs, ccadj_, 0);
2400 else if(dops[i].opcode2==0x2a||dops[i].opcode2==0x2b) { // SLT/SLTU
2402 signed char s1l,s2l,t;
2404 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2407 s1l=get_reg(i_regs->regmap,dops[i].rs1);
2408 s2l=get_reg(i_regs->regmap,dops[i].rs2);
2409 if(dops[i].rs2==0) // rx<r0
2411 if(dops[i].opcode2==0x2a&&dops[i].rs1!=0) { // SLT
2413 emit_shrimm(s1l,31,t);
2415 else // SLTU (unsigned cannot be less than zero, 0<0)
2418 else if(dops[i].rs1==0) // r0<rx
2421 if(dops[i].opcode2==0x2a) // SLT
2422 emit_set_gz32(s2l,t);
2423 else // SLTU (set if not zero)
2424 emit_set_nz32(s2l,t);
2427 assert(s1l>=0);assert(s2l>=0);
2428 if(dops[i].opcode2==0x2a) // SLT
2429 emit_set_if_less32(s1l,s2l,t);
2431 emit_set_if_carry32(s1l,s2l,t);
2437 else if(dops[i].opcode2>=0x24&&dops[i].opcode2<=0x27) { // AND/OR/XOR/NOR
2439 signed char s1l,s2l,tl;
2440 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
2443 s1l=get_reg(i_regs->regmap,dops[i].rs1);
2444 s2l=get_reg(i_regs->regmap,dops[i].rs2);
2445 if(dops[i].rs1&&dops[i].rs2) {
2448 if(dops[i].opcode2==0x24) { // AND
2449 emit_and(s1l,s2l,tl);
2451 if(dops[i].opcode2==0x25) { // OR
2452 emit_or(s1l,s2l,tl);
2454 if(dops[i].opcode2==0x26) { // XOR
2455 emit_xor(s1l,s2l,tl);
2457 if(dops[i].opcode2==0x27) { // NOR
2458 emit_or(s1l,s2l,tl);
2464 if(dops[i].opcode2==0x24) { // AND
2467 if(dops[i].opcode2==0x25||dops[i].opcode2==0x26) { // OR/XOR
2469 if(s1l>=0) emit_mov(s1l,tl);
2470 else emit_loadreg(dops[i].rs1,tl); // CHECK: regmap_entry?
2474 if(s2l>=0) emit_mov(s2l,tl);
2475 else emit_loadreg(dops[i].rs2,tl); // CHECK: regmap_entry?
2477 else emit_zeroreg(tl);
2479 if(dops[i].opcode2==0x27) { // NOR
2481 if(s1l>=0) emit_not(s1l,tl);
2483 emit_loadreg(dops[i].rs1,tl);
2489 if(s2l>=0) emit_not(s2l,tl);
2491 emit_loadreg(dops[i].rs2,tl);
2495 else emit_movimm(-1,tl);
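// Assemble I-type immediate ops: LUI, ADDI/ADDIU, SLTI/SLTIU and
// ANDI/ORI/XORI. Known-constant sources are folded at compile time; ADDI
// also needs an overflow check, done either on the folded constant or at
// runtime via emit_addimm_and_set_flags3() + OVERFLOW_STUB.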
2504 static void imm16_assemble(int i, const struct regstat *i_regs, int ccadj_)
2506 if (dops[i].opcode==0x0f) { // LUI
2509 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2512 if(!((i_regs->isconst>>t)&1))
2513 emit_movimm(cinfo[i].imm<<16,t);
2517 if(dops[i].opcode==0x08||dops[i].opcode==0x09) { // ADDI/ADDIU
2518 int is_addi = dops[i].may_except;
2519 if (dops[i].rt1 || is_addi) {
2520 signed char s, t, tmp;
2521 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2522 s=get_reg(i_regs->regmap,dops[i].rs1);
2524 tmp = get_reg_temp(i_regs->regmap);
2530 if(!((i_regs->isconst>>t)&1)) {
2531 int sum, do_exception_check = 0;
2533 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2535 emit_addimm_and_set_flags3(t, cinfo[i].imm, tmp);
2536 do_exception_check = 1;
2539 emit_addimm(t, cinfo[i].imm, t);
2541 if (!((i_regs->wasconst >> s) & 1)) {
2543 emit_addimm_and_set_flags3(s, cinfo[i].imm, tmp);
2544 do_exception_check = 1;
2547 emit_addimm(s, cinfo[i].imm, t);
2550 int oflow = add_overflow(constmap[i][s], cinfo[i].imm, sum);
2551 if (is_addi && oflow)
2552 do_exception_check = 2;
2554 emit_movimm(sum, t);
2557 if (do_exception_check) {
2559 if (do_exception_check == 2)
2566 add_stub_r(OVERFLOW_STUB, jaddr, out, i, 0, i_regs, ccadj_, 0);
2572 if(!((i_regs->isconst>>t)&1))
2573 emit_movimm(cinfo[i].imm,t);
2578 else if(dops[i].opcode==0x0a||dops[i].opcode==0x0b) { // SLTI/SLTIU
2580 //assert(dops[i].rs1!=0); // r0 might be valid, but it's probably a bug
2582 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2583 sl=get_reg(i_regs->regmap,dops[i].rs1);
2587 if(dops[i].opcode==0x0a) { // SLTI
2589 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2590 emit_slti32(t,cinfo[i].imm,t);
2592 emit_slti32(sl,cinfo[i].imm,t);
2597 if(i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2598 emit_sltiu32(t,cinfo[i].imm,t);
2600 emit_sltiu32(sl,cinfo[i].imm,t);
2604 // SLTI(U) with r0 is just stupid,
2605 // nonetheless examples can be found
2606 if(dops[i].opcode==0x0a) // SLTI
2607 if(0<cinfo[i].imm) emit_movimm(1,t);
2608 else emit_zeroreg(t);
2611 if(cinfo[i].imm) emit_movimm(1,t);
2612 else emit_zeroreg(t);
2618 else if(dops[i].opcode>=0x0c&&dops[i].opcode<=0x0e) { // ANDI/ORI/XORI
2621 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
2622 sl=get_reg(i_regs->regmap,dops[i].rs1);
2623 if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2624 if(dops[i].opcode==0x0c) //ANDI
2628 if(i_regs->regmap_entry[tl]!=dops[i].rs1) emit_loadreg(dops[i].rs1,tl);
2629 emit_andimm(tl,cinfo[i].imm,tl);
2631 if(!((i_regs->wasconst>>sl)&1))
2632 emit_andimm(sl,cinfo[i].imm,tl);
2634 emit_movimm(constmap[i][sl]&cinfo[i].imm,tl);
2644 if(i_regs->regmap_entry[tl]!=dops[i].rs1) emit_loadreg(dops[i].rs1,tl);
2646 if(dops[i].opcode==0x0d) { // ORI
2648 emit_orimm(tl,cinfo[i].imm,tl);
2650 if(!((i_regs->wasconst>>sl)&1))
2651 emit_orimm(sl,cinfo[i].imm,tl);
2653 emit_movimm(constmap[i][sl]|cinfo[i].imm,tl);
2656 if(dops[i].opcode==0x0e) { // XORI
2658 emit_xorimm(tl,cinfo[i].imm,tl);
2660 if(!((i_regs->wasconst>>sl)&1))
2661 emit_xorimm(sl,cinfo[i].imm,tl);
2663 emit_movimm(constmap[i][sl]^cinfo[i].imm,tl);
2668 emit_movimm(cinfo[i].imm,tl);
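// Shift by immediate (SLL/SRL/SRA). If the source isn't allocated to a host
// reg, the value is reloaded into the destination and shifted in place.
// The 64-bit variants (DSLL/DSRL/DSRA and the *32 forms) handled below are
// not meaningful on the R3000A, which has no 64-bit shifts.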
2676 static void shiftimm_assemble(int i, const struct regstat *i_regs)
2678 if(dops[i].opcode2<=0x3) // SLL/SRL/SRA
2682 t=get_reg_w(i_regs->regmap, dops[i].rt1);
2683 s=get_reg(i_regs->regmap,dops[i].rs1);
2685 if(t>=0&&!((i_regs->isconst>>t)&1)){
2692 if(s<0&&i_regs->regmap_entry[t]!=dops[i].rs1) emit_loadreg(dops[i].rs1,t);
2694 if(dops[i].opcode2==0) // SLL
2696 emit_shlimm(s<0?t:s,cinfo[i].imm,t);
2698 if(dops[i].opcode2==2) // SRL
2700 emit_shrimm(s<0?t:s,cinfo[i].imm,t);
2702 if(dops[i].opcode2==3) // SRA
2704 emit_sarimm(s<0?t:s,cinfo[i].imm,t);
2708 if(s>=0 && s!=t) emit_mov(s,t);
2712 //emit_storereg(dops[i].rt1,t); //DEBUG
2715 if(dops[i].opcode2>=0x38&&dops[i].opcode2<=0x3b) // DSLL/DSRL/DSRA
2719 if(dops[i].opcode2==0x3c) // DSLL32
2723 if(dops[i].opcode2==0x3e) // DSRL32
2727 if(dops[i].opcode2==0x3f) // DSRA32
2733 #ifndef shift_assemble
2734 static void shift_assemble(int i, const struct regstat *i_regs)
2736 signed char s,t,shift;
2737 if (dops[i].rt1 == 0)
2739 assert(dops[i].opcode2<=0x07); // SLLV/SRLV/SRAV
2740 t = get_reg(i_regs->regmap, dops[i].rt1);
2741 s = get_reg(i_regs->regmap, dops[i].rs1);
2742 shift = get_reg(i_regs->regmap, dops[i].rs2);
2748 else if(dops[i].rs2==0) {
2750 if(s!=t) emit_mov(s,t);
2753 host_tempreg_acquire();
2754 emit_andimm(shift,31,HOST_TEMPREG);
2755 switch(dops[i].opcode2) {
2757 emit_shl(s,HOST_TEMPREG,t);
2760 emit_shr(s,HOST_TEMPREG,t);
2763 emit_sar(s,HOST_TEMPREG,t);
2768 host_tempreg_release();
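// Classify a guest address for the fast memory path: main RAM via the
// 0x00000000, 0x80200000+ and 0xa0000000 mirrors, or the 0x1f800000
// scratchpad. Addresses outside these ranges get no type and fall back to
// the generic RAM-limit check in emit_fastpath_cmp_jump().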
2782 static int get_ptr_mem_type(u_int a)
2784 if(a < 0x00200000) {
2785 if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
2786 // deliberately return the wrong type so the memhandler gets used; needed for the BIOS self-test to pass
2787 // 007 does similar stuff from a00 mirror, weird stuff
2791 if(0x1f800000 <= a && a < 0x1f801000)
2793 if(0x80200000 <= a && a < 0x80800000)
2795 if(0xa0000000 <= a && a < 0xa0200000)
2800 static int get_ro_reg(const struct regstat *i_regs, int host_tempreg_free)
2802 int r = get_reg(i_regs->regmap, ROREG);
2803 if (r < 0 && host_tempreg_free) {
2804 host_tempreg_acquire();
2805 emit_loadreg(ROREG, r = HOST_TEMPREG);
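// Emit the address check in front of a load/store fast path. If register
// value speculation (smrv) predicts the target region, the address is
// remapped into the canonical RAM window with a mask/or; otherwise a plain
// compare against RAM_SIZE routes out-of-range accesses to the stub.
// Accesses that can raise an address-error exception also get an alignment
// test feeding an ALIGNMENT_STUB. Returns the slow-path branch to patch,
// if one was emitted.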
2812 static void *emit_fastpath_cmp_jump(int i, const struct regstat *i_regs,
2813 int addr, int *offset_reg, int *addr_reg_override, int ccadj_)
2817 int mr = dops[i].rs1;
2820 if(((smrv_strong|smrv_weak)>>mr)&1) {
2821 type=get_ptr_mem_type(smrv[mr]);
2822 //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
2825 // use the mirror we are running on
2826 type=get_ptr_mem_type(start);
2827 //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
2830 if (dops[i].may_except) {
2832 u_int op = dops[i].opcode;
2833 int mask = ((op & 0x37) == 0x21 || op == 0x25) ? 1 : 3; // LH/SH/LHU
2835 emit_testimm(addr, mask);
2838 add_stub_r(ALIGNMENT_STUB, jaddr2, out, i, addr, i_regs, ccadj_, 0);
2841 if(type==MTYPE_8020) { // RAM 80200000+ mirror
2842 host_tempreg_acquire();
2843 emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2844 addr=*addr_reg_override=HOST_TEMPREG;
2847 else if(type==MTYPE_0000) { // RAM 0 mirror
2848 host_tempreg_acquire();
2849 emit_orimm(addr,0x80000000,HOST_TEMPREG);
2850 addr=*addr_reg_override=HOST_TEMPREG;
2853 else if(type==MTYPE_A000) { // RAM A mirror
2854 host_tempreg_acquire();
2855 emit_andimm(addr,~0x20000000,HOST_TEMPREG);
2856 addr=*addr_reg_override=HOST_TEMPREG;
2859 else if(type==MTYPE_1F80) { // scratchpad
2860 if (psxH == (void *)0x1f800000) {
2861 host_tempreg_acquire();
2862 emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
2863 emit_cmpimm(HOST_TEMPREG,0x1000);
2864 host_tempreg_release();
2869 // do the usual RAM check, jump will go to the right handler
2874 if (type == 0) // need ram check
2876 emit_cmpimm(addr,RAM_SIZE);
2878 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2879 // Hint to branch predictor that the branch is unlikely to be taken
2880 if (dops[i].rs1 >= 28)
2881 emit_jno_unlikely(0);
2885 if (ram_offset != 0)
2886 *offset_reg = get_ro_reg(i_regs, 0);
2892 // return the memhandler, or get a directly accessible address and return 0
2893 static void *get_direct_memhandler(void *table, u_int addr,
2894 enum stub_type type, uintptr_t *addr_host)
2896 uintptr_t msb = 1ull << (sizeof(uintptr_t)*8 - 1);
2897 uintptr_t l1, l2 = 0;
2898 l1 = ((uintptr_t *)table)[addr>>12];
2900 uintptr_t v = l1 << 1;
2901 *addr_host = v + addr;
2906 if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
2907 l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
2908 else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
2909 l2 = ((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
2911 l2 = ((uintptr_t *)l1)[(addr&0xfff)/4];
2913 uintptr_t v = l2 << 1;
2914 *addr_host = v + (addr&0xfff);
2917 return (void *)(l2 << 1);
2921 static u_int get_host_reglist(const signed char *regmap)
2923 u_int reglist = 0, hr;
2924 for (hr = 0; hr < HOST_REGS; hr++) {
2925 if (hr != EXCLUDE_REG && regmap[hr] >= 0)
2931 static u_int reglist_exclude(u_int reglist, int r1, int r2)
2934 reglist &= ~(1u << r1);
2936 reglist &= ~(1u << r2);
2940 // find a temp caller-saved register not in reglist (so assumed to be free)
2941 static int reglist_find_free(u_int reglist)
2943 u_int free_regs = ~reglist & CALLER_SAVE_REGS;
2946 return __builtin_ctz(free_regs);
2949 static void do_load_word(int a, int rt, int offset_reg)
2951 if (offset_reg >= 0)
2952 emit_ldr_dualindexed(offset_reg, a, rt);
2954 emit_readword_indexed(0, a, rt);
2957 static void do_store_word(int a, int ofs, int rt, int offset_reg, int preserve_a)
2959 if (offset_reg < 0) {
2960 emit_writeword_indexed(rt, ofs, a);
2964 emit_addimm(a, ofs, a);
2965 emit_str_dualindexed(offset_reg, a, rt);
2966 if (ofs != 0 && preserve_a)
2967 emit_addimm(a, -ofs, a);
2970 static void do_store_hword(int a, int ofs, int rt, int offset_reg, int preserve_a)
2972 if (offset_reg < 0) {
2973 emit_writehword_indexed(rt, ofs, a);
2977 emit_addimm(a, ofs, a);
2978 emit_strh_dualindexed(offset_reg, a, rt);
2979 if (ofs != 0 && preserve_a)
2980 emit_addimm(a, -ofs, a);
2983 static void do_store_byte(int a, int rt, int offset_reg)
2985 if (offset_reg >= 0)
2986 emit_strb_dualindexed(offset_reg, a, rt);
2988 emit_writebyte_indexed(rt, 0, a);
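// Assemble LB/LH/LW/LBU/LHU. RAM/scratchpad accesses are emitted inline
// behind the fast-path check; everything else is routed through a
// LOADx_STUB, or becomes an inline_readstub() call when the address is a
// known constant outside RAM. Loads to r0 still perform the read into a
// temp (the target could be a FIFO with side effects) and discard it.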
2991 static void load_assemble(int i, const struct regstat *i_regs, int ccadj_)
2993 int addr = cinfo[i].addr;
2997 int memtarget=0,c=0;
2998 int offset_reg = -1;
2999 int fastio_reg_override = -1;
3000 u_int reglist=get_host_reglist(i_regs->regmap);
3001 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3002 s=get_reg(i_regs->regmap,dops[i].rs1);
3003 offset=cinfo[i].imm;
3004 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3006 c=(i_regs->wasconst>>s)&1;
3008 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3011 //printf("load_assemble: c=%d\n",c);
3012 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
3013 if(tl<0 && ((!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80) || dops[i].rt1==0)) {
3014 // could be FIFO, must perform the read
3016 assem_debug("(forced read)\n");
3017 tl = get_reg_temp(i_regs->regmap); // may be == addr
3022 //printf("load_assemble: c=%d\n",c);
3023 //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
3027 // Strmnnrmn's speed hack
3028 if(dops[i].rs1!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3031 jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
3032 &offset_reg, &fastio_reg_override, ccadj_);
3035 else if (ram_offset && memtarget) {
3036 offset_reg = get_ro_reg(i_regs, 0);
3038 int dummy=(dops[i].rt1==0)||(tl!=get_reg_w(i_regs->regmap, dops[i].rt1)); // ignore loads to r0 and unneeded reg
3039 switch (dops[i].opcode) {
3044 if (fastio_reg_override >= 0)
3045 a = fastio_reg_override;
3047 if (offset_reg >= 0)
3048 emit_ldrsb_dualindexed(offset_reg, a, tl);
3050 emit_movsbl_indexed(0, a, tl);
3053 add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3056 inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3062 if (fastio_reg_override >= 0)
3063 a = fastio_reg_override;
3064 if (offset_reg >= 0)
3065 emit_ldrsh_dualindexed(offset_reg, a, tl);
3067 emit_movswl_indexed(0, a, tl);
3070 add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3073 inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3079 if (fastio_reg_override >= 0)
3080 a = fastio_reg_override;
3081 do_load_word(a, tl, offset_reg);
3084 add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3087 inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3093 if (fastio_reg_override >= 0)
3094 a = fastio_reg_override;
3096 if (offset_reg >= 0)
3097 emit_ldrb_dualindexed(offset_reg, a, tl);
3099 emit_movzbl_indexed(0, a, tl);
3102 add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3105 inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3111 if (fastio_reg_override >= 0)
3112 a = fastio_reg_override;
3113 if (offset_reg >= 0)
3114 emit_ldrh_dualindexed(offset_reg, a, tl);
3116 emit_movzwl_indexed(0, a, tl);
3119 add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3122 inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,dops[i].rt1,ccadj_,reglist);
3128 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3129 host_tempreg_release();
3132 #ifndef loadlr_assemble
3133 static void loadlr_assemble(int i, const struct regstat *i_regs, int ccadj_)
3135 int addr = cinfo[i].addr;
3136 int s,tl,temp,temp2;
3139 int memtarget=0,c=0;
3140 int offset_reg = -1;
3141 int fastio_reg_override = -1;
3142 u_int reglist=get_host_reglist(i_regs->regmap);
3143 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3144 s=get_reg(i_regs->regmap,dops[i].rs1);
3145 temp=get_reg_temp(i_regs->regmap);
3146 temp2=get_reg(i_regs->regmap,FTEMP);
3147 offset=cinfo[i].imm;
3151 c=(i_regs->wasconst>>s)&1;
3153 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3157 emit_shlimm(addr,3,temp);
3158 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
3159 emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
3161 emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
3163 jaddr = emit_fastpath_cmp_jump(i, i_regs, temp2,
3164 &offset_reg, &fastio_reg_override, ccadj_);
3167 if (ram_offset && memtarget) {
3168 offset_reg = get_ro_reg(i_regs, 0);
3170 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
3171 emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
3173 emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
3176 if (dops[i].opcode==0x22||dops[i].opcode==0x26) { // LWL/LWR
3179 if (fastio_reg_override >= 0)
3180 a = fastio_reg_override;
3181 do_load_word(a, temp2, offset_reg);
3182 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3183 host_tempreg_release();
3184 if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj_,reglist);
3187 inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj_,reglist);
3190 emit_andimm(temp,24,temp);
3191 if (dops[i].opcode==0x22) // LWL
3192 emit_xorimm(temp,24,temp);
3193 host_tempreg_acquire();
3194 emit_movimm(-1,HOST_TEMPREG);
3195 if (dops[i].opcode==0x26) {
3196 emit_shr(temp2,temp,temp2);
3197 emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
3199 emit_shl(temp2,temp,temp2);
3200 emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
3202 host_tempreg_release();
3203 emit_or(temp2,tl,tl);
3205 //emit_storereg(dops[i].rt1,tl); // DEBUG
3207 if (dops[i].opcode==0x1A||dops[i].opcode==0x1B) { // LDL/LDR
3213 static void do_invstub(int n)
3216 assem_debug("do_invstub %x\n", start + stubs[n].e*4);
3217 u_int reglist = stubs[n].a;
3218 u_int addrr = stubs[n].b;
3219 int ofs_start = stubs[n].c;
3220 int ofs_end = stubs[n].d;
3221 int len = ofs_end - ofs_start;
3224 set_jump_target(stubs[n].addr, out);
3226 if (addrr != 0 || ofs_start != 0)
3227 emit_addimm(addrr, ofs_start, 0);
3228 emit_readword(&inv_code_start, 2);
3229 emit_readword(&inv_code_end, 3);
3231 emit_addimm(0, len + 4, (rightr = 1));
3233 emit_cmpcs(3, rightr);
3236 void *func = (len != 0)
3237 ? (void *)ndrc_write_invalidate_many
3238 : (void *)ndrc_write_invalidate_one;
3239 emit_far_call(func);
3240 set_jump_target(jaddr, out);
3241 restore_regs(reglist);
3242 emit_jmp(stubs[n].retaddr);
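// Guard a store against self-modifying code: look up the invalid_code/INVCP
// byte for the written page and, if compiled code lives there, call out to
// invalidate it. Adjacent stores through the same base register are merged
// so one check covers the whole [imm_min, imm_max] span.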
3245 static void do_store_smc_check(int i, const struct regstat *i_regs, u_int reglist, int addr)
3247 if (HACK_ENABLED(NDHACK_NO_SMC_CHECK))
3249 // this can't be used any more since we started to check exact
3250 // block boundaries in invalidate_range()
3251 //if (i_regs->waswritten & (1<<dops[i].rs1))
3253 // (naively) assume nobody will run code from stack
3254 if (dops[i].rs1 == 29)
3257 int j, imm_maxdiff = 32, imm_min = cinfo[i].imm, imm_max = cinfo[i].imm, count = 1;
3258 if (i < slen - 1 && dops[i+1].is_store && dops[i+1].rs1 == dops[i].rs1
3259 && abs(cinfo[i+1].imm - cinfo[i].imm) <= imm_maxdiff)
3261 for (j = i - 1; j >= 0; j--) {
3262 if (!dops[j].is_store || dops[j].rs1 != dops[i].rs1
3263 || abs(cinfo[j].imm - cinfo[j+1].imm) > imm_maxdiff)
3266 if (imm_min > cinfo[j].imm)
3267 imm_min = cinfo[j].imm;
3268 if (imm_max < cinfo[j].imm)
3269 imm_max = cinfo[j].imm;
3271 #if defined(HOST_IMM8)
3272 int ir = get_reg(i_regs->regmap, INVCP);
3274 host_tempreg_acquire();
3275 emit_ldrb_indexedsr12_reg(ir, addr, HOST_TEMPREG);
3277 emit_cmpmem_indexedsr12_imm(invalid_code, addr, 1);
3280 #ifdef INVALIDATE_USE_COND_CALL
3282 emit_cmpimm(HOST_TEMPREG, 1);
3283 emit_callne(invalidate_addr_reg[addr]);
3284 host_tempreg_release();
3288 void *jaddr = emit_cbz(HOST_TEMPREG, 0);
3289 host_tempreg_release();
3290 imm_min -= cinfo[i].imm;
3291 imm_max -= cinfo[i].imm;
3292 add_stub(INVCODE_STUB, jaddr, out, reglist|(1<<HOST_CCREG),
3293 addr, imm_min, imm_max, i);
3296 // determines only whether code-overwrite checking is needed
3297 // (also true for the non-existent 0x20000000 mirror, which shouldn't matter)
3298 #define is_ram_addr(a) !((a) & 0x5f800000)
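// Assemble SB/SH/SW. Same fast-path/stub split as load_assemble, plus the
// SMC check above for stores that may hit RAM. A store whose constant
// address falls inside the block being compiled is also detected: the
// block state is written back and execution leaves through
// ndrc_get_addr_ht() so the modified code gets retranslated.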
3300 static void store_assemble(int i, const struct regstat *i_regs, int ccadj_)
3303 int addr = cinfo[i].addr;
3306 enum stub_type type=0;
3307 int memtarget=0,c=0;
3308 int offset_reg = -1;
3309 int fastio_reg_override = -1;
3310 u_int addr_const = ~0;
3311 u_int reglist=get_host_reglist(i_regs->regmap);
3312 tl=get_reg(i_regs->regmap,dops[i].rs2);
3313 s=get_reg(i_regs->regmap,dops[i].rs1);
3314 offset=cinfo[i].imm;
3316 c=(i_regs->wasconst>>s)&1;
3318 addr_const = constmap[i][s] + offset;
3319 memtarget = ((signed int)addr_const) < (signed int)(0x80000000 + RAM_SIZE);
3324 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3325 reglist |= 1u << addr;
3327 jaddr = emit_fastpath_cmp_jump(i, i_regs, addr,
3328 &offset_reg, &fastio_reg_override, ccadj_);
3330 else if (ram_offset && memtarget) {
3331 offset_reg = get_ro_reg(i_regs, 0);
3334 switch (dops[i].opcode) {
3338 if (fastio_reg_override >= 0)
3339 a = fastio_reg_override;
3340 do_store_byte(a, tl, offset_reg);
3347 if (fastio_reg_override >= 0)
3348 a = fastio_reg_override;
3349 do_store_hword(a, 0, tl, offset_reg, 1);
3356 if (fastio_reg_override >= 0)
3357 a = fastio_reg_override;
3358 do_store_word(a, 0, tl, offset_reg, 1);
3365 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3366 host_tempreg_release();
3368 // PCSX store handlers don't check invcode again
3369 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3371 if (!c || is_ram_addr(addr_const))
3372 do_store_smc_check(i, i_regs, reglist, addr);
3373 if (c && !memtarget)
3374 inline_writestub(type, i, addr_const, i_regs->regmap, dops[i].rs2, ccadj_, reglist);
3375 // basic current block modification detection..
3376 // not looking back as that should be in mips cache already
3377 // (see Spyro2 title->attract mode)
3378 if (start + i*4 < addr_const && addr_const < start + slen*4) {
3379 SysPrintf("write to %08x hits block %08x, pc=%08x\n", addr_const, start, start+i*4);
3380 assert(i_regs->regmap==regs[i].regmap); // not delay slot
3381 if(i_regs->regmap==regs[i].regmap) {
3382 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3383 wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
3384 emit_movimm(start+i*4+4,0);
3385 emit_writeword(0,&pcaddr);
3386 emit_addimm(HOST_CCREG,2,HOST_CCREG);
3387 emit_far_call(ndrc_get_addr_ht);
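// Assemble SWL/SWR (unaligned word store). On this little-endian target the
// low two address bits select how many bytes of rt reach memory:
//   addr&3 :  0   1   2   3
//   SWL    :  1   2   3   4   bytes (high end of rt, ending at addr)
//   SWR    :  4   3   2   1   bytes (low end of rt, starting at addr)
// The cases below branch on those bits and use rotates plus byte/halfword/
// word stores to touch only the affected lanes; non-RAM targets fall back
// to the STORELR_STUB slow path, and RAM stores still get the SMC check.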
3393 static void storelr_assemble(int i, const struct regstat *i_regs, int ccadj_)
3395 int addr = cinfo[i].addr;
3399 void *case1, *case23, *case3;
3400 void *done0, *done1, *done2;
3401 int memtarget=0,c=0;
3402 int offset_reg = -1;
3403 u_int addr_const = ~0;
3404 u_int reglist = get_host_reglist(i_regs->regmap);
3405 tl=get_reg(i_regs->regmap,dops[i].rs2);
3406 s=get_reg(i_regs->regmap,dops[i].rs1);
3407 offset=cinfo[i].imm;
3409 c = (i_regs->isconst >> s) & 1;
3411 addr_const = constmap[i][s] + offset;
3412 memtarget = ((signed int)addr_const) < (signed int)(0x80000000 + RAM_SIZE);
3417 reglist |= 1u << addr;
3419 emit_cmpimm(addr, RAM_SIZE);
3425 if(!memtarget||!dops[i].rs1) {
3431 offset_reg = get_ro_reg(i_regs, 0);
3433 emit_testimm(addr,2);
3436 emit_testimm(addr,1);
3440 if (dops[i].opcode == 0x2A) { // SWL
3441 // Write msb into least significant byte
3442 if (dops[i].rs2) emit_rorimm(tl, 24, tl);
3443 do_store_byte(addr, tl, offset_reg);
3444 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3446 else if (dops[i].opcode == 0x2E) { // SWR
3447 // Write entire word
3448 do_store_word(addr, 0, tl, offset_reg, 1);
3453 set_jump_target(case1, out);
3454 if (dops[i].opcode == 0x2A) { // SWL
3455 // Write two msb into two least significant bytes
3456 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3457 do_store_hword(addr, -1, tl, offset_reg, 1);
3458 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3460 else if (dops[i].opcode == 0x2E) { // SWR
3461 // Write 3 lsb into three most significant bytes
3462 do_store_byte(addr, tl, offset_reg);
3463 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3464 do_store_hword(addr, 1, tl, offset_reg, 1);
3465 if (dops[i].rs2) emit_rorimm(tl, 24, tl);
3470 set_jump_target(case23, out);
3471 emit_testimm(addr,1);
3475 if (dops[i].opcode==0x2A) { // SWL
3476 // Write 3 msb into three least significant bytes
3477 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3478 do_store_hword(addr, -2, tl, offset_reg, 1);
3479 if (dops[i].rs2) emit_rorimm(tl, 16, tl);
3480 do_store_byte(addr, tl, offset_reg);
3481 if (dops[i].rs2) emit_rorimm(tl, 8, tl);
3483 else if (dops[i].opcode == 0x2E) { // SWR
3484 // Write two lsb into two most significant bytes
3485 do_store_hword(addr, 0, tl, offset_reg, 1);
3490 set_jump_target(case3, out);
3491 if (dops[i].opcode == 0x2A) { // SWL
3492 do_store_word(addr, -3, tl, offset_reg, 1);
3494 else if (dops[i].opcode == 0x2E) { // SWR
3495 do_store_byte(addr, tl, offset_reg);
3497 set_jump_target(done0, out);
3498 set_jump_target(done1, out);
3499 set_jump_target(done2, out);
3500 if (offset_reg == HOST_TEMPREG)
3501 host_tempreg_release();
3502 if (!c || !memtarget)
3503 add_stub_r(STORELR_STUB,jaddr,out,i,addr,i_regs,ccadj_,reglist);
3504 if (!c || is_ram_addr(addr_const))
3505 do_store_smc_check(i, i_regs, reglist, addr);
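// MFC0 is a plain read of reg_cop0[]. MTC0 spills the source reg and calls
// out to pcsx_mtc0() (pcsx_mtc0_ds from a delay slot); for SR/CAUSE
// (cop0 regs 12/13) the cycle counter is synced to psxRegs.cycle first,
// and if the write leaves an exception pending, the block is exited
// through ndrc_get_addr_ht().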
3508 static void cop0_assemble(int i, const struct regstat *i_regs, int ccadj_)
3510 if(dops[i].opcode2==0) // MFC0
3512 signed char t=get_reg_w(i_regs->regmap, dops[i].rt1);
3513 u_int copr=(source[i]>>11)&0x1f;
3514 if(t>=0&&dops[i].rt1!=0) {
3515 emit_readword(&reg_cop0[copr],t);
3518 else if(dops[i].opcode2==4) // MTC0
3520 int s = get_reg(i_regs->regmap, dops[i].rs1);
3521 int cc = get_reg(i_regs->regmap, CCREG);
3522 char copr=(source[i]>>11)&0x1f;
3524 wb_register(dops[i].rs1,i_regs->regmap,i_regs->dirty);
3525 if (copr == 12 || copr == 13) {
3526 emit_readword(&last_count,HOST_TEMPREG);
3527 if (cc != HOST_CCREG)
3528 emit_loadreg(CCREG, HOST_CCREG);
3529 emit_add(HOST_CCREG, HOST_TEMPREG, HOST_CCREG);
3530 emit_addimm(HOST_CCREG, ccadj_ + 2, HOST_CCREG);
3531 emit_writeword(HOST_CCREG, &psxRegs.cycle);
3533 // burn cycles to cause cc_interrupt, which will
3534 // reschedule next_interupt. Relies on CCREG from above.
3535 assem_debug("MTC0 DS %d\n", copr);
3536 emit_writeword(HOST_CCREG,&last_count);
3537 emit_movimm(0,HOST_CCREG);
3538 emit_storereg(CCREG,HOST_CCREG);
3539 emit_loadreg(dops[i].rs1,1);
3540 emit_movimm(copr,0);
3541 emit_far_call(pcsx_mtc0_ds);
3542 emit_loadreg(dops[i].rs1,s);
3545 emit_movimm(start+i*4+4,HOST_TEMPREG);
3546 emit_writeword(HOST_TEMPREG,&pcaddr);
3547 emit_movimm(0,HOST_TEMPREG);
3548 emit_writeword(HOST_TEMPREG,&pending_exception);
3552 emit_movimm(copr, 0);
3553 emit_far_call(pcsx_mtc0);
3554 if (copr == 12 || copr == 13) {
3555 emit_readword(&psxRegs.cycle,HOST_CCREG);
3556 emit_readword(&last_count,HOST_TEMPREG);
3557 emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3558 //emit_writeword(HOST_TEMPREG,&last_count);
3559 assert(!is_delayslot);
3560 emit_readword(&pending_exception,HOST_TEMPREG);
3561 emit_test(HOST_TEMPREG,HOST_TEMPREG);
3564 emit_readword(&pcaddr, 0);
3565 emit_far_call(ndrc_get_addr_ht);
3567 set_jump_target(jaddr, out);
3568 emit_addimm(HOST_CCREG, -ccadj_ - 2, HOST_CCREG);
3569 if (cc != HOST_CCREG)
3570 emit_storereg(CCREG, HOST_CCREG);
3572 emit_loadreg(dops[i].rs1,s);
3576 static void rfe_assemble(int i, const struct regstat *i_regs)
3578 emit_readword(&psxRegs.CP0.n.SR, 0);
3579 emit_andimm(0, 0x3c, 1);
3580 emit_andimm(0, ~0xf, 0);
3581 emit_orrshr_imm(1, 2, 0);
3582 emit_writeword(0, &psxRegs.CP0.n.SR);
3585 static int cop2_is_stalling_op(int i, int *cycles)
3587 if (dops[i].opcode == 0x3a) { // SWC2
3591 if (dops[i].itype == COP2 && (dops[i].opcode2 == 0 || dops[i].opcode2 == 2)) { // MFC2/CFC2
3595 if (dops[i].itype == C2OP) {
3596 *cycles = gte_cycletab[source[i] & 0x3f];
3599 // ... what about MTC2/CTC2/LWC2?
3604 static void log_gte_stall(int stall, u_int cycle)
3606 if ((u_int)stall <= 44)
3607 printf("x stall %2d %u\n", stall, cycle + last_count);
3610 static void emit_log_gte_stall(int i, int stall, u_int reglist)
3614 emit_movimm(stall, 0);
3616 emit_mov(HOST_TEMPREG, 0);
3617 emit_addimm(HOST_CCREG, cinfo[i].ccadj, 1);
3618 emit_far_call(log_gte_stall);
3619 restore_regs(reglist);
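// Model GTE busy time: a GTE command keeps the unit busy for
// gte_cycletab[op] cycles and a later GTE access stalls the CPU until it
// finishes. When the distance to the previous GTE op is known at compile
// time the stall is folded into HOST_CCREG; otherwise psxRegs.gteBusyCycle
// is checked (and, for ops whose result is read in a later block, updated)
// at runtime. 44 appears to be treated as the longest possible GTE op.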
3623 static void cop2_do_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
3625 int j = i, other_gte_op_cycles = -1, stall = -MAXBLOCK, cycles_passed;
3626 int rtmp = reglist_find_free(reglist);
3628 if (HACK_ENABLED(NDHACK_NO_STALLS))
3630 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3631 // happens occasionally... cc evicted? Don't bother then
3632 //printf("no cc %08x\n", start + i*4);
3636 for (j = i - 1; j >= 0; j--) {
3637 //if (dops[j].is_ds) break;
3638 if (cop2_is_stalling_op(j, &other_gte_op_cycles) || dops[j].bt)
3640 if (j > 0 && cinfo[j - 1].ccadj > cinfo[j].ccadj)
3645 cycles_passed = cinfo[i].ccadj - cinfo[j].ccadj;
3646 if (other_gte_op_cycles >= 0)
3647 stall = other_gte_op_cycles - cycles_passed;
3648 else if (cycles_passed >= 44)
3649 stall = 0; // can't stall
3650 if (stall == -MAXBLOCK && rtmp >= 0) {
3651 // unknown stall, do the expensive runtime check
3652 assem_debug("; cop2_do_stall_check\n");
3655 emit_movimm(gte_cycletab[op], 0);
3656 emit_addimm(HOST_CCREG, cinfo[i].ccadj, 1);
3657 emit_far_call(call_gteStall);
3658 restore_regs(reglist);
3660 host_tempreg_acquire();
3661 emit_readword(&psxRegs.gteBusyCycle, rtmp);
3662 emit_addimm(rtmp, -cinfo[i].ccadj, rtmp);
3663 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3664 emit_cmpimm(HOST_TEMPREG, 44);
3665 emit_cmovb_reg(rtmp, HOST_CCREG);
3666 //emit_log_gte_stall(i, 0, reglist);
3667 host_tempreg_release();
3670 else if (stall > 0) {
3671 //emit_log_gte_stall(i, stall, reglist);
3672 emit_addimm(HOST_CCREG, stall, HOST_CCREG);
3675 // save gteBusyCycle, if needed
3676 if (gte_cycletab[op] == 0)
3678 other_gte_op_cycles = -1;
3679 for (j = i + 1; j < slen; j++) {
3680 if (cop2_is_stalling_op(j, &other_gte_op_cycles))
3682 if (dops[j].is_jump) {
3684 if (j + 1 < slen && cop2_is_stalling_op(j + 1, &other_gte_op_cycles))
3689 if (other_gte_op_cycles >= 0)
3690 // will handle stall when assembling that op
3692 cycles_passed = cinfo[min(j, slen -1)].ccadj - cinfo[i].ccadj;
3693 if (cycles_passed >= 44)
3695 assem_debug("; save gteBusyCycle\n");
3696 host_tempreg_acquire();
3698 emit_readword(&last_count, HOST_TEMPREG);
3699 emit_add(HOST_TEMPREG, HOST_CCREG, HOST_TEMPREG);
3700 emit_addimm(HOST_TEMPREG, cinfo[i].ccadj, HOST_TEMPREG);
3701 emit_addimm(HOST_TEMPREG, gte_cycletab[op], HOST_TEMPREG);
3702 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3704 emit_addimm(HOST_CCREG, cinfo[i].ccadj + gte_cycletab[op], HOST_TEMPREG);
3705 emit_writeword(HOST_TEMPREG, &psxRegs.gteBusyCycle);
3707 host_tempreg_release();
3710 static int is_mflohi(int i)
3712 return (dops[i].itype == MOV && (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG));
3715 static int check_multdiv(int i, int *cycles)
3717 if (dops[i].itype != MULTDIV)
3719 if (dops[i].opcode2 == 0x18 || dops[i].opcode2 == 0x19) // MULT(U)
3720 *cycles = 11; // approx from 7 11 14
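// MULT/MULTU/DIV/DIVU results become available only after several cycles;
// reading HI/LO earlier stalls the CPU. multdiv_prepare_stall() stores the
// completion time in psxRegs.muldivBusyCycle when the HI/LO read is not in
// the current block; multdiv_do_stall() at the MFHI/MFLO then adds the
// remaining wait, folded at compile time when possible or via a runtime
// check against muldivBusyCycle otherwise.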
3726 static void multdiv_prepare_stall(int i, const struct regstat *i_regs, int ccadj_)
3728 int j, found = 0, c = 0;
3729 if (HACK_ENABLED(NDHACK_NO_STALLS))
3731 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3732 // happens occasionally... cc evicted? Don't bother then
3735 for (j = i + 1; j < slen; j++) {
3738 if ((found = is_mflohi(j)))
3740 if (dops[j].is_jump) {
3742 if (j + 1 < slen && (found = is_mflohi(j + 1)))
3748 // handle all in multdiv_do_stall()
3750 check_multdiv(i, &c);
3752 assem_debug("; muldiv prepare stall %d\n", c);
3753 host_tempreg_acquire();
3754 emit_addimm(HOST_CCREG, ccadj_ + c, HOST_TEMPREG);
3755 emit_writeword(HOST_TEMPREG, &psxRegs.muldivBusyCycle);
3756 host_tempreg_release();
3759 static void multdiv_do_stall(int i, const struct regstat *i_regs)
3761 int j, known_cycles = 0;
3762 u_int reglist = get_host_reglist(i_regs->regmap);
3763 int rtmp = get_reg_temp(i_regs->regmap);
3765 rtmp = reglist_find_free(reglist);
3766 if (HACK_ENABLED(NDHACK_NO_STALLS))
3768 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG || rtmp < 0) {
3769 // happens occasionally... cc evicted? Don't bother then
3770 //printf("no cc/rtmp %08x\n", start + i*4);
3774 for (j = i - 1; j >= 0; j--) {
3775 if (dops[j].is_ds) break;
3776 if (check_multdiv(j, &known_cycles))
3779 // already handled by this op
3781 if (dops[j].bt || (j > 0 && cinfo[j - 1].ccadj > cinfo[j].ccadj))
3786 if (known_cycles > 0) {
3787 known_cycles -= cinfo[i].ccadj - cinfo[j].ccadj;
3788 assem_debug("; muldiv stall resolved %d\n", known_cycles);
3789 if (known_cycles > 0)
3790 emit_addimm(HOST_CCREG, known_cycles, HOST_CCREG);
3793 assem_debug("; muldiv stall unresolved\n");
3794 host_tempreg_acquire();
3795 emit_readword(&psxRegs.muldivBusyCycle, rtmp);
3796 emit_addimm(rtmp, -cinfo[i].ccadj, rtmp);
3797 emit_sub(rtmp, HOST_CCREG, HOST_TEMPREG);
3798 emit_cmpimm(HOST_TEMPREG, 37);
3799 emit_cmovb_reg(rtmp, HOST_CCREG);
3800 //emit_log_gte_stall(i, 0, reglist);
3801 host_tempreg_release();
3804 static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
3814 emit_readword(&reg_cop2d[copr],tl);
3815 emit_signextend16(tl,tl);
3816 emit_writeword(tl,&reg_cop2d[copr]); // hmh
3823 emit_readword(&reg_cop2d[copr],tl);
3824 emit_andimm(tl,0xffff,tl);
3825 emit_writeword(tl,&reg_cop2d[copr]);
3828 emit_readword(&reg_cop2d[14],tl); // SXY2
3829 emit_writeword(tl,&reg_cop2d[copr]);
3833 c2op_mfc2_29_assemble(tl,temp);
3836 emit_readword(&reg_cop2d[copr],tl);
3841 static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
3845 emit_readword(&reg_cop2d[13],temp); // SXY1
3846 emit_writeword(sl,&reg_cop2d[copr]);
3847 emit_writeword(temp,&reg_cop2d[12]); // SXY0
3848 emit_readword(&reg_cop2d[14],temp); // SXY2
3849 emit_writeword(sl,&reg_cop2d[14]);
3850 emit_writeword(temp,&reg_cop2d[13]); // SXY1
3853 emit_andimm(sl,0x001f,temp);
3854 emit_shlimm(temp,7,temp);
3855 emit_writeword(temp,&reg_cop2d[9]);
3856 emit_andimm(sl,0x03e0,temp);
3857 emit_shlimm(temp,2,temp);
3858 emit_writeword(temp,&reg_cop2d[10]);
3859 emit_andimm(sl,0x7c00,temp);
3860 emit_shrimm(temp,3,temp);
3861 emit_writeword(temp,&reg_cop2d[11]);
3862 emit_writeword(sl,&reg_cop2d[28]);
3865 emit_xorsar_imm(sl,sl,31,temp);
3866 #if defined(HAVE_ARMV5) || defined(__aarch64__)
3867 emit_clz(temp,temp);
3869 emit_movs(temp,HOST_TEMPREG);
3870 emit_movimm(0,temp);
3871 emit_jeq((int)out+4*4);
3872 emit_addpl_imm(temp,1,temp);
3873 emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
3874 emit_jns((int)out-2*4);
3876 emit_writeword(sl,&reg_cop2d[30]);
3877 emit_writeword(temp,&reg_cop2d[31]);
3882 emit_writeword(sl,&reg_cop2d[copr]);
3887 static void c2ls_assemble(int i, const struct regstat *i_regs, int ccadj_)
3892 int memtarget=0,c=0;
3894 enum stub_type type;
3895 int offset_reg = -1;
3896 int fastio_reg_override = -1;
3897 u_int addr_const = ~0;
3898 u_int reglist=get_host_reglist(i_regs->regmap);
3899 u_int copr=(source[i]>>16)&0x1f;
3900 s=get_reg(i_regs->regmap,dops[i].rs1);
3901 tl=get_reg(i_regs->regmap,FTEMP);
3902 offset=cinfo[i].imm;
3905 if(i_regs->regmap[HOST_CCREG]==CCREG)
3906 reglist&=~(1<<HOST_CCREG);
3911 if (dops[i].opcode==0x3a) { // SWC2
3915 c = (i_regs->isconst >> s) & 1;
3917 addr_const = constmap[i][s] + offset;
3918 memtarget = ((signed int)addr_const) < (signed int)(0x80000000 + RAM_SIZE);
3922 cop2_do_stall_check(0, i, i_regs, reglist);
3924 if (dops[i].opcode==0x3a) { // SWC2
3925 cop2_get_dreg(copr,tl,-1);
3933 emit_jmp(0); // inline_readstub/inline_writestub?
3937 jaddr2 = emit_fastpath_cmp_jump(i, i_regs, ar,
3938 &offset_reg, &fastio_reg_override, ccadj_);
3940 else if (ram_offset && memtarget) {
3941 offset_reg = get_ro_reg(i_regs, 0);
3943 switch (dops[i].opcode) {
3944 case 0x32: { // LWC2
3946 if (fastio_reg_override >= 0)
3947 a = fastio_reg_override;
3948 do_load_word(a, tl, offset_reg);
3951 case 0x3a: { // SWC2
3952 #ifdef DESTRUCTIVE_SHIFT
3953 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3956 if (fastio_reg_override >= 0)
3957 a = fastio_reg_override;
3958 do_store_word(a, 0, tl, offset_reg, 1);
3965 if (fastio_reg_override == HOST_TEMPREG || offset_reg == HOST_TEMPREG)
3966 host_tempreg_release();
3968 add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj_,reglist);
3969 if (dops[i].opcode == 0x3a && (!c || is_ram_addr(addr_const))) // SWC2
3970 do_store_smc_check(i, i_regs, reglist, ar);
3971 if (dops[i].opcode == 0x32) { // LWC2
3972 host_tempreg_acquire();
3973 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3974 host_tempreg_release();
3978 static void cop2_assemble(int i, const struct regstat *i_regs)
3980 u_int copr = (source[i]>>11) & 0x1f;
3981 signed char temp = get_reg_temp(i_regs->regmap);
3983 if (!HACK_ENABLED(NDHACK_NO_STALLS)) {
3984 u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), temp, -1);
3985 if (dops[i].opcode2 == 0 || dops[i].opcode2 == 2) { // MFC2/CFC2
3986 signed char tl = get_reg(i_regs->regmap, dops[i].rt1);
3987 reglist = reglist_exclude(reglist, tl, -1);
3989 cop2_do_stall_check(0, i, i_regs, reglist);
3991 if (dops[i].opcode2==0) { // MFC2
3992 signed char tl=get_reg_w(i_regs->regmap, dops[i].rt1);
3993 if(tl>=0&&dops[i].rt1!=0)
3994 cop2_get_dreg(copr,tl,temp);
3996 else if (dops[i].opcode2==4) { // MTC2
3997 signed char sl=get_reg(i_regs->regmap,dops[i].rs1);
3998 cop2_put_dreg(copr,sl,temp);
4000 else if (dops[i].opcode2==2) // CFC2
4002 signed char tl=get_reg_w(i_regs->regmap, dops[i].rt1);
4003 if(tl>=0&&dops[i].rt1!=0)
4004 emit_readword(&reg_cop2c[copr],tl);
4006 else if (dops[i].opcode2==6) // CTC2
4008 signed char sl=get_reg(i_regs->regmap,dops[i].rs1);
4017 emit_signextend16(sl,temp);
4020 c2op_ctc2_31_assemble(sl,temp);
4026 emit_writeword(temp,&reg_cop2c[copr]);
4031 static void do_unalignedwritestub(int n)
4033 assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4);
4035 set_jump_target(stubs[n].addr, out);
4038 struct regstat *i_regs=(struct regstat *)stubs[n].c;
4039 int addr=stubs[n].b;
4040 u_int reglist=stubs[n].e;
4041 signed char *i_regmap=i_regs->regmap;
4042 int temp2=get_reg(i_regmap,FTEMP);
4044 rt=get_reg(i_regmap,dops[i].rs2);
4047 assert(dops[i].opcode==0x2a||dops[i].opcode==0x2e); // SWL/SWR only implemented
4049 reglist&=~(1<<temp2);
4051 // don't bother with it and call write handler
4054 int cc=get_reg(i_regmap,CCREG);
4056 emit_loadreg(CCREG,2);
4057 emit_addimm(cc<0?2:cc,(int)stubs[n].d+1,2);
4058 emit_movimm(start + i*4,3);
4059 emit_writeword(3,&psxRegs.pc);
4060 emit_far_call((dops[i].opcode==0x2a?jump_handle_swl:jump_handle_swr));
4061 emit_addimm(0,-((int)stubs[n].d+1),cc<0?2:cc);
4063 emit_storereg(CCREG,2);
4064 restore_regs(reglist);
4065 emit_jmp(stubs[n].retaddr); // return address
4068 static void do_overflowstub(int n)
4070 assem_debug("do_overflowstub %x\n", start + (u_int)stubs[n].a * 4);
4073 struct regstat *i_regs = (struct regstat *)stubs[n].c;
4074 int ccadj = stubs[n].d;
4075 set_jump_target(stubs[n].addr, out);
4076 wb_dirtys(regs[i].regmap, regs[i].dirty);
4077 exception_assemble(i, i_regs, ccadj);
4080 static void do_alignmentstub(int n)
4082 assem_debug("do_alignmentstub %x\n", start + (u_int)stubs[n].a * 4);
4085 struct regstat *i_regs = (struct regstat *)stubs[n].c;
4086 int ccadj = stubs[n].d;
4087 int is_store = dops[i].itype == STORE || dops[i].opcode == 0x3A; // SWC2
4088 int cause = (dops[i].opcode & 3) << 28;
4089 cause |= is_store ? (R3000E_AdES << 2) : (R3000E_AdEL << 2);
4090 set_jump_target(stubs[n].addr, out);
4091 wb_dirtys(regs[i].regmap, regs[i].dirty);
4092 if (stubs[n].b != 1)
4093 emit_mov(stubs[n].b, 1); // faulting address
4094 emit_movimm(cause, 0);
4095 exception_assemble(i, i_regs, ccadj);
4098 #ifndef multdiv_assemble
4099 void multdiv_assemble(int i,struct regstat *i_regs)
4101 printf("Need multdiv_assemble for this architecture.\n");
4106 static void mov_assemble(int i, const struct regstat *i_regs)
4108 //if(dops[i].opcode2==0x10||dops[i].opcode2==0x12) { // MFHI/MFLO
4109 //if(dops[i].opcode2==0x11||dops[i].opcode2==0x13) { // MTHI/MTLO
4112 tl=get_reg_w(i_regs->regmap, dops[i].rt1);
4115 sl=get_reg(i_regs->regmap,dops[i].rs1);
4116 if(sl>=0) emit_mov(sl,tl);
4117 else emit_loadreg(dops[i].rs1,tl);
4120 if (dops[i].rs1 == HIREG || dops[i].rs1 == LOREG) // MFHI/MFLO
4121 multdiv_do_stall(i, i_regs);
4124 // call interpreter, exception handler, things that change pc/regs/cycles ...
4125 static void call_c_cpu_handler(int i, const struct regstat *i_regs, int ccadj_, u_int pc, void *func)
4127 signed char ccreg=get_reg(i_regs->regmap,CCREG);
4128 assert(ccreg==HOST_CCREG);
4129 assert(!is_delayslot);
4132 emit_movimm(pc,3); // Get PC
4133 emit_readword(&last_count,2);
4134 emit_writeword(3,&psxRegs.pc);
4135 emit_addimm(HOST_CCREG,ccadj_,HOST_CCREG);
4136 emit_add(2,HOST_CCREG,2);
4137 emit_writeword(2,&psxRegs.cycle);
4138 emit_addimm_ptr(FP,(u_char *)&psxRegs - (u_char *)&dynarec_local,0);
4139 emit_far_call(func);
4140 emit_far_jump(jump_to_new_pc);
4143 static void exception_assemble(int i, const struct regstat *i_regs, int ccadj_)
4145 // 'break' tends to be littered around to catch things like
4146 // division by 0 and is almost never executed, so don't emit much code here
4148 if (dops[i].itype == ALU || dops[i].itype == IMM16)
4149 func = is_delayslot ? jump_overflow_ds : jump_overflow;
4150 else if (dops[i].itype == LOAD || dops[i].itype == STORE)
4151 func = is_delayslot ? jump_addrerror_ds : jump_addrerror;
4152 else if (dops[i].opcode2 == 0x0C)
4153 func = is_delayslot ? jump_syscall_ds : jump_syscall;
4155 func = is_delayslot ? jump_break_ds : jump_break;
4156 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) // evicted
4157 emit_loadreg(CCREG, HOST_CCREG);
4158 emit_movimm(start + i*4, 2); // pc
4159 emit_addimm(HOST_CCREG, ccadj_ + CLOCK_ADJUST(1), HOST_CCREG);
4160 emit_far_jump(func);
4163 static void hlecall_bad()
4168 static void hlecall_assemble(int i, const struct regstat *i_regs, int ccadj_)
4170 void *hlefunc = hlecall_bad;
4171 uint32_t hleCode = source[i] & 0x03ffffff;
4172 if (hleCode < ARRAY_SIZE(psxHLEt))
4173 hlefunc = psxHLEt[hleCode];
4175 call_c_cpu_handler(i, i_regs, ccadj_, start + i*4+4, hlefunc);
4178 static void intcall_assemble(int i, const struct regstat *i_regs, int ccadj_)
4180 call_c_cpu_handler(i, i_regs, ccadj_, start + i*4, execI);
4183 static void speculate_mov(int rs,int rt)
4186 smrv_strong_next|=1<<rt;
4191 static void speculate_mov_weak(int rs,int rt)
4194 smrv_weak_next|=1<<rt;
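// Best-effort speculation of guest register values (smrv), used by
// emit_fastpath_cmp_jump() to pick memory fast paths. "strong" bits mark
// values known from constants (plus gp/sp, assumed stable across the
// block); "weak" bits mark values merely copied from the block-entry
// snapshot; registers written by anything we can't track drop out of both.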
4199 static void speculate_register_values(int i)
4202 memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
4203 // gp,sp are likely to stay the same throughout the block
4204 smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
4205 smrv_weak_next=~smrv_strong_next;
4206 //printf(" llr %08x\n", smrv[4]);
4208 smrv_strong=smrv_strong_next;
4209 smrv_weak=smrv_weak_next;
4210 switch(dops[i].itype) {
4212 if ((smrv_strong>>dops[i].rs1)&1) speculate_mov(dops[i].rs1,dops[i].rt1);
4213 else if((smrv_strong>>dops[i].rs2)&1) speculate_mov(dops[i].rs2,dops[i].rt1);
4214 else if((smrv_weak>>dops[i].rs1)&1) speculate_mov_weak(dops[i].rs1,dops[i].rt1);
4215 else if((smrv_weak>>dops[i].rs2)&1) speculate_mov_weak(dops[i].rs2,dops[i].rt1);
4217 smrv_strong_next&=~(1<<dops[i].rt1);
4218 smrv_weak_next&=~(1<<dops[i].rt1);
4222 smrv_strong_next&=~(1<<dops[i].rt1);
4223 smrv_weak_next&=~(1<<dops[i].rt1);
4226 if(dops[i].rt1&&is_const(®s[i],dops[i].rt1)) {
4227 int hr = get_reg_w(regs[i].regmap, dops[i].rt1);
4230 if(get_final_value(hr,i,&value))
4231 smrv[dops[i].rt1]=value;
4232 else smrv[dops[i].rt1]=constmap[i][hr];
4233 smrv_strong_next|=1<<dops[i].rt1;
4237 if ((smrv_strong>>dops[i].rs1)&1) speculate_mov(dops[i].rs1,dops[i].rt1);
4238 else if((smrv_weak>>dops[i].rs1)&1) speculate_mov_weak(dops[i].rs1,dops[i].rt1);
4242 if(start<0x2000&&(dops[i].rt1==26||(smrv[dops[i].rt1]>>24)==0xa0)) {
4243 // special case for BIOS
4244 smrv[dops[i].rt1]=0xa0000000;
4245 smrv_strong_next|=1<<dops[i].rt1;
4252 smrv_strong_next&=~(1<<dops[i].rt1);
4253 smrv_weak_next&=~(1<<dops[i].rt1);
4257 if(dops[i].opcode2==0||dops[i].opcode2==2) { // MFC/CFC
4258 smrv_strong_next&=~(1<<dops[i].rt1);
4259 smrv_weak_next&=~(1<<dops[i].rt1);
4263 if (dops[i].opcode==0x32) { // LWC2
4264 smrv_strong_next&=~(1<<dops[i].rt1);
4265 smrv_weak_next&=~(1<<dops[i].rt1);
4271 printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
4272 ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
4276 static void ujump_assemble(int i, const struct regstat *i_regs);
4277 static void rjump_assemble(int i, const struct regstat *i_regs);
4278 static void cjump_assemble(int i, const struct regstat *i_regs);
4279 static void sjump_assemble(int i, const struct regstat *i_regs);
4281 static int assemble(int i, const struct regstat *i_regs, int ccadj_)
4284 switch (dops[i].itype) {
4286 alu_assemble(i, i_regs, ccadj_);
4289 imm16_assemble(i, i_regs, ccadj_);
4292 shift_assemble(i, i_regs);
4295 shiftimm_assemble(i, i_regs);
4298 load_assemble(i, i_regs, ccadj_);
4301 loadlr_assemble(i, i_regs, ccadj_);
4304 store_assemble(i, i_regs, ccadj_);
4307 storelr_assemble(i, i_regs, ccadj_);
4310 cop0_assemble(i, i_regs, ccadj_);
4313 rfe_assemble(i, i_regs);
4316 cop2_assemble(i, i_regs);
4319 c2ls_assemble(i, i_regs, ccadj_);
4322 c2op_assemble(i, i_regs);
4325 multdiv_assemble(i, i_regs);
4326 multdiv_prepare_stall(i, i_regs, ccadj_);
4329 mov_assemble(i, i_regs);
4332 exception_assemble(i, i_regs, ccadj_);
4335 hlecall_assemble(i, i_regs, ccadj_);
4338 intcall_assemble(i, i_regs, ccadj_);
4341 ujump_assemble(i, i_regs);
4345 rjump_assemble(i, i_regs);
4349 cjump_assemble(i, i_regs);
4353 sjump_assemble(i, i_regs);
4358 // not handled, just skip
4366 static void ds_assemble(int i, const struct regstat *i_regs)
4368 speculate_register_values(i);
4370 switch (dops[i].itype) {
4378 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
4381 assemble(i, i_regs, cinfo[i].ccadj);
4386 // Is the branch target a valid internal jump?
4387 static int internal_branch(int addr)
4389 if(addr&1) return 0; // Indirect (register) jump
4390 if(addr>=start && addr<start+slen*4-4)
4397 static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t u)
4400 for(hr=0;hr<HOST_REGS;hr++) {
4401 if(hr!=EXCLUDE_REG) {
4402 if(pre[hr]!=entry[hr]) {
4405 if(get_reg(entry,pre[hr])<0) {
4407 if(!((u>>pre[hr])&1))
4408 emit_storereg(pre[hr],hr);
4415 // Move from one register to another (no writeback)
4416 for(hr=0;hr<HOST_REGS;hr++) {
4417 if(hr!=EXCLUDE_REG) {
4418 if(pre[hr]!=entry[hr]) {
4419 if(pre[hr]>=0&&pre[hr]<TEMPREG) {
4421 if((nr=get_reg(entry,pre[hr]))>=0) {
4430 // Load the specified registers
4431 // This only loads the registers given as arguments because
4432 // we don't want to load things that will be overwritten
4433 static inline void load_reg(signed char entry[], signed char regmap[], int rs)
4435 int hr = get_reg(regmap, rs);
4436 if (hr >= 0 && entry[hr] != regmap[hr])
4437 emit_loadreg(regmap[hr], hr);
4440 static void load_regs(signed char entry[], signed char regmap[], int rs1, int rs2)
4442 load_reg(entry, regmap, rs1);
4444 load_reg(entry, regmap, rs2);
4447 // Load registers prior to the start of a loop
4448 // so that they are not loaded within the loop
4449 static void loop_preload(signed char pre[],signed char entry[])
4452 for (hr = 0; hr < HOST_REGS; hr++) {
4454 if (r >= 0 && pre[hr] != r && get_reg(pre, r) < 0) {
4455 assem_debug("loop preload:\n");
4457 emit_loadreg(r, hr);
4462 // Generate address for load/store instruction
4463 // goes to AGEN (or temp) for writes, FTEMP for LOADLR and cop1/2 loads
4464 // AGEN is assigned by pass5b_preallocate2
4465 static void address_generation(int i, const struct regstat *i_regs, signed char entry[])
4467 if (dops[i].is_load || dops[i].is_store) {
4469 int agr = AGEN1 + (i&1);
4470 if(dops[i].itype==LOAD) {
4471 if (!dops[i].may_except)
4472 ra = get_reg_w(i_regs->regmap, dops[i].rt1); // reuse dest for agen
4474 ra = get_reg_temp(i_regs->regmap);
4476 if(dops[i].itype==LOADLR) {
4477 ra=get_reg(i_regs->regmap,FTEMP);
4479 if(dops[i].itype==STORE||dops[i].itype==STORELR) {
4480 ra=get_reg(i_regs->regmap,agr);
4481 if(ra<0) ra=get_reg_temp(i_regs->regmap);
4483 if(dops[i].itype==C2LS) {
4484 if (dops[i].opcode == 0x32) // LWC2
4485 ra=get_reg(i_regs->regmap,FTEMP);
4487 ra=get_reg(i_regs->regmap,agr);
4488 if(ra<0) ra=get_reg_temp(i_regs->regmap);
4491 int rs = get_reg(i_regs->regmap, dops[i].rs1);
4494 int offset = cinfo[i].imm;
4495 int add_offset = offset != 0;
4496 int c = rs >= 0 && ((i_regs->wasconst >> rs) & 1);
4497 if(dops[i].rs1==0) {
4498 // Using r0 as a base address
4500 if(!entry||entry[ra]!=agr) {
4501 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
4502 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4504 emit_movimm(offset,ra);
4506 } // else did it in the previous cycle
4512 if (!entry || entry[ra] != dops[i].rs1)
4513 emit_loadreg(dops[i].rs1, ra);
4515 //if(!entry||entry[ra]!=dops[i].rs1)
4516 // printf("poor load scheduling!\n");
4519 if(dops[i].rs1!=dops[i].rt1||dops[i].itype!=LOAD) {
4521 if(!entry||entry[ra]!=agr) {
4522 if (dops[i].opcode==0x22||dops[i].opcode==0x26) {
4523 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4525 emit_movimm(constmap[i][rs]+offset,ra);
4526 regs[i].loadedconst|=1<<ra;
4528 } // else did it in the previous cycle
4531 else // load_consts already did it
4540 emit_addimm(rs,offset,ra);
4542 emit_addimm(ra,offset,ra);
4547 assert(cinfo[i].addr >= 0);
4549 // Preload constants for next instruction
4550 if (dops[i+1].is_load || dops[i+1].is_store) {
4553 agr=AGEN1+((i+1)&1);
4554 ra=get_reg(i_regs->regmap,agr);
4556 int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
4557 int offset=cinfo[i+1].imm;
4558 int c=(regs[i+1].wasconst>>rs)&1;
4559 if(c&&(dops[i+1].rs1!=dops[i+1].rt1||dops[i+1].itype!=LOAD)) {
4560 if (dops[i+1].opcode==0x22||dops[i+1].opcode==0x26) {
4561 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4562 }else if (dops[i+1].opcode==0x1a||dops[i+1].opcode==0x1b) {
4563 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4565 emit_movimm(constmap[i+1][rs]+offset,ra);
4566 regs[i+1].loadedconst|=1<<ra;
4569 else if(dops[i+1].rs1==0) {
4570 // Using r0 as a base address
4571 if (dops[i+1].opcode==0x22||dops[i+1].opcode==0x26) {
4572 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4573 }else if (dops[i+1].opcode==0x1a||dops[i+1].opcode==0x1b) {
4574 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4576 emit_movimm(offset,ra);
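// For a host reg holding a known constant, look ahead to the last value it
// will have before being overwritten or reaching a branch, so load_consts()
// materializes only that final value (and can pre-add the offset of an
// upcoming load from the same reg). Returns 0 when the value turns out to
// be unneeded.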
4583 static int get_final_value(int hr, int i, u_int *value)
4585 int reg=regs[i].regmap[hr];
4587 if(regs[i+1].regmap[hr]!=reg) break;
4588 if(!((regs[i+1].isconst>>hr)&1)) break;
4589 if(dops[i+1].bt) break;
4593 if (dops[i].is_jump) {
4594 *value=constmap[i][hr];
4598 if (dops[i+1].is_jump) {
4599 // Load in delay slot, out-of-order execution
4600 if(dops[i+2].itype==LOAD&&dops[i+2].rs1==reg&&dops[i+2].rt1==reg&&((regs[i+1].wasconst>>hr)&1))
4602 // Precompute load address
4603 *value=constmap[i][hr]+cinfo[i+2].imm;
4607 if(dops[i+1].itype==LOAD&&dops[i+1].rs1==reg&&dops[i+1].rt1==reg)
4609 // Precompute load address
4610 *value=constmap[i][hr]+cinfo[i+1].imm;
4611 //printf("c=%x imm=%lx\n",(long)constmap[i][hr],cinfo[i+1].imm);
4616 *value=constmap[i][hr];
4617 //printf("c=%lx\n",(long)constmap[i][hr]);
4618 if(i==slen-1) return 1;
4620 return !((unneeded_reg[i+1]>>reg)&1);
4623 // Load registers with known constants
4624 static void load_consts(signed char pre[],signed char regmap[],int i)
4627 // propagate loaded constant flags
4628 if(i==0||dops[i].bt)
4629 regs[i].loadedconst=0;
4631 for (hr = 0; hr < HOST_REGS; hr++) {
4632 if (hr == EXCLUDE_REG || regmap[hr] < 0 || pre[hr] != regmap[hr])
4634 if ((((regs[i-1].isconst & regs[i-1].loadedconst) >> hr) & 1)
4635 && regmap[hr] == regs[i-1].regmap[hr])
4637 regs[i].loadedconst |= 1u << hr;
4642 for(hr=0;hr<HOST_REGS;hr++) {
4643 if(hr!=EXCLUDE_REG&®map[hr]>=0) {
4644 //if(entry[hr]!=regmap[hr]) {
4645 if(!((regs[i].loadedconst>>hr)&1)) {
4646 assert(regmap[hr]<64);
4647 if(((regs[i].isconst>>hr)&1)&®map[hr]>0) {
4648 u_int value, similar=0;
4649 if(get_final_value(hr,i,&value)) {
4650 // see if some other register has similar value
4651 for(hr2=0;hr2<HOST_REGS;hr2++) {
4652 if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
4653 if(is_similar_value(value,constmap[i][hr2])) {
4661 if(get_final_value(hr2,i,&value2)) // is this needed?
4662 emit_movimm_from(value2,hr2,value,hr);
4664 emit_movimm(value,hr);
4670 emit_movimm(value,hr);
4673 regs[i].loadedconst|=1<<hr;
4680 static void load_all_consts(const signed char regmap[], u_int dirty, int i)
4684 for(hr=0;hr<HOST_REGS;hr++) {
4685 if(hr!=EXCLUDE_REG&®map[hr]>=0&&((dirty>>hr)&1)) {
4686 assert(regmap[hr] < 64);
4687 if(((regs[i].isconst>>hr)&1)&®map[hr]>0) {
4688 int value=constmap[i][hr];
4693 emit_movimm(value,hr);
4700 // Write out all dirty registers (except cycle count)
4702 static void wb_dirtys(const signed char i_regmap[], u_int i_dirty)
4705 for(hr=0;hr<HOST_REGS;hr++) {
4706 if(hr!=EXCLUDE_REG) {
4707 if(i_regmap[hr]>0) {
4708 if(i_regmap[hr]!=CCREG) {
4709 if((i_dirty>>hr)&1) {
4710 assert(i_regmap[hr]<64);
4711 emit_storereg(i_regmap[hr],hr);
4720 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4721 // This writes the registers not written by store_regs_bt
4722 static void wb_needed_dirtys(const signed char i_regmap[], u_int i_dirty, int addr)
4725 int t=(addr-start)>>2;
4726 for(hr=0;hr<HOST_REGS;hr++) {
4727 if(hr!=EXCLUDE_REG) {
4728 if(i_regmap[hr]>0) {
4729 if(i_regmap[hr]!=CCREG) {
4730 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) {
4731 if((i_dirty>>hr)&1) {
4732 assert(i_regmap[hr]<64);
4733 emit_storereg(i_regmap[hr],hr);
4742 // Load all registers (except cycle count)
4743 #ifndef load_all_regs
4744 static void load_all_regs(const signed char i_regmap[])
4747 for(hr=0;hr<HOST_REGS;hr++) {
4748 if(hr!=EXCLUDE_REG) {
4749 if(i_regmap[hr]==0) {
4753 if(i_regmap[hr]>0 && i_regmap[hr]<TEMPREG && i_regmap[hr]!=CCREG)
4755 emit_loadreg(i_regmap[hr],hr);
4762 // Load all current registers also needed by next instruction
4763 static void load_needed_regs(const signed char i_regmap[], const signed char next_regmap[])
4765 signed char regmap_sel[HOST_REGS];
4767 for (hr = 0; hr < HOST_REGS; hr++) {
4768 regmap_sel[hr] = -1;
4769 if (hr != EXCLUDE_REG)
4770 if (next_regmap[hr] == i_regmap[hr] || get_reg(next_regmap, i_regmap[hr]) >= 0)
4771 regmap_sel[hr] = i_regmap[hr];
4773 load_all_regs(regmap_sel);
4776 // Load all regs, storing cycle count if necessary
4777 static void load_regs_entry(int t)
4779 if(dops[t].is_ds) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4780 else if(cinfo[t].ccadj) emit_addimm(HOST_CCREG,-cinfo[t].ccadj,HOST_CCREG);
4781 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4782 emit_storereg(CCREG,HOST_CCREG);
4784 load_all_regs(regs[t].regmap_entry);
4787 // Store dirty registers prior to branch
4788 static void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4790 if(internal_branch(addr))
4792 int t=(addr-start)>>2;
4794 for(hr=0;hr<HOST_REGS;hr++) {
4795 if(hr!=EXCLUDE_REG) {
4796 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4797 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) {
4798 if((i_dirty>>hr)&1) {
4799 assert(i_regmap[hr]<64);
4800 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4801 emit_storereg(i_regmap[hr],hr);
4810 // Branch out of this block, write out all dirty regs
4811 wb_dirtys(i_regmap,i_dirty);
4815 // Load all needed registers for branch target
4816 static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4818 //if(addr>=start && addr<(start+slen*4))
4819 if(internal_branch(addr))
4821 int t=(addr-start)>>2;
4823 // Store the cycle count before loading something else
4824 if(i_regmap[HOST_CCREG]!=CCREG) {
4825 assert(i_regmap[HOST_CCREG]==-1);
4827 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4828 emit_storereg(CCREG,HOST_CCREG);
4831 for(hr=0;hr<HOST_REGS;hr++) {
4832 if(hr!=EXCLUDE_REG&®s[t].regmap_entry[hr]>=0&®s[t].regmap_entry[hr]<TEMPREG) {
4833 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4834 if(regs[t].regmap_entry[hr]==0) {
4837 else if(regs[t].regmap_entry[hr]!=CCREG)
4839 emit_loadreg(regs[t].regmap_entry[hr],hr);
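// Check whether the register mapping and dirty state at a branch site
// already satisfy the target block's entry requirements, so the branch can
// be linked directly without writeback/reload fixup code in between.
// Delay-slot targets never match since they need extra processing.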
4847 static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4849 if(addr>=start && addr<start+slen*4-4)
4851 int t=(addr-start)>>2;
4853 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4854 for(hr=0;hr<HOST_REGS;hr++)
4858 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4860 if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4867 if(i_regmap[hr]<TEMPREG)
4869 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4872 else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4878 else // Same register but is it 32-bit or dirty?
4881 if(!((regs[t].dirty>>hr)&1))
4885 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4887 //printf("%x: dirty no match\n",addr);
4895 // Delay slots are not valid branch targets
4896 //if(t>0&&(dops[t-1].is_jump) return 0;
4897 // Delay slots require additional processing, so do not match
4898 if(dops[t].is_ds) return 0;
4903 for(hr=0;hr<HOST_REGS;hr++)
4909 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4924 static void drc_dbg_emit_do_cmp(int i, int ccadj_)
4926 extern void do_insn_cmp();
4928 u_int hr, reglist = get_host_reglist(regs[i].regmap);
4929 reglist |= get_host_reglist(regs[i].regmap_entry);
4930 reglist &= DRC_DBG_REGMASK;
4932 assem_debug("//do_insn_cmp %08x\n", start+i*4);
4934 // write out changed consts to match the interpreter
4935 if (i > 0 && !dops[i].bt) {
4936 for (hr = 0; hr < HOST_REGS; hr++) {
4937 int reg = regs[i].regmap_entry[hr]; // regs[i-1].regmap[hr];
4938 if (hr == EXCLUDE_REG || reg <= 0)
4940 if (!((regs[i-1].isconst >> hr) & 1))
4942 if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
4944 emit_movimm(constmap[i-1][hr],0);
4945 emit_storereg(reg, 0);
4948 if (dops[i].opcode == 0x0f) { // LUI
4949 emit_movimm(cinfo[i].imm << 16, 0);
4950 emit_storereg(dops[i].rt1, 0);
4952 emit_movimm(start+i*4,0);
4953 emit_writeword(0,&pcaddr);
4954 int cc = get_reg(regs[i].regmap_entry, CCREG);
4956 emit_loadreg(CCREG, cc = 0);
4957 emit_addimm(cc, ccadj_, 0);
4958 emit_writeword(0, &psxRegs.cycle);
4959 emit_far_call(do_insn_cmp);
4960 //emit_readword(&cycle,0);
4961 //emit_addimm(0,2,0);
4962 //emit_writeword(0,&cycle);
4964 restore_regs(reglist);
4965 assem_debug("\\\\do_insn_cmp\n");
4967 static void drc_dbg_emit_wb_dirtys(int i, const struct regstat *i_regs)
4969 // write-out non-consts, consts are likely different because of get_final_value()
4970 if (i_regs->dirty & ~i_regs->loadedconst) {
4971 assem_debug("/ drc_dbg_wb\n");
4972 wb_dirtys(i_regs->regmap, i_regs->dirty & ~i_regs->loadedconst);
4973 assem_debug("\\ drc_dbg_wb\n");
4977 #define drc_dbg_emit_do_cmp(x,y)
4978 #define drc_dbg_emit_wb_dirtys(x,y)
4981 // Used when a branch jumps into the delay slot of another branch
4982 static void ds_assemble_entry(int i)
4984 int t = (cinfo[i].ba - start) >> 2;
4985 int ccadj_ = -CLOCK_ADJUST(1);
4987 instr_addr[t] = out;
4988 assem_debug("Assemble delay slot at %x\n",cinfo[i].ba);
4989 assem_debug("<->\n");
4990 drc_dbg_emit_do_cmp(t, ccadj_);
4991 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&®s[t].regmap[HOST_CCREG]!=CCREG)
4992 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
4993 load_regs(regs[t].regmap_entry,regs[t].regmap,dops[t].rs1,dops[t].rs2);
4994 address_generation(t,&regs[t],regs[t].regmap_entry);
4995 if (ram_offset && (dops[t].is_load || dops[t].is_store))
4996 load_reg(regs[t].regmap_entry,regs[t].regmap,ROREG);
4997 if (dops[t].is_store)
4998 load_reg(regs[t].regmap_entry,regs[t].regmap,INVCP);
5000 switch (dops[t].itype) {
5008 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
5011 assemble(t, ®s[t], ccadj_);
5013 store_regs_bt(regs[t].regmap,regs[t].dirty,cinfo[i].ba+4);
5014 load_regs_bt(regs[t].regmap,regs[t].dirty,cinfo[i].ba+4);
5015 if(internal_branch(cinfo[i].ba+4))
5016 assem_debug("branch: internal\n");
5018 assem_debug("branch: external\n");
5019 assert(internal_branch(cinfo[i].ba+4));
5020 add_to_linker(out,cinfo[i].ba+4,internal_branch(cinfo[i].ba+4));
5024 // Load 2 immediates optimizing for small code size
5025 static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
5027 emit_movimm(imm1,rt1);
5028 emit_movimm_from(imm1,rt1,imm2,rt2);
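// Emit the cycle count update and overflow check for the branch at i;
// *adj returns the cycle adjustment already accounted for at the target.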
5031 static void do_cc(int i, const signed char i_regmap[], int *adj,
5032 int addr, int taken, int invert)
5034 int count, count_plus2;
5038 if(dops[i].itype==RJUMP)
5042 //if(cinfo[i].ba>=start && cinfo[i].ba<(start+slen*4))
5043 if(internal_branch(cinfo[i].ba))
5045 t=(cinfo[i].ba-start)>>2;
5046 if(dops[t].is_ds) *adj=-CLOCK_ADJUST(1); // Branch into delay slot adds an extra cycle
5047 else *adj=cinfo[t].ccadj;
5053 count = cinfo[i].ccadj;
5054 count_plus2 = count + CLOCK_ADJUST(2);
5055 if(taken==TAKEN && i==(cinfo[i].ba-start)>>2 && source[i+1]==0) {
5057 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
5059 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
5060 emit_andimm(HOST_CCREG,3,HOST_CCREG);
5064 else if(*adj==0||invert) {
5065 int cycles = count_plus2;
5070 if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
5071 cycles=*adj+count+2-*adj;
5074 emit_addimm_and_set_flags(cycles, HOST_CCREG);
5080 emit_cmpimm(HOST_CCREG, -count_plus2);
5084 add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:count_plus2,i,addr,taken,0);
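// Out-of-line stub taken when the cycle count runs out at a branch:
// write back dirty regs, store the (possibly conditional) target PC to
// pcaddr, call cc_interrupt, then resume.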
5087 static void do_ccstub(int n)
5090 assem_debug("do_ccstub %x\n",start+(u_int)stubs[n].b*4);
5091 set_jump_target(stubs[n].addr, out);
5093 if (stubs[n].d != TAKEN) {
5094 wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
5097 if(internal_branch(cinfo[i].ba))
5098 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5102 // Save PC as return address
5103 emit_movimm(stubs[n].c,0);
5104 emit_writeword(0,&pcaddr);
5108 // Return address depends on which way the branch goes
5109 if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)
5111 int s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5112 int s2l=get_reg(branch_regs[i].regmap,dops[i].rs2);
5118 else if(dops[i].rs2==0)
5123 #ifdef DESTRUCTIVE_WRITEBACK
5125 if((branch_regs[i].dirty>>s1l)&1)
5126 emit_loadreg(dops[i].rs1,s1l);
5129 if((branch_regs[i].dirty>>s1l)&1)
5130 emit_loadreg(dops[i].rs2,s1l);
5133 if((branch_regs[i].dirty>>s2l)&1)
5134 emit_loadreg(dops[i].rs2,s2l);
5137 int addr=-1,alt=-1,ntaddr=-1;
5140 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5141 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5142 branch_regs[i].regmap[hr]!=dops[i].rs2 )
5150 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5151 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5152 branch_regs[i].regmap[hr]!=dops[i].rs2 )
5158 if ((dops[i].opcode & 0x3e) == 6) // BLEZ/BGTZ needs another register
5162 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5163 branch_regs[i].regmap[hr]!=dops[i].rs1 &&
5164 branch_regs[i].regmap[hr]!=dops[i].rs2 )
5170 assert(hr<HOST_REGS);
5172 if (dops[i].opcode == 4) // BEQ
5174 #ifdef HAVE_CMOV_IMM
5175 if(s2l>=0) emit_cmp(s1l,s2l);
5176 else emit_test(s1l,s1l);
5177 emit_cmov2imm_e_ne_compact(cinfo[i].ba,start+i*4+8,addr);
5179 emit_mov2imm_compact(cinfo[i].ba,addr,start+i*4+8,alt);
5180 if(s2l>=0) emit_cmp(s1l,s2l);
5181 else emit_test(s1l,s1l);
5182 emit_cmovne_reg(alt,addr);
5185 else if (dops[i].opcode == 5) // BNE
5187 #ifdef HAVE_CMOV_IMM
5188 if(s2l>=0) emit_cmp(s1l,s2l);
5189 else emit_test(s1l,s1l);
5190 emit_cmov2imm_e_ne_compact(start+i*4+8,cinfo[i].ba,addr);
5192 emit_mov2imm_compact(start+i*4+8,addr,cinfo[i].ba,alt);
5193 if(s2l>=0) emit_cmp(s1l,s2l);
5194 else emit_test(s1l,s1l);
5195 emit_cmovne_reg(alt,addr);
5198 else if (dops[i].opcode == 6) // BLEZ
5200 //emit_movimm(cinfo[i].ba,alt);
5201 //emit_movimm(start+i*4+8,addr);
5202 emit_mov2imm_compact(cinfo[i].ba,alt,start+i*4+8,addr);
5204 emit_cmovl_reg(alt,addr);
5206 else if (dops[i].opcode == 7) // BGTZ
5208 //emit_movimm(cinfo[i].ba,addr);
5209 //emit_movimm(start+i*4+8,ntaddr);
5210 emit_mov2imm_compact(cinfo[i].ba,addr,start+i*4+8,ntaddr);
5212 emit_cmovl_reg(ntaddr,addr);
5214 else if (dops[i].itype == SJUMP) // BLTZ/BGEZ
5216 //emit_movimm(cinfo[i].ba,alt);
5217 //emit_movimm(start+i*4+8,addr);
5219 emit_mov2imm_compact(cinfo[i].ba,
5220 (dops[i].opcode2 & 1) ? addr : alt, start + i*4 + 8,
5221 (dops[i].opcode2 & 1) ? alt : addr);
5223 emit_cmovs_reg(alt,addr);
5226 emit_movimm((dops[i].opcode2 & 1) ? cinfo[i].ba : start + i*4 + 8, addr);
5228 emit_writeword(addr, &pcaddr);
5231 if(dops[i].itype==RJUMP)
5233 int r=get_reg(branch_regs[i].regmap,dops[i].rs1);
5234 if (ds_writes_rjump_rs(i)) {
5235 r=get_reg(branch_regs[i].regmap,RTEMP);
5237 emit_writeword(r,&pcaddr);
5239 else {SysPrintf("Unknown branch type in do_ccstub\n");abort();}
5241 // Update cycle count
5242 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5243 if(stubs[n].a) emit_addimm(HOST_CCREG,(int)stubs[n].a,HOST_CCREG);
5244 emit_far_call(cc_interrupt);
5245 if(stubs[n].a) emit_addimm(HOST_CCREG,-(int)stubs[n].a,HOST_CCREG);
5246 if(stubs[n].d==TAKEN) {
5247 if(internal_branch(cinfo[i].ba))
5248 load_needed_regs(branch_regs[i].regmap,regs[(cinfo[i].ba-start)>>2].regmap_entry);
5249 else if(dops[i].itype==RJUMP) {
5250 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5251 emit_readword(&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5253 emit_loadreg(dops[i].rs1,get_reg(branch_regs[i].regmap,dops[i].rs1));
5255 }else if(stubs[n].d==NOTTAKEN) {
5256 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5257 else load_all_regs(branch_regs[i].regmap);
5259 load_all_regs(branch_regs[i].regmap);
5261 if (stubs[n].retaddr)
5262 emit_jmp(stubs[n].retaddr);
5264 do_jump_vaddr(stubs[n].e);
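// Record a jump location to be patched later, once code for 'target' exists.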
5267 static void add_to_linker(void *addr, u_int target, int is_internal)
5269 assert(linkcount < ARRAY_SIZE(link_addr));
5270 link_addr[linkcount].addr = addr;
5271 link_addr[linkcount].target = target;
5272 link_addr[linkcount].internal = is_internal;
5276 static void ujump_assemble_write_ra(int i)
5279 unsigned int return_address;
5280 rt=get_reg(branch_regs[i].regmap,31);
5281 //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5283 return_address=start+i*4+8;
5286 if(internal_branch(return_address)&&dops[i+1].rt1!=31) {
5287 int temp=-1; // note: must be ds-safe
5291 if(temp>=0) do_miniht_insert(return_address,rt,temp);
5292 else emit_movimm(return_address,rt);
5300 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5303 if (!((regs[i].loadedconst >> rt) & 1))
5304 emit_movimm(return_address, rt); // PC into link register
5306 emit_prefetch(hash_table_get(return_address));
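// Assemble an unconditional jump (J/JAL) and its delay slot.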
5312 static void ujump_assemble(int i, const struct regstat *i_regs)
5314 if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
5315 address_generation(i+1,i_regs,regs[i].regmap_entry);
5317 int temp=get_reg(branch_regs[i].regmap,PTEMP);
5318 if(dops[i].rt1==31&&temp>=0)
5320 signed char *i_regmap=i_regs->regmap;
5321 int return_address=start+i*4+8;
5322 if(get_reg(branch_regs[i].regmap,31)>0)
5323 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5326 if (dops[i].rt1 == 31)
5327 ujump_assemble_write_ra(i); // writeback ra for DS
5328 ds_assemble(i+1,i_regs);
5329 uint64_t bc_unneeded=branch_regs[i].u;
5330 bc_unneeded|=1|(1LL<<dops[i].rt1);
5331 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5332 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
5334 cc=get_reg(branch_regs[i].regmap,CCREG);
5335 assert(cc==HOST_CCREG);
5336 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5338 if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
5340 do_cc(i,branch_regs[i].regmap,&adj,cinfo[i].ba,TAKEN,0);
5341 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5342 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5343 if(internal_branch(cinfo[i].ba))
5344 assem_debug("branch: internal\n");
5346 assem_debug("branch: external\n");
5347 if (internal_branch(cinfo[i].ba) && dops[(cinfo[i].ba-start)>>2].is_ds) {
5348 ds_assemble_entry(i);
5351 add_to_linker(out,cinfo[i].ba,internal_branch(cinfo[i].ba));
5356 static void rjump_assemble_write_ra(int i)
5358 int rt,return_address;
5359 rt=get_reg_w(branch_regs[i].regmap, dops[i].rt1);
5360 //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5362 return_address=start+i*4+8;
5366 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5369 if (!((regs[i].loadedconst >> rt) & 1))
5370 emit_movimm(return_address, rt); // PC into link register
5372 emit_prefetch(hash_table_get(return_address));
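// Assemble a register-indirect jump (JR/JALR) and its delay slot.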
5376 static void rjump_assemble(int i, const struct regstat *i_regs)
5380 rs=get_reg(branch_regs[i].regmap,dops[i].rs1);
5382 if (ds_writes_rjump_rs(i)) {
5383 // Delay slot abuse, make a copy of the branch address register
5384 temp=get_reg(branch_regs[i].regmap,RTEMP);
5386 assert(regs[i].regmap[temp]==RTEMP);
5390 address_generation(i+1,i_regs,regs[i].regmap_entry);
5394 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5395 signed char *i_regmap=i_regs->regmap;
5396 int return_address=start+i*4+8;
5397 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
5402 if(dops[i].rs1==31) {
5403 int rh=get_reg(regs[i].regmap,RHASH);
5404 if(rh>=0) do_preload_rhash(rh);
5407 if (dops[i].rt1 != 0)
5408 rjump_assemble_write_ra(i);
5409 ds_assemble(i+1,i_regs);
5410 uint64_t bc_unneeded=branch_regs[i].u;
5411 bc_unneeded|=1|(1LL<<dops[i].rt1);
5412 bc_unneeded&=~(1LL<<dops[i].rs1);
5413 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5414 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,CCREG);
5415 cc=get_reg(branch_regs[i].regmap,CCREG);
5416 assert(cc==HOST_CCREG);
5419 int rh=get_reg(branch_regs[i].regmap,RHASH);
5420 int ht=get_reg(branch_regs[i].regmap,RHTBL);
5421 if(dops[i].rs1==31) {
5422 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5423 do_preload_rhtbl(ht);
5427 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
5428 #ifdef DESTRUCTIVE_WRITEBACK
5429 if((branch_regs[i].dirty>>rs)&1) {
5430 if(dops[i].rs1!=dops[i+1].rt1&&dops[i].rs1!=dops[i+1].rt2) {
5431 emit_loadreg(dops[i].rs1,rs);
5436 if(dops[i].rt1==31&&temp>=0) emit_prefetchreg(temp);
5439 if(dops[i].rs1==31) {
5440 do_miniht_load(ht,rh);
5443 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5444 //if(adj) emit_addimm(cc,2*(cinfo[i].ccadj+2-adj),cc); // ??? - Shouldn't happen
5446 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), HOST_CCREG);
5447 add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
5448 if (dops[i+1].itype == RFE)
5449 // special case for RFE
5453 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
5455 if(dops[i].rs1==31) {
5456 do_miniht_jump(rs,rh,ht);
5463 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5464 if(dops[i].rt1!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
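// Assemble a conditional branch (BEQ/BNE/BLEZ/BGTZ) and its delay slot,
// either delay slot first (out of order) or after the compare (in order).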
5468 static void cjump_assemble(int i, const struct regstat *i_regs)
5470 const signed char *i_regmap = i_regs->regmap;
5473 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5474 assem_debug("match=%d\n",match);
5476 int unconditional=0,nop=0;
5478 int internal=internal_branch(cinfo[i].ba);
5479 if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
5480 if(!match) invert=1;
5481 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5482 if(i>(cinfo[i].ba-start)>>2) invert=1;
5485 invert=1; // because of near cond. branches
5489 s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5490 s2l=get_reg(branch_regs[i].regmap,dops[i].rs2);
5493 s1l=get_reg(i_regmap,dops[i].rs1);
5494 s2l=get_reg(i_regmap,dops[i].rs2);
5496 if(dops[i].rs1==0&&dops[i].rs2==0)
5498 if(dops[i].opcode&1) nop=1;
5499 else unconditional=1;
5500 //assert(dops[i].opcode!=5);
5501 //assert(dops[i].opcode!=7);
5502 //assert(dops[i].opcode!=0x15);
5503 //assert(dops[i].opcode!=0x17);
5505 else if(dops[i].rs1==0)
5510 else if(dops[i].rs2==0)
5516 // Out of order execution (delay slot first)
5518 address_generation(i+1,i_regs,regs[i].regmap_entry);
5519 ds_assemble(i+1,i_regs);
5521 uint64_t bc_unneeded=branch_regs[i].u;
5522 bc_unneeded&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
5524 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5525 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs2);
5526 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
5527 cc=get_reg(branch_regs[i].regmap,CCREG);
5528 assert(cc==HOST_CCREG);
5530 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5531 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?cinfo[i].ba:-1,unconditional);
5532 //assem_debug("cycle count (adj)\n");
5534 do_cc(i,branch_regs[i].regmap,&adj,cinfo[i].ba,TAKEN,0);
5535 if(i!=(cinfo[i].ba-start)>>2 || source[i+1]!=0) {
5536 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5537 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5539 assem_debug("branch: internal\n");
5541 assem_debug("branch: external\n");
5542 if (internal && dops[(cinfo[i].ba-start)>>2].is_ds) {
5543 ds_assemble_entry(i);
5546 add_to_linker(out,cinfo[i].ba,internal);
5549 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5550 if(((u_int)out)&7) emit_addnop(0);
5555 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
5558 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5561 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
5562 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5563 if(adj&&!invert) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5565 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5567 if(dops[i].opcode==4) // BEQ
5569 if(s2l>=0) emit_cmp(s1l,s2l);
5570 else emit_test(s1l,s1l);
5575 add_to_linker(out,cinfo[i].ba,internal);
5579 if(dops[i].opcode==5) // BNE
5581 if(s2l>=0) emit_cmp(s1l,s2l);
5582 else emit_test(s1l,s1l);
5587 add_to_linker(out,cinfo[i].ba,internal);
5591 if(dops[i].opcode==6) // BLEZ
5598 add_to_linker(out,cinfo[i].ba,internal);
5602 if(dops[i].opcode==7) // BGTZ
5609 add_to_linker(out,cinfo[i].ba,internal);
5614 if(taken) set_jump_target(taken, out);
5615 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5616 if (match && (!internal || !dops[(cinfo[i].ba-start)>>2].is_ds)) {
5618 emit_addimm(cc,-adj,cc);
5619 add_to_linker(out,cinfo[i].ba,internal);
5622 add_to_linker(out,cinfo[i].ba,internal*2);
5628 if(adj) emit_addimm(cc,-adj,cc);
5629 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5630 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5632 assem_debug("branch: internal\n");
5634 assem_debug("branch: external\n");
5635 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5636 ds_assemble_entry(i);
5639 add_to_linker(out,cinfo[i].ba,internal);
5643 set_jump_target(nottaken, out);
5646 if(nottaken1) set_jump_target(nottaken1, out);
5648 if(!invert) emit_addimm(cc,adj,cc);
5650 } // (!unconditional)
5654 // In-order execution (branch first)
5655 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
5656 if(!unconditional&&!nop) {
5657 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5659 if((dops[i].opcode&0x2f)==4) // BEQ
5661 if(s2l>=0) emit_cmp(s1l,s2l);
5662 else emit_test(s1l,s1l);
5666 if((dops[i].opcode&0x2f)==5) // BNE
5668 if(s2l>=0) emit_cmp(s1l,s2l);
5669 else emit_test(s1l,s1l);
5673 if((dops[i].opcode&0x2f)==6) // BLEZ
5679 if((dops[i].opcode&0x2f)==7) // BGTZ
5685 } // if(!unconditional)
5687 uint64_t ds_unneeded=branch_regs[i].u;
5688 ds_unneeded&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
5692 if(taken) set_jump_target(taken, out);
5693 assem_debug("1:\n");
5694 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5696 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5697 address_generation(i+1,&branch_regs[i],0);
5699 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5700 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5701 ds_assemble(i+1,&branch_regs[i]);
5702 drc_dbg_emit_wb_dirtys(i+1, &branch_regs[i]);
5703 cc=get_reg(branch_regs[i].regmap,CCREG);
5705 emit_loadreg(CCREG,cc=HOST_CCREG);
5706 // CHECK: Is the following instruction (fall thru) allocated ok?
5708 assert(cc==HOST_CCREG);
5709 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5710 do_cc(i,i_regmap,&adj,cinfo[i].ba,TAKEN,0);
5711 assem_debug("cycle count (adj)\n");
5712 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5713 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5715 assem_debug("branch: internal\n");
5717 assem_debug("branch: external\n");
5718 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5719 ds_assemble_entry(i);
5722 add_to_linker(out,cinfo[i].ba,internal);
5727 if(!unconditional) {
5728 if(nottaken1) set_jump_target(nottaken1, out);
5729 set_jump_target(nottaken, out);
5730 assem_debug("2:\n");
5731 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5733 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5734 address_generation(i+1,&branch_regs[i],0);
5736 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5737 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5738 ds_assemble(i+1,&branch_regs[i]);
5739 cc=get_reg(branch_regs[i].regmap,CCREG);
5741 // Cycle count isn't in a register, temporarily load it then write it out
5742 emit_loadreg(CCREG,HOST_CCREG);
5743 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), HOST_CCREG);
5746 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5747 emit_storereg(CCREG,HOST_CCREG);
5750 cc=get_reg(i_regmap,CCREG);
5751 assert(cc==HOST_CCREG);
5752 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
5755 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
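// Assemble a REGIMM branch (BLTZ/BGEZ/BLTZAL/BGEZAL) and its delay slot.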
5761 static void sjump_assemble(int i, const struct regstat *i_regs)
5763 const signed char *i_regmap = i_regs->regmap;
5766 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5767 assem_debug("smatch=%d ooo=%d\n", match, dops[i].ooo);
5769 int unconditional=0,nevertaken=0;
5771 int internal=internal_branch(cinfo[i].ba);
5772 if(i==(cinfo[i].ba-start)>>2) assem_debug("idle loop\n");
5773 if(!match) invert=1;
5774 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5775 if(i>(cinfo[i].ba-start)>>2) invert=1;
5778 invert=1; // because of near cond. branches
5781 //if(dops[i].opcode2>=0x10) return; // FIXME (BxxZAL)
5782 //assert(dops[i].opcode2<0x10||dops[i].rs1==0); // FIXME (BxxZAL)
5785 s1l=get_reg(branch_regs[i].regmap,dops[i].rs1);
5788 s1l=get_reg(i_regmap,dops[i].rs1);
5792 if(dops[i].opcode2&1) unconditional=1;
5794 // These are never taken (r0 is never less than zero)
5795 //assert(dops[i].opcode2!=0);
5796 //assert(dops[i].opcode2!=2);
5797 //assert(dops[i].opcode2!=0x10);
5798 //assert(dops[i].opcode2!=0x12);
5802 // Out of order execution (delay slot first)
5804 address_generation(i+1,i_regs,regs[i].regmap_entry);
5805 ds_assemble(i+1,i_regs);
5807 uint64_t bc_unneeded=branch_regs[i].u;
5808 bc_unneeded&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
5810 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5811 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i].rs1,dops[i].rs1);
5812 load_reg(regs[i].regmap,branch_regs[i].regmap,CCREG);
5813 if(dops[i].rt1==31) {
5814 int rt,return_address;
5815 rt=get_reg(branch_regs[i].regmap,31);
5816 //assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5818 // Save the PC even if the branch is not taken
5819 return_address=start+i*4+8;
5820 emit_movimm(return_address,rt); // PC into link register
5822 if(!nevertaken) emit_prefetch(hash_table_get(return_address));
5826 cc=get_reg(branch_regs[i].regmap,CCREG);
5827 assert(cc==HOST_CCREG);
5829 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5830 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?cinfo[i].ba:-1,unconditional);
5831 assem_debug("cycle count (adj)\n");
5833 do_cc(i,branch_regs[i].regmap,&adj,cinfo[i].ba,TAKEN,0);
5834 if(i!=(cinfo[i].ba-start)>>2 || source[i+1]!=0) {
5835 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5836 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5838 assem_debug("branch: internal\n");
5840 assem_debug("branch: external\n");
5841 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5842 ds_assemble_entry(i);
5845 add_to_linker(out,cinfo[i].ba,internal);
5848 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5849 if(((u_int)out)&7) emit_addnop(0);
5853 else if(nevertaken) {
5854 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
5857 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5860 void *nottaken = NULL;
5861 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5862 if(adj&&!invert) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5865 if ((dops[i].opcode2 & 1) == 0) // BLTZ/BLTZAL
5872 add_to_linker(out,cinfo[i].ba,internal);
5883 add_to_linker(out,cinfo[i].ba,internal);
5890 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5891 if (match && (!internal || !dops[(cinfo[i].ba - start) >> 2].is_ds)) {
5893 emit_addimm(cc,-adj,cc);
5894 add_to_linker(out,cinfo[i].ba,internal);
5897 add_to_linker(out,cinfo[i].ba,internal*2);
5903 if(adj) emit_addimm(cc,-adj,cc);
5904 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5905 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5907 assem_debug("branch: internal\n");
5909 assem_debug("branch: external\n");
5910 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5911 ds_assemble_entry(i);
5914 add_to_linker(out,cinfo[i].ba,internal);
5918 set_jump_target(nottaken, out);
5922 if(!invert) emit_addimm(cc,adj,cc);
5924 } // (!unconditional)
5928 // In-order execution (branch first)
5930 void *nottaken = NULL;
5931 if (!unconditional && !nevertaken) {
5933 emit_test(s1l, s1l);
5935 if (dops[i].rt1 == 31) {
5936 int rt, return_address;
5937 rt = get_reg(branch_regs[i].regmap,31);
5939 // Save the PC even if the branch is not taken
5940 return_address = start + i*4+8;
5941 emit_movimm(return_address, rt); // PC into link register
5943 emit_prefetch(hash_table_get(return_address));
5947 if (!unconditional && !nevertaken) {
5949 if (!(dops[i].opcode2 & 1)) // BLTZ/BLTZAL
5955 uint64_t ds_unneeded=branch_regs[i].u;
5956 ds_unneeded&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
5960 //assem_debug("1:\n");
5961 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5963 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
5964 address_generation(i+1,&branch_regs[i],0);
5966 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
5967 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5968 ds_assemble(i+1,&branch_regs[i]);
5969 cc=get_reg(branch_regs[i].regmap,CCREG);
5971 emit_loadreg(CCREG,cc=HOST_CCREG);
5972 // CHECK: Is the following instruction (fall thru) allocated ok?
5974 assert(cc==HOST_CCREG);
5975 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5976 do_cc(i,i_regmap,&adj,cinfo[i].ba,TAKEN,0);
5977 assem_debug("cycle count (adj)\n");
5978 if(adj) emit_addimm(cc, cinfo[i].ccadj + CLOCK_ADJUST(2) - adj, cc);
5979 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,cinfo[i].ba);
5981 assem_debug("branch: internal\n");
5983 assem_debug("branch: external\n");
5984 if (internal && dops[(cinfo[i].ba - start) >> 2].is_ds) {
5985 ds_assemble_entry(i);
5988 add_to_linker(out,cinfo[i].ba,internal);
5993 if(!unconditional) {
5996 set_jump_target(nottaken, out);
5998 assem_debug("1:\n");
5999 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
6000 load_regs(regs[i].regmap,branch_regs[i].regmap,dops[i+1].rs1,dops[i+1].rs2);
6001 address_generation(i+1,&branch_regs[i],0);
6003 load_reg(regs[i].regmap,branch_regs[i].regmap,ROREG);
6004 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
6005 ds_assemble(i+1,&branch_regs[i]);
6006 cc=get_reg(branch_regs[i].regmap,CCREG);
6008 // Cycle count isn't in a register, temporarily load it then write it out
6009 emit_loadreg(CCREG,HOST_CCREG);
6010 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), HOST_CCREG);
6013 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
6014 emit_storereg(CCREG,HOST_CCREG);
6017 cc=get_reg(i_regmap,CCREG);
6018 assert(cc==HOST_CCREG);
6019 emit_addimm_and_set_flags(cinfo[i].ccadj + CLOCK_ADJUST(2), cc);
6022 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
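// Sanity check: no two host regs may be mapped to the same guest reg.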
6028 static void check_regmap(signed char *regmap)
6032 for (i = 0; i < HOST_REGS; i++) {
6035 for (j = i + 1; j < HOST_REGS; j++)
6036 assert(regmap[i] != regmap[j]);
6042 #include <inttypes.h>
6043 static char insn[MAXBLOCK][10];
6045 #define set_mnemonic(i_, n_) \
6046 strcpy(insn[i_], n_)
6048 void print_regmap(const char *name, const signed char *regmap)
6052 fputs(name, stdout);
6053 for (i = 0; i < HOST_REGS; i++) {
6056 l = snprintf(buf, sizeof(buf), "$%d", regmap[i]);
6060 printf(" r%d=%s", i, buf);
6062 fputs("\n", stdout);
6066 void disassemble_inst(int i)
6068 if (dops[i].bt) printf("*"); else printf(" ");
6069 switch(dops[i].itype) {
6071 printf (" %x: %s %8x\n",start+i*4,insn[i],cinfo[i].ba);break;
6073 printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2,i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):cinfo[i].ba);break;
6075 printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],dops[i].rs1,start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6077 if (dops[i].opcode2 == 9 && dops[i].rt1 != 31)
6078 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1);
6080 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
6083 if(dops[i].opcode==0xf) //LUI
6084 printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],dops[i].rt1,cinfo[i].imm&0xffff);
6086 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,cinfo[i].imm);
6090 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,cinfo[i].imm);
6094 printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],dops[i].rs2,dops[i].rs1,cinfo[i].imm);
6098 printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,dops[i].rs2);
6101 printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],dops[i].rs1,dops[i].rs2);
6104 printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],dops[i].rt1,dops[i].rs1,cinfo[i].imm);
6107 if((dops[i].opcode2&0x1d)==0x10)
6108 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rt1);
6109 else if((dops[i].opcode2&0x1d)==0x11)
6110 printf (" %x: %s r%d\n",start+i*4,insn[i],dops[i].rs1);
6112 printf (" %x: %s\n",start+i*4,insn[i]);
6115 if(dops[i].opcode2==0)
6116 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC0
6117 else if(dops[i].opcode2==4)
6118 printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC0
6119 else printf (" %x: %s\n",start+i*4,insn[i]);
6122 if(dops[i].opcode2<3)
6123 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rt1,(source[i]>>11)&0x1f); // MFC2
6124 else if(dops[i].opcode2>3)
6125 printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],dops[i].rs1,(source[i]>>11)&0x1f); // MTC2
6126 else printf (" %x: %s\n",start+i*4,insn[i]);
6129 printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,dops[i].rs1,cinfo[i].imm);
6132 printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6135 //printf (" %s %8x\n",insn[i],source[i]);
6136 printf (" %x: %s\n",start+i*4,insn[i]);
6138 #ifndef REGMAP_PRINT
6141 printf("D: %x WD: %x U: %"PRIx64" hC: %x hWC: %x hLC: %x\n",
6142 regs[i].dirty, regs[i].wasdirty, unneeded_reg[i],
6143 regs[i].isconst, regs[i].wasconst, regs[i].loadedconst);
6144 print_regmap("pre: ", regmap_pre[i]);
6145 print_regmap("entry: ", regs[i].regmap_entry);
6146 print_regmap("map: ", regs[i].regmap);
6147 if (dops[i].is_jump) {
6148 print_regmap("bentry:", branch_regs[i].regmap_entry);
6149 print_regmap("bmap: ", branch_regs[i].regmap);
6153 #define set_mnemonic(i_, n_)
6154 static void disassemble_inst(int i) {}
6157 #define DRC_TEST_VAL 0x74657374
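// Generate and run a small test block to make sure the translation cache
// is really executable on this platform.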
6159 static noinline void new_dynarec_test(void)
6161 int (*testfunc)(void);
6166 // check structure linkage
6167 if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
6169 SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
6172 SysPrintf("(%p) testing if we can run recompiled code @%p...\n",
6173 new_dynarec_test, out);
6174 ((volatile u_int *)NDRC_WRITE_OFFSET(out))[0]++; // make the cache dirty
6176 for (i = 0; i < ARRAY_SIZE(ret); i++) {
6177 out = ndrc->translation_cache;
6178 beginning = start_block();
6179 emit_movimm(DRC_TEST_VAL + i, 0); // test
6182 end_block(beginning);
6183 testfunc = beginning;
6184 ret[i] = testfunc();
6187 if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
6188 SysPrintf("test passed.\n");
6190 SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
6191 out = ndrc->translation_cache;
6194 // clear the state completely, instead of just marking
6195 // things invalid like invalidate_all_pages() does
6196 void new_dynarec_clear_full(void)
6199 out = ndrc->translation_cache;
6200 memset(invalid_code,1,sizeof(invalid_code));
6201 memset(hash_table,0xff,sizeof(hash_table));
6202 memset(mini_ht,-1,sizeof(mini_ht));
6203 memset(shadow,0,sizeof(shadow));
6205 expirep = EXPIRITY_OFFSET;
6206 pending_exception=0;
6209 inv_code_start=inv_code_end=~0;
6212 for (n = 0; n < ARRAY_SIZE(blocks); n++)
6213 blocks_clear(&blocks[n]);
6214 for (n = 0; n < ARRAY_SIZE(jumps); n++) {
6218 stat_clear(stat_blocks);
6219 stat_clear(stat_links);
6221 cycle_multiplier_old = Config.cycle_multiplier;
6222 new_dynarec_hacks_old = new_dynarec_hacks;
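// One-time init: map the translation cache (platform-specific) and reset state.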
6225 void new_dynarec_init(void)
6227 SysPrintf("Init new dynarec, ndrc size %x\n", (int)sizeof(*ndrc));
6232 #ifdef BASE_ADDR_DYNAMIC
6234 sceBlock = getVMBlock(); //sceKernelAllocMemBlockForVM("code", sizeof(*ndrc));
6236 SysPrintf("sceKernelAllocMemBlockForVM failed: %x\n", sceBlock);
6237 int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
6239 SysPrintf("sceKernelGetMemBlockBase failed: %x\n", ret);
6240 sceKernelOpenVMDomain();
6241 sceClibPrintf("translation_cache = 0x%08lx\n ", (long)ndrc->translation_cache);
6242 #elif defined(_MSC_VER)
6243 ndrc = VirtualAlloc(NULL, sizeof(*ndrc), MEM_COMMIT | MEM_RESERVE,
6244 PAGE_EXECUTE_READWRITE);
6245 #elif defined(HAVE_LIBNX)
6246 Result rc = jitCreate(&g_jit, sizeof(*ndrc));
6248 SysPrintf("jitCreate failed: %08x\n", rc);
6249 SysPrintf("jitCreate: RX: %p RW: %p type: %d\n", g_jit.rx_addr, g_jit.rw_addr, g_jit.type);
6250 jitTransitionToWritable(&g_jit);
6251 ndrc = g_jit.rx_addr;
6252 ndrc_write_ofs = (char *)g_jit.rw_addr - (char *)ndrc;
6253 memset(NDRC_WRITE_OFFSET(&ndrc->tramp), 0, sizeof(ndrc->tramp));
6255 uintptr_t desired_addr = 0;
6256 int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
6257 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
6261 desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
6263 #ifdef TC_WRITE_OFFSET
6264 // mostly for testing
6265 fd = open("/dev/shm/pcsxr", O_CREAT | O_RDWR, 0600);
6266 ftruncate(fd, sizeof(*ndrc));
6267 void *mw = mmap(NULL, sizeof(*ndrc), PROT_READ | PROT_WRITE,
6268 (flags = MAP_SHARED), fd, 0);
6269 assert(mw != MAP_FAILED);
6270 prot = PROT_READ | PROT_EXEC;
6272 ndrc = mmap((void *)desired_addr, sizeof(*ndrc), prot, flags, fd, 0);
6273 if (ndrc == MAP_FAILED) {
6274 SysPrintf("mmap() failed: %s\n", strerror(errno));
6277 #ifdef TC_WRITE_OFFSET
6278 ndrc_write_ofs = (char *)mw - (char *)ndrc;
6282 #ifndef NO_WRITE_EXEC
6283 // not all systems allow execute in data segment by default
6284 // size must be 4K aligned for 3DS?
6285 if (mprotect(ndrc, sizeof(*ndrc),
6286 PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
6287 SysPrintf("mprotect() failed: %s\n", strerror(errno));
6290 out = ndrc->translation_cache;
6291 new_dynarec_clear_full();
6293 // Copy this into local area so we don't have to put it in every literal pool
6294 invc_ptr=invalid_code;
6298 ram_offset = (uintptr_t)psxM - 0x80000000;
6300 SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
6301 SysPrintf("Mapped (RAM/scrp/ROM/LUTs/TC):\n");
6302 SysPrintf("%p/%p/%p/%p/%p\n", psxM, psxH, psxR, mem_rtab, out);
6305 void new_dynarec_cleanup(void)
6308 #ifdef BASE_ADDR_DYNAMIC
6310 // sceBlock is managed by retroarch's bootstrap code
6311 //sceKernelFreeMemBlock(sceBlock);
6313 #elif defined(HAVE_LIBNX)
6317 if (munmap(ndrc, sizeof(*ndrc)) < 0)
6318 SysPrintf("munmap() failed\n");
6322 for (n = 0; n < ARRAY_SIZE(blocks); n++)
6323 blocks_clear(&blocks[n]);
6324 for (n = 0; n < ARRAY_SIZE(jumps); n++) {
6328 stat_clear(stat_blocks);
6329 stat_clear(stat_links);
6330 new_dynarec_print_stats();
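// Translate a guest code address to a host pointer (RAM or BIOS);
// *limit gets the end of the region that may be read.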
6333 static u_int *get_source_start(u_int addr, u_int *limit)
6335 if (addr < 0x00800000
6336 || (0x80000000 <= addr && addr < 0x80800000)
6337 || (0xa0000000 <= addr && addr < 0xa0800000))
6339 // used for BIOS calls mostly?
6340 *limit = (addr & 0xa0600000) + 0x00200000;
6341 return (u_int *)(psxM + (addr & 0x1fffff));
6344 /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
6345 (0xbfc00000 <= addr && addr < 0xbfc80000))
6347 // BIOS. The multiplier should be much higher as it's uncached 8bit mem,
6348 // but timings in PCSX are too tied to the interpreter's 2-per-insn assumption
6349 if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
6350 cycle_multiplier_active = 200;
6352 *limit = (addr & 0xfff00000) | 0x80000;
6353 return (u_int *)((u_char *)psxR + (addr&0x7ffff));
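// Scan forward (at most 0x1000 bytes) for a jr $ra to guess where the function ends.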
6358 static u_int scan_for_ret(u_int addr)
6363 mem = get_source_start(addr, &limit);
6367 if (limit > addr + 0x1000)
6368 limit = addr + 0x1000;
6369 for (; addr < limit; addr += 4, mem++) {
6370 if (*mem == 0x03e00008) // jr $ra
6376 struct savestate_block {
6381 static int addr_cmp(const void *p1_, const void *p2_)
6383 const struct savestate_block *p1 = p1_, *p2 = p2_;
6384 return p1->addr - p2->addr;
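// Save a sorted, deduplicated list of block start addresses (plus register
// speculation flags) into the savestate buffer.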
6387 int new_dynarec_save_blocks(void *save, int size)
6389 struct savestate_block *sblocks = save;
6390 int maxcount = size / sizeof(sblocks[0]);
6391 struct savestate_block tmp_blocks[1024];
6392 struct block_info *block;
6393 int p, s, d, o, bcnt;
6397 for (p = 0; p < ARRAY_SIZE(blocks); p++) {
6399 for (block = blocks[p]; block != NULL; block = block->next) {
6400 if (block->is_dirty)
6402 tmp_blocks[bcnt].addr = block->start;
6403 tmp_blocks[bcnt].regflags = block->reg_sv_flags;
6408 qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
6410 addr = tmp_blocks[0].addr;
6411 for (s = d = 0; s < bcnt; s++) {
6412 if (tmp_blocks[s].addr < addr)
6414 if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
6415 tmp_blocks[d++] = tmp_blocks[s];
6416 addr = scan_for_ret(tmp_blocks[s].addr);
6419 if (o + d > maxcount)
6421 memcpy(&sblocks[o], tmp_blocks, d * sizeof(sblocks[0]));
6425 return o * sizeof(sblocks[0]);
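// Restore blocks saved by new_dynarec_save_blocks: revalidate still-clean
// blocks and precompile the saved addresses using speculative GPR values.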
6428 void new_dynarec_load_blocks(const void *save, int size)
6430 const struct savestate_block *sblocks = save;
6431 int count = size / sizeof(sblocks[0]);
6432 struct block_info *block;
6433 u_int regs_save[32];
6438 // restore clean blocks, if any
6439 for (page = 0, b = i = 0; page < ARRAY_SIZE(blocks); page++) {
6440 for (block = blocks[page]; block != NULL; block = block->next, b++) {
6441 if (!block->is_dirty)
6443 assert(block->source && block->copy);
6444 if (memcmp(block->source, block->copy, block->len))
6447 // see try_restore_block
6448 block->is_dirty = 0;
6449 mark_invalid_code(block->start, block->len, 0);
6453 inv_debug("load_blocks: %d/%d clean blocks\n", i, b);
6455 // change GPRs for speculation to at least partially work..
6456 memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
6457 for (i = 1; i < 32; i++)
6458 psxRegs.GPR.r[i] = 0x80000000;
6460 for (b = 0; b < count; b++) {
6461 for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
6463 psxRegs.GPR.r[i] = 0x1f800000;
6466 ndrc_get_addr_ht(sblocks[b].addr);
6468 for (f = sblocks[b].regflags, i = 0; f; f >>= 1, i++) {
6470 psxRegs.GPR.r[i] = 0x80000000;
6474 memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
6477 void new_dynarec_print_stats(void)
6480 printf("cc %3d,%3d,%3d lu%6d,%3d,%3d c%3d inv%3d,%3d tc_offs %zu b %u,%u\n",
6481 stat_bc_pre, stat_bc_direct, stat_bc_restore,
6482 stat_ht_lookups, stat_jump_in_lookups, stat_restore_tries,
6483 stat_restore_compares, stat_inv_addr_calls, stat_inv_hits,
6484 out - ndrc->translation_cache, stat_blocks, stat_links);
6485 stat_bc_direct = stat_bc_pre = stat_bc_restore =
6486 stat_ht_lookups = stat_jump_in_lookups = stat_restore_tries =
6487 stat_restore_compares = stat_inv_addr_calls = stat_inv_hits = 0;
6491 static int apply_hacks(void)
6494 if (HACK_ENABLED(NDHACK_NO_COMPAT_HACKS))
6496 /* special hack(s) */
6497 for (i = 0; i < slen - 4; i++)
6499 // lui a4, 0xf200; jal <rcnt_read>; addu a0, 2; slti v0, 28224
6500 if (source[i] == 0x3c04f200 && dops[i+1].itype == UJUMP
6501 && source[i+2] == 0x34840002 && dops[i+3].opcode == 0x0a
6502 && cinfo[i+3].imm == 0x6e40 && dops[i+3].rs1 == 2)
6504 SysPrintf("PE2 hack @%08x\n", start + (i+3)*4);
6505 dops[i + 3].itype = NOP;
6509 if (i > 10 && source[i-1] == 0 && source[i-2] == 0x03e00008
6510 && source[i-4] == 0x8fbf0018 && source[i-6] == 0x00c0f809
6511 && dops[i-7].itype == STORE)
6514 if (dops[i].itype == IMM16)
6516 // swl r2, 15(r6); swr r2, 12(r6); sw r6, *; jalr r6
6517 if (dops[i].itype == STORELR && dops[i].rs1 == 6
6518 && dops[i-1].itype == STORELR && dops[i-1].rs1 == 6)
6520 SysPrintf("F1 hack from %08x, old dst %08x\n", start, hack_addr);
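// True if 'op' reads ld_rt, the target of a preceding load - a load delay
// case that needs special handling.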
6528 static int is_ld_use_hazard(int ld_rt, const struct decoded_insn *op)
6530 return ld_rt != 0 && (ld_rt == op->rs1 || ld_rt == op->rs2)
6531 && op->itype != LOADLR && op->itype != CJUMP && op->itype != SJUMP;
6534 static void force_intcall(int i)
6536 memset(&dops[i], 0, sizeof(dops[i]));
6537 dops[i].itype = INTCALL;
6538 dops[i].rs1 = CCREG;
6539 dops[i].is_exception = 1;
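// Decode one MIPS instruction into dops[i]/cinfo[i]: type, registers, immediate.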
6543 static void disassemble_one(int i, u_int src)
6545 unsigned int type, op, op2, op3;
6546 enum ls_width_type ls_type = LS_32;
6547 memset(&dops[i], 0, sizeof(dops[i]));
6548 memset(&cinfo[i], 0, sizeof(cinfo[i]));
6551 dops[i].opcode = op = src >> 26;
6554 set_mnemonic(i, "???");
6557 case 0x00: set_mnemonic(i, "special");
6561 case 0x00: set_mnemonic(i, "SLL"); type=SHIFTIMM; break;
6562 case 0x02: set_mnemonic(i, "SRL"); type=SHIFTIMM; break;
6563 case 0x03: set_mnemonic(i, "SRA"); type=SHIFTIMM; break;
6564 case 0x04: set_mnemonic(i, "SLLV"); type=SHIFT; break;
6565 case 0x06: set_mnemonic(i, "SRLV"); type=SHIFT; break;
6566 case 0x07: set_mnemonic(i, "SRAV"); type=SHIFT; break;
6567 case 0x08: set_mnemonic(i, "JR"); type=RJUMP; break;
6568 case 0x09: set_mnemonic(i, "JALR"); type=RJUMP; break;
6569 case 0x0C: set_mnemonic(i, "SYSCALL"); type=SYSCALL; break;
6570 case 0x0D: set_mnemonic(i, "BREAK"); type=SYSCALL; break;
6571 case 0x10: set_mnemonic(i, "MFHI"); type=MOV; break;
6572 case 0x11: set_mnemonic(i, "MTHI"); type=MOV; break;
6573 case 0x12: set_mnemonic(i, "MFLO"); type=MOV; break;
6574 case 0x13: set_mnemonic(i, "MTLO"); type=MOV; break;
6575 case 0x18: set_mnemonic(i, "MULT"); type=MULTDIV; break;
6576 case 0x19: set_mnemonic(i, "MULTU"); type=MULTDIV; break;
6577 case 0x1A: set_mnemonic(i, "DIV"); type=MULTDIV; break;
6578 case 0x1B: set_mnemonic(i, "DIVU"); type=MULTDIV; break;
6579 case 0x20: set_mnemonic(i, "ADD"); type=ALU; break;
6580 case 0x21: set_mnemonic(i, "ADDU"); type=ALU; break;
6581 case 0x22: set_mnemonic(i, "SUB"); type=ALU; break;
6582 case 0x23: set_mnemonic(i, "SUBU"); type=ALU; break;
6583 case 0x24: set_mnemonic(i, "AND"); type=ALU; break;
6584 case 0x25: set_mnemonic(i, "OR"); type=ALU; break;
6585 case 0x26: set_mnemonic(i, "XOR"); type=ALU; break;
6586 case 0x27: set_mnemonic(i, "NOR"); type=ALU; break;
6587 case 0x2A: set_mnemonic(i, "SLT"); type=ALU; break;
6588 case 0x2B: set_mnemonic(i, "SLTU"); type=ALU; break;
6591 case 0x01: set_mnemonic(i, "regimm");
6593 op2 = (src >> 16) & 0x1f;
6596 case 0x10: set_mnemonic(i, "BLTZAL"); break;
6597 case 0x11: set_mnemonic(i, "BGEZAL"); break;
6600 set_mnemonic(i, "BGEZ");
6602 set_mnemonic(i, "BLTZ");
6605 case 0x02: set_mnemonic(i, "J"); type=UJUMP; break;
6606 case 0x03: set_mnemonic(i, "JAL"); type=UJUMP; break;
6607 case 0x04: set_mnemonic(i, "BEQ"); type=CJUMP; break;
6608 case 0x05: set_mnemonic(i, "BNE"); type=CJUMP; break;
6609 case 0x06: set_mnemonic(i, "BLEZ"); type=CJUMP; break;
6610 case 0x07: set_mnemonic(i, "BGTZ"); type=CJUMP; break;
6611 case 0x08: set_mnemonic(i, "ADDI"); type=IMM16; break;
6612 case 0x09: set_mnemonic(i, "ADDIU"); type=IMM16; break;
6613 case 0x0A: set_mnemonic(i, "SLTI"); type=IMM16; break;
6614 case 0x0B: set_mnemonic(i, "SLTIU"); type=IMM16; break;
6615 case 0x0C: set_mnemonic(i, "ANDI"); type=IMM16; break;
6616 case 0x0D: set_mnemonic(i, "ORI"); type=IMM16; break;
6617 case 0x0E: set_mnemonic(i, "XORI"); type=IMM16; break;
6618 case 0x0F: set_mnemonic(i, "LUI"); type=IMM16; break;
6619 case 0x10: set_mnemonic(i, "COP0");
6620 op2 = (src >> 21) & 0x1f;
6625 case 0x01: case 0x02: case 0x06: case 0x08: type = INTCALL; break;
6626 case 0x10: set_mnemonic(i, "RFE"); type=RFE; break;
6627 default: type = OTHER; break;
6635 set_mnemonic(i, "MFC0");
6636 rd = (src >> 11) & 0x1F;
6637 if (!(0x00000417u & (1u << rd)))
6640 case 0x04: set_mnemonic(i, "MTC0"); type=COP0; break;
6642 case 0x06: type = INTCALL; break;
6643 default: type = OTHER; break;
6646 case 0x11: set_mnemonic(i, "COP1");
6647 op2 = (src >> 21) & 0x1f;
6649 case 0x12: set_mnemonic(i, "COP2");
6650 op2 = (src >> 21) & 0x1f;
6653 if (gte_handlers[src & 0x3f] != NULL) {
6655 if (gte_regnames[src & 0x3f] != NULL)
6656 strcpy(insn[i], gte_regnames[src & 0x3f]);
6658 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", src & 0x3f);
6665 case 0x00: set_mnemonic(i, "MFC2"); type=COP2; break;
6666 case 0x02: set_mnemonic(i, "CFC2"); type=COP2; break;
6667 case 0x04: set_mnemonic(i, "MTC2"); type=COP2; break;
6668 case 0x06: set_mnemonic(i, "CTC2"); type=COP2; break;
6671 case 0x13: set_mnemonic(i, "COP3");
6672 op2 = (src >> 21) & 0x1f;
6674 case 0x20: set_mnemonic(i, "LB"); type=LOAD; ls_type = LS_8; break;
6675 case 0x21: set_mnemonic(i, "LH"); type=LOAD; ls_type = LS_16; break;
6676 case 0x22: set_mnemonic(i, "LWL"); type=LOADLR; ls_type = LS_LR; break;
6677 case 0x23: set_mnemonic(i, "LW"); type=LOAD; ls_type = LS_32; break;
6678 case 0x24: set_mnemonic(i, "LBU"); type=LOAD; ls_type = LS_8; break;
6679 case 0x25: set_mnemonic(i, "LHU"); type=LOAD; ls_type = LS_16; break;
6680 case 0x26: set_mnemonic(i, "LWR"); type=LOADLR; ls_type = LS_LR; break;
6681 case 0x28: set_mnemonic(i, "SB"); type=STORE; ls_type = LS_8; break;
6682 case 0x29: set_mnemonic(i, "SH"); type=STORE; ls_type = LS_16; break;
6683 case 0x2A: set_mnemonic(i, "SWL"); type=STORELR; ls_type = LS_LR; break;
6684 case 0x2B: set_mnemonic(i, "SW"); type=STORE; ls_type = LS_32; break;
6685 case 0x2E: set_mnemonic(i, "SWR"); type=STORELR; ls_type = LS_LR; break;
6686 case 0x32: set_mnemonic(i, "LWC2"); type=C2LS; ls_type = LS_32; break;
6687 case 0x3A: set_mnemonic(i, "SWC2"); type=C2LS; ls_type = LS_32; break;
6689 if (Config.HLE && (src & 0x03ffffff) < ARRAY_SIZE(psxHLEt)) {
6690 set_mnemonic(i, "HLECALL");
6697 if (type == INTCALL)
6698 SysPrintf("NI %08x @%08x (%08x)\n", src, start + i*4, start);
6699 dops[i].itype = type;
6700 dops[i].opcode2 = op2;
6701 dops[i].ls_type = ls_type;
6702 /* Get registers/immediates */
6704 gte_rs[i]=gte_rt[i]=0;
6711 dops[i].rs1 = (src >> 21) & 0x1f;
6712 dops[i].rt1 = (src >> 16) & 0x1f;
6713 cinfo[i].imm = (short)src;
6717 dops[i].rs1 = (src >> 21) & 0x1f;
6718 dops[i].rs2 = (src >> 16) & 0x1f;
6719 cinfo[i].imm = (short)src;
6722 // LWL/LWR only load part of the register,
6723 // therefore the target register must be treated as a source too
6724 dops[i].rs1 = (src >> 21) & 0x1f;
6725 dops[i].rs2 = (src >> 16) & 0x1f;
6726 dops[i].rt1 = (src >> 16) & 0x1f;
6727 cinfo[i].imm = (short)src;
6730 if (op==0x0f) dops[i].rs1=0; // LUI instruction has no source register
6731 else dops[i].rs1 = (src >> 21) & 0x1f;
6733 dops[i].rt1 = (src >> 16) & 0x1f;
6734 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
6735 cinfo[i].imm = (unsigned short)src;
6737 cinfo[i].imm = (short)src;
6741 // The JAL instruction writes to r31.
6748 dops[i].rs1 = (src >> 21) & 0x1f;
6749 // The JALR instruction writes to rd.
6751 dops[i].rt1 = (src >> 11) & 0x1f;
6756 dops[i].rs1 = (src >> 21) & 0x1f;
6757 dops[i].rs2 = (src >> 16) & 0x1f;
6758 if(op&2) { // BGTZ/BLEZ
6763 dops[i].rs1 = (src >> 21) & 0x1f;
6764 dops[i].rs2 = CCREG;
6765 if (op2 == 0x10 || op2 == 0x11) { // BxxAL
6767 // NOTE: If the branch is not taken, r31 is still overwritten
6771 dops[i].rs1=(src>>21)&0x1f; // source
6772 dops[i].rs2=(src>>16)&0x1f; // subtract amount
6773 dops[i].rt1=(src>>11)&0x1f; // destination
6776 dops[i].rs1=(src>>21)&0x1f; // source
6777 dops[i].rs2=(src>>16)&0x1f; // divisor
6782 if(op2==0x10) dops[i].rs1=HIREG; // MFHI
6783 if(op2==0x11) dops[i].rt1=HIREG; // MTHI
6784 if(op2==0x12) dops[i].rs1=LOREG; // MFLO
6785 if(op2==0x13) dops[i].rt1=LOREG; // MTLO
6786 if((op2&0x1d)==0x10) dops[i].rt1=(src>>11)&0x1f; // MFxx
6787 if((op2&0x1d)==0x11) dops[i].rs1=(src>>21)&0x1f; // MTxx
6790 dops[i].rs1=(src>>16)&0x1f; // target of shift
6791 dops[i].rs2=(src>>21)&0x1f; // shift amount
6792 dops[i].rt1=(src>>11)&0x1f; // destination
6795 dops[i].rs1=(src>>16)&0x1f;
6797 dops[i].rt1=(src>>11)&0x1f;
6798 cinfo[i].imm=(src>>6)&0x1f;
6801 if(op2==0) dops[i].rt1=(src>>16)&0x1F; // MFC0
6802 if(op2==4) dops[i].rs1=(src>>16)&0x1F; // MTC0
6803 if(op2==4&&((src>>11)&0x1e)==12) dops[i].rs2=CCREG;
6806 if(op2<3) dops[i].rt1=(src>>16)&0x1F; // MFC2/CFC2
6807 if(op2>3) dops[i].rs1=(src>>16)&0x1F; // MTC2/CTC2
6808 int gr=(src>>11)&0x1F;
6811 case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
6812 case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
6813 case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
6814 case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
6818 dops[i].rs1=(src>>21)&0x1F;
6819 cinfo[i].imm=(short)src;
6820 if(op==0x32) gte_rt[i]=1ll<<((src>>16)&0x1F); // LWC2
6821 else gte_rs[i]=1ll<<((src>>16)&0x1F); // SWC2
6824 gte_rs[i]=gte_reg_reads[src&0x3f];
6825 gte_rt[i]=gte_reg_writes[src&0x3f];
6826 gte_rt[i]|=1ll<<63; // every op changes flags
6827 if((src&0x3f)==GTE_MVMVA) {
6828 int v = (src >> 15) & 3;
6829 gte_rs[i]&=~0xe3fll;
6830 if(v==3) gte_rs[i]|=0xe00ll;
6831 else gte_rs[i]|=3ll<<(v*2);
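// Pass 1: decode the block, compute branch targets, divert problem cases
// to the interpreter and decide where the block ends.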
6844 static noinline void pass1_disassemble(u_int pagelimit)
6846 int i, j, done = 0, ni_count = 0;
6849 for (i = 0; !done; i++)
6851 int force_j_to_interpreter = 0;
6852 unsigned int type, op, op2;
6854 disassemble_one(i, source[i]);
6855 dops[i].is_ds = ds_next; ds_next = 0;
6856 type = dops[i].itype;
6857 op = dops[i].opcode;
6858 op2 = dops[i].opcode2;
6860 /* Calculate branch target addresses */
6862 cinfo[i].ba=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
6863 else if(type==CJUMP&&dops[i].rs1==dops[i].rs2&&(op&1))
6864 cinfo[i].ba=start+i*4+8; // Ignore never taken branch
6865 else if(type==SJUMP&&dops[i].rs1==0&&!(op2&1))
6866 cinfo[i].ba=start+i*4+8; // Ignore never taken branch
6867 else if(type==CJUMP||type==SJUMP)
6868 cinfo[i].ba=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
6870 /* simplify always (not)taken branches */
6871 if (type == CJUMP && dops[i].rs1 == dops[i].rs2) {
6872 dops[i].rs1 = dops[i].rs2 = 0;
6874 dops[i].itype = type = UJUMP;
6875 dops[i].rs2 = CCREG;
6878 else if (type == SJUMP && dops[i].rs1 == 0 && (op2 & 1))
6879 dops[i].itype = type = UJUMP;
6881 dops[i].is_jump = type == RJUMP || type == UJUMP || type == CJUMP || type == SJUMP;
6882 dops[i].is_ujump = type == RJUMP || type == UJUMP;
6883 dops[i].is_load = type == LOAD || type == LOADLR || op == 0x32; // LWC2
6884 dops[i].is_delay_load = (dops[i].is_load || (source[i] & 0xf3d00000) == 0x40000000); // MFC/CFC
6885 dops[i].is_store = type == STORE || type == STORELR || op == 0x3a; // SWC2
6886 dops[i].is_exception = type == SYSCALL || type == HLECALL || type == INTCALL;
6887 dops[i].may_except = dops[i].is_exception || (type == ALU && (op2 == 0x20 || op2 == 0x22)) || op == 8;
6888 ds_next = dops[i].is_jump;
6890 if (((op & 0x37) == 0x21 || op == 0x25) // LH/SH/LHU
6891 && ((cinfo[i].imm & 1) || Config.PreciseExceptions))
6892 dops[i].may_except = 1;
6893 if (((op & 0x37) == 0x23 || (op & 0x37) == 0x32) // LW/SW/LWC2/SWC2
6894 && ((cinfo[i].imm & 3) || Config.PreciseExceptions))
6895 dops[i].may_except = 1;
6897 /* rare messy cases to just pass over to the interpreter */
6898 if (i > 0 && dops[i-1].is_jump) {
6900 // branch in delay slot?
6901 if (dops[i].is_jump) {
6902 // don't handle first branch and call interpreter if it's hit
6903 SysPrintf("branch in DS @%08x (%08x)\n", start + i*4, start);
6904 force_j_to_interpreter = 1;
6906 // load delay detection through a branch
6907 else if (dops[i].is_delay_load && dops[i].rt1 != 0) {
6908 const struct decoded_insn *dop = NULL;
6910 if (cinfo[i-1].ba != -1) {
6911 t = (cinfo[i-1].ba - start) / 4;
6912 if (t < 0 || t > i) {
6914 u_int *mem = get_source_start(cinfo[i-1].ba, &limit);
6916 disassemble_one(MAXBLOCK - 1, mem[0]);
6917 dop = &dops[MAXBLOCK - 1];
6923 if ((dop && is_ld_use_hazard(dops[i].rt1, dop))
6924 || (!dop && Config.PreciseExceptions)) {
6925 // jump target wants DS result - potential load delay effect
6926 SysPrintf("load delay in DS @%08x (%08x)\n", start + i*4, start);
6927 force_j_to_interpreter = 1;
6928 if (0 <= t && t < i)
6929 dops[t + 1].bt = 1; // expected return from interpreter
6931 else if(i>=2&&dops[i-2].rt1==2&&dops[i].rt1==2&&dops[i].rs1!=2&&dops[i].rs2!=2&&dops[i-1].rs1!=2&&dops[i-1].rs2!=2&&
6932 !(i>=3&&dops[i-3].is_jump)) {
6933 // v0 overwrite like this is a sign of trouble, bail out
6934 SysPrintf("v0 overwrite @%08x (%08x)\n", start + i*4, start);
6935 force_j_to_interpreter = 1;
6939 else if (i > 0 && dops[i-1].is_delay_load
6940 && is_ld_use_hazard(dops[i-1].rt1, &dops[i])
6941 && (i < 2 || !dops[i-2].is_ujump)) {
6942 SysPrintf("load delay @%08x (%08x)\n", start + i*4, start);
6943 for (j = i - 1; j > 0 && dops[j-1].is_delay_load; j--)
6944 if (dops[j-1].rt1 != dops[i-1].rt1)
6946 force_j_to_interpreter = 1;
6948 if (force_j_to_interpreter) {
6951 i = j; // don't compile the problematic branch/load/etc
6953 if (dops[i].is_exception && i > 0 && dops[i-1].is_jump) {
6954 SysPrintf("exception in DS @%08x (%08x)\n", start + i*4, start);
6959 if (i >= 2 && (source[i-2] & 0xffe0f800) == 0x40806000) // MTC0 $12
6961 if (i >= 1 && (source[i-1] & 0xffe0f800) == 0x40806800) // MTC0 $13
6964 /* Is this the end of the block? */
6965 if (i > 0 && dops[i-1].is_ujump) {
6966 if (dops[i-1].rt1 == 0) { // not jal
6967 int found_bbranch = 0, t = (cinfo[i-1].ba - start) / 4;
6968 if ((u_int)(t - i) < 64 && start + (t+64)*4 < pagelimit) {
6969 // scan for a branch back to i+1
6970 for (j = t; j < t + 64; j++) {
6971 int tmpop = source[j] >> 26;
6972 if (tmpop == 1 || ((tmpop & ~3) == 4)) {
6973 int t2 = j + 1 + (int)(signed short)source[j];
6975 //printf("blk expand %08x<-%08x\n", start + (i+1)*4, start + j*4);
6986 if(stop_after_jal) done=1;
6988 if((source[i+1]&0xfc00003f)==0x0d) done=1;
6990 // Don't recompile stuff that's already compiled
6991 if(check_addr(start+i*4+4)) done=1;
6992 // Don't get too close to the limit
6993 if (i > MAXBLOCK - 64)
6996 if (dops[i].itype == HLECALL)
6998 else if (dops[i].itype == INTCALL)
7000 else if (dops[i].is_exception)
7001 done = stop_after_jal ? 1 : 2;
7003 // Does the block continue due to a branch?
7006 if(cinfo[j].ba==start+i*4) done=j=0; // Branch into delay slot
7007 if(cinfo[j].ba==start+i*4+4) done=j=0;
7008 if(cinfo[j].ba==start+i*4+8) done=j=0;
7011 //assert(i<MAXBLOCK-1);
7012 if(start+i*4==pagelimit-4) done=1;
7013 assert(start+i*4<pagelimit);
7014 if (i == MAXBLOCK - 2)
7016 // Stop if we're compiling junk
7017 if (dops[i].itype == INTCALL && (++ni_count > 8 || dops[i].opcode == 0x11)) {
7018 done=stop_after_jal=1;
7019 SysPrintf("Disabled speculative precompilation\n");
7022 while (i > 0 && dops[i-1].is_jump)
7025 assert(!dops[i-1].is_jump);
7029 // Basic liveness analysis for MIPS registers
7030 static noinline void pass2_unneeded_regs(int istart,int iend,int r)
7033 uint64_t u,gte_u,b,gte_b;
7034 uint64_t temp_u,temp_gte_u=0;
7035 uint64_t gte_u_unknown=0;
7036 if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
7040 gte_u=gte_u_unknown;
7042 //u=unneeded_reg[iend+1];
7044 gte_u=gte_unneeded[iend+1];
7047 for (i=iend;i>=istart;i--)
7049 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
7052 // If subroutine call, flag return address as a possible branch target
7053 if(dops[i].rt1==31 && i<slen-2) dops[i+2].bt=1;
7055 if(cinfo[i].ba<start || cinfo[i].ba>=(start+slen*4))
7057 // Branch out of this block, flush all regs
7059 gte_u=gte_u_unknown;
7060 branch_unneeded_reg[i]=u;
7061 // Merge in delay slot
7062 u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7063 u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7066 gte_u&=~gte_rs[i+1];
7070 // Internal branch, flag target
7071 dops[(cinfo[i].ba-start)>>2].bt=1;
7072 if(cinfo[i].ba<=start+i*4) {
7074 if(dops[i].is_ujump)
7076 // Unconditional branch
7080 // Conditional branch (not taken case)
7081 temp_u=unneeded_reg[i+2];
7082 temp_gte_u&=gte_unneeded[i+2];
7084 // Merge in delay slot
7085 temp_u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7086 temp_u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7088 temp_gte_u|=gte_rt[i+1];
7089 temp_gte_u&=~gte_rs[i+1];
7090 temp_u|=(1LL<<dops[i].rt1)|(1LL<<dops[i].rt2);
7091 temp_u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7093 temp_gte_u|=gte_rt[i];
7094 temp_gte_u&=~gte_rs[i];
7095 unneeded_reg[i]=temp_u;
7096 gte_unneeded[i]=temp_gte_u;
7097 // Only go three levels deep. This recursion can take an
7098 // excessive amount of time if there are a lot of nested loops.
7100 pass2_unneeded_regs((cinfo[i].ba-start)>>2,i-1,r+1);
7102 unneeded_reg[(cinfo[i].ba-start)>>2]=1;
7103 gte_unneeded[(cinfo[i].ba-start)>>2]=gte_u_unknown;
7106 if (dops[i].is_ujump)
7108 // Unconditional branch
7109 u=unneeded_reg[(cinfo[i].ba-start)>>2];
7110 gte_u=gte_unneeded[(cinfo[i].ba-start)>>2];
7111 branch_unneeded_reg[i]=u;
7112 // Merge in delay slot
7113 u|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7114 u&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7117 gte_u&=~gte_rs[i+1];
7119 // Conditional branch
7120 b=unneeded_reg[(cinfo[i].ba-start)>>2];
7121 gte_b=gte_unneeded[(cinfo[i].ba-start)>>2];
7122 branch_unneeded_reg[i]=b;
7123 // Branch delay slot
7124 b|=(1LL<<dops[i+1].rt1)|(1LL<<dops[i+1].rt2);
7125 b&=~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7128 gte_b&=~gte_rs[i+1];
7132 branch_unneeded_reg[i]&=unneeded_reg[i+2];
7134 branch_unneeded_reg[i]=1;
7141 // Written registers are unneeded
7142 u|=1LL<<dops[i].rt1;
7143 u|=1LL<<dops[i].rt2;
7145 // Accessed registers are needed
7146 u&=~(1LL<<dops[i].rs1);
7147 u&=~(1LL<<dops[i].rs2);
7149 if(gte_rs[i]&&dops[i].rt1&&(unneeded_reg[i+1]&(1ll<<dops[i].rt1)))
7150 gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
7151 if (dops[i].may_except || dops[i].itype == RFE)
7153 // SYSCALL instruction, etc or conditional exception
7156 // Source-target dependencies
7157 // R0 is always unneeded
7161 gte_unneeded[i]=gte_u;
7163 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
7166 for(r=1;r<=CCREG;r++) {
7167 if((unneeded_reg[i]>>r)&1) {
7168 if(r==HIREG) printf(" HI");
7169 else if(r==LOREG) printf(" LO");
7170 else printf(" r%d",r);
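/*
 * Illustrative example of the redundant-alignment-check removal done by
 * pass2a_unneeded_other() below: a later access in the same block with the
 * same base register, the same load/store size class and the same low
 * address bits as an earlier one cannot be the first to fault, so its
 * may_except flag is dropped:
 *
 *   lw $t0,0($a0)    ; keeps its alignment check
 *   lw $t1,0($a0)    ; same base/alignment -> check is redundant
 *
 * The scan stops at branch targets, jumps, and when the base register is
 * overwritten.
 */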
7178 static noinline void pass2a_unneeded_other(void)
7181 for (i = 0; i < slen; i++)
7183 // remove redundant alignment checks
7184 if (dops[i].may_except && (dops[i].is_load || dops[i].is_store)
7185 && dops[i].rt1 != dops[i].rs1 && !dops[i].is_ds)
7187 int base = dops[i].rs1, lsb = cinfo[i].imm, ls_type = dops[i].ls_type;
7188 int mask = ls_type == LS_32 ? 3 : 1;
7190 for (j = i + 1; j < slen; j++) {
7191 if (dops[j].bt || dops[j].is_jump)
7193 if ((dops[j].is_load || dops[j].is_store) && dops[j].rs1 == base
7194 && dops[j].ls_type == ls_type && (cinfo[j].imm & mask) == lsb)
7195 dops[j].may_except = 0;
7196 if (dops[j].rt1 == base)
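/*
 * One detail of the allocator below worth illustrating: a branch is normally
 * allocated together with its delay slot, but when the delay slot overwrites
 * a register the branch still has to test, e.g.
 *
 *   beq   $t0,$t1,target
 *   addiu $t0,$t0,1      ; delay slot clobbers a condition register
 *
 * the branch condition registers are allocated first and the delay slot is
 * handled after the test (see the "delay slot overwrites" cases in the
 * switch statement).
 */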
7203 static noinline void pass3_register_alloc(u_int addr)
7205 struct regstat current; // Current register allocations/status
7206 clear_all_regs(current.regmap_entry);
7207 clear_all_regs(current.regmap);
7208 current.wasdirty = current.dirty = 0;
7209 current.u = unneeded_reg[0];
7210 alloc_reg(&current, 0, CCREG);
7211 dirty_reg(&current, CCREG);
7212 current.wasconst = 0;
7213 current.isconst = 0;
7214 current.loadedconst = 0;
7215 current.noevict = 0;
7216 //current.waswritten = 0;
7223 // First instruction is delay slot
7234 for(hr=0;hr<HOST_REGS;hr++)
7236 // Is this really necessary?
7237 if(current.regmap[hr]==0) current.regmap[hr]=-1;
7240 //current.waswritten=0;
7243 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
7244 regs[i].wasconst=current.isconst;
7245 regs[i].wasdirty=current.dirty;
7249 regs[i].loadedconst=0;
7250 if (!dops[i].is_jump) {
7252 current.u=unneeded_reg[i+1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7259 current.u=branch_unneeded_reg[i]&~((1LL<<dops[i+1].rs1)|(1LL<<dops[i+1].rs2));
7260 current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7263 SysPrintf("oops, branch at end of block with no delay slot @%08x\n", start + i*4);
7267 assert(dops[i].is_ds == ds);
7269 ds=0; // Skip delay slot, already allocated as part of branch
7270 // ...but we need to alloc it in case something jumps here
7272 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
7274 current.u=branch_unneeded_reg[i-1];
7276 current.u&=~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7278 struct regstat temp;
7279 memcpy(&temp,&current,sizeof(current));
7280 temp.wasdirty=temp.dirty;
7281 // TODO: Take into account unconditional branches, as below
7282 delayslot_alloc(&temp,i);
7283 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
7284 regs[i].wasdirty=temp.wasdirty;
7285 regs[i].dirty=temp.dirty;
7289 // Create entry (branch target) regmap
7290 for(hr=0;hr<HOST_REGS;hr++)
7292 int r=temp.regmap[hr];
7294 if(r!=regmap_pre[i][hr]) {
7295 regs[i].regmap_entry[hr]=-1;
7300 if((current.u>>r)&1) {
7301 regs[i].regmap_entry[hr]=-1;
7302 regs[i].regmap[hr]=-1;
7303 //Don't clear regs in the delay slot as the branch might need them
7304 //current.regmap[hr]=-1;
7306 regs[i].regmap_entry[hr]=r;
7309 // First instruction expects CCREG to be allocated
7310 if(i==0&&hr==HOST_CCREG)
7311 regs[i].regmap_entry[hr]=CCREG;
7313 regs[i].regmap_entry[hr]=-1;
7317 else { // Not delay slot
7318 current.noevict = 0;
7319 switch(dops[i].itype) {
7321 //current.isconst=0; // DEBUG
7322 //current.wasconst=0; // DEBUG
7323 //regs[i].wasconst=0; // DEBUG
7324 clear_const(&current,dops[i].rt1);
7325 alloc_cc(&current,i);
7326 dirty_reg(&current,CCREG);
7327 if (dops[i].rt1==31) {
7328 alloc_reg(&current,i,31);
7329 dirty_reg(&current,31);
7330 //assert(dops[i+1].rs1!=31&&dops[i+1].rs2!=31);
7331 //assert(dops[i+1].rt1!=dops[i].rt1);
7333 alloc_reg(&current,i,PTEMP);
7337 delayslot_alloc(&current,i+1);
7338 //current.isconst=0; // DEBUG
7342 //current.isconst=0;
7343 //current.wasconst=0;
7344 //regs[i].wasconst=0;
7345 clear_const(&current,dops[i].rs1);
7346 clear_const(&current,dops[i].rt1);
7347 alloc_cc(&current,i);
7348 dirty_reg(&current,CCREG);
7349 if (!ds_writes_rjump_rs(i)) {
7350 alloc_reg(&current,i,dops[i].rs1);
7351 if (dops[i].rt1!=0) {
7352 alloc_reg(&current,i,dops[i].rt1);
7353 dirty_reg(&current,dops[i].rt1);
7355 alloc_reg(&current,i,PTEMP);
7359 if(dops[i].rs1==31) { // JALR
7360 alloc_reg(&current,i,RHASH);
7361 alloc_reg(&current,i,RHTBL);
7364 delayslot_alloc(&current,i+1);
7366 // The delay slot overwrites our source register,
7367 // allocate a temporary register to hold the old value.
7371 delayslot_alloc(&current,i+1);
7373 alloc_reg(&current,i,RTEMP);
7375 //current.isconst=0; // DEBUG
7380 //current.isconst=0;
7381 //current.wasconst=0;
7382 //regs[i].wasconst=0;
7383 clear_const(&current,dops[i].rs1);
7384 clear_const(&current,dops[i].rs2);
7385 if((dops[i].opcode&0x3E)==4) // BEQ/BNE
7387 alloc_cc(&current,i);
7388 dirty_reg(&current,CCREG);
7389 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7390 if(dops[i].rs2) alloc_reg(&current,i,dops[i].rs2);
7391 if((dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2))||
7392 (dops[i].rs2&&(dops[i].rs2==dops[i+1].rt1||dops[i].rs2==dops[i+1].rt2))) {
7393 // The delay slot overwrites one of our conditions.
7394 // Allocate the branch condition registers instead.
7398 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7399 if(dops[i].rs2) alloc_reg(&current,i,dops[i].rs2);
7404 delayslot_alloc(&current,i+1);
7408 if((dops[i].opcode&0x3E)==6) // BLEZ/BGTZ
7410 alloc_cc(&current,i);
7411 dirty_reg(&current,CCREG);
7412 alloc_reg(&current,i,dops[i].rs1);
7413 if(dops[i].rs1&&(dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2)) {
7414 // The delay slot overwrites one of our conditions.
7415 // Allocate the branch condition registers instead.
7419 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7424 delayslot_alloc(&current,i+1);
7428 // Don't alloc the delay slot yet because we might not execute it
7429 if((dops[i].opcode&0x3E)==0x14) // BEQL/BNEL
7434 alloc_cc(&current,i);
7435 dirty_reg(&current,CCREG);
7436 alloc_reg(&current,i,dops[i].rs1);
7437 alloc_reg(&current,i,dops[i].rs2);
7440 if((dops[i].opcode&0x3E)==0x16) // BLEZL/BGTZL
7445 alloc_cc(&current,i);
7446 dirty_reg(&current,CCREG);
7447 alloc_reg(&current,i,dops[i].rs1);
7450 //current.isconst=0;
7453 clear_const(&current,dops[i].rs1);
7454 clear_const(&current,dops[i].rt1);
7456 alloc_cc(&current,i);
7457 dirty_reg(&current,CCREG);
7458 alloc_reg(&current,i,dops[i].rs1);
7459 if (dops[i].rt1 == 31) { // BLTZAL/BGEZAL
7460 alloc_reg(&current,i,31);
7461 dirty_reg(&current,31);
7464 (dops[i].rs1==dops[i+1].rt1||dops[i].rs1==dops[i+1].rt2)) // The delay slot overwrites the branch condition.
7465 ||(dops[i].rt1 == 31 && dops[i].rs1 == 31) // overwrites its own condition
7466 ||(dops[i].rt1==31&&(dops[i+1].rs1==31||dops[i+1].rs2==31||dops[i+1].rt1==31||dops[i+1].rt2==31))) { // DS touches $ra
7467 // Allocate the branch condition registers instead.
7471 if(dops[i].rs1) alloc_reg(&current,i,dops[i].rs1);
7476 delayslot_alloc(&current,i+1);
7480 //current.isconst=0;
7483 imm16_alloc(&current,i);
7487 load_alloc(&current,i);
7491 store_alloc(&current,i);
7494 alu_alloc(&current,i);
7497 shift_alloc(&current,i);
7500 multdiv_alloc(&current,i);
7503 shiftimm_alloc(&current,i);
7506 mov_alloc(&current,i);
7509 cop0_alloc(&current,i);
7512 rfe_alloc(&current,i);
7515 cop2_alloc(&current,i);
7518 c2ls_alloc(&current,i);
7521 c2op_alloc(&current,i);
7526 syscall_alloc(&current,i);
7530 // Create entry (branch target) regmap
7531 for(hr=0;hr<HOST_REGS;hr++)
7534 r=current.regmap[hr];
7536 if(r!=regmap_pre[i][hr]) {
7537 // TODO: delay slot (?)
7538 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
7539 if(or<0||r>=TEMPREG){
7540 regs[i].regmap_entry[hr]=-1;
7544 // Just move it to a different register
7545 regs[i].regmap_entry[hr]=r;
7546 // If it was dirty before, it's still dirty
7547 if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r);
7554 regs[i].regmap_entry[hr]=0;
7559 if((current.u>>r)&1) {
7560 regs[i].regmap_entry[hr]=-1;
7561 //regs[i].regmap[hr]=-1;
7562 current.regmap[hr]=-1;
7564 regs[i].regmap_entry[hr]=r;
7568 // Branches expect CCREG to be allocated at the target
7569 if(regmap_pre[i][hr]==CCREG)
7570 regs[i].regmap_entry[hr]=CCREG;
7572 regs[i].regmap_entry[hr]=-1;
7575 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
7578 #if 0 // see do_store_smc_check()
7579 if(i>0&&(dops[i-1].itype==STORE||dops[i-1].itype==STORELR||(dops[i-1].itype==C2LS&&dops[i-1].opcode==0x3a))&&(u_int)cinfo[i-1].imm<0x800)
7580 current.waswritten|=1<<dops[i-1].rs1;
7581 current.waswritten&=~(1<<dops[i].rt1);
7582 current.waswritten&=~(1<<dops[i].rt2);
7583 if((dops[i].itype==STORE||dops[i].itype==STORELR||(dops[i].itype==C2LS&&dops[i].opcode==0x3a))&&(u_int)cinfo[i].imm>=0x800)
7584 current.waswritten&=~(1<<dops[i].rs1);
7587 /* Branch post-alloc */
7590 current.wasdirty=current.dirty;
7591 switch(dops[i-1].itype) {
7593 memcpy(&branch_regs[i-1],&current,sizeof(current));
7594 branch_regs[i-1].isconst=0;
7595 branch_regs[i-1].wasconst=0;
7596 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
7597 alloc_cc(&branch_regs[i-1],i-1);
7598 dirty_reg(&branch_regs[i-1],CCREG);
7599 if(dops[i-1].rt1==31) { // JAL
7600 alloc_reg(&branch_regs[i-1],i-1,31);
7601 dirty_reg(&branch_regs[i-1],31);
7603 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7604 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7607 memcpy(&branch_regs[i-1],&current,sizeof(current));
7608 branch_regs[i-1].isconst=0;
7609 branch_regs[i-1].wasconst=0;
7610 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
7611 alloc_cc(&branch_regs[i-1],i-1);
7612 dirty_reg(&branch_regs[i-1],CCREG);
7613 alloc_reg(&branch_regs[i-1],i-1,dops[i-1].rs1);
7614 if(dops[i-1].rt1!=0) { // JALR
7615 alloc_reg(&branch_regs[i-1],i-1,dops[i-1].rt1);
7616 dirty_reg(&branch_regs[i-1],dops[i-1].rt1);
7619 if(dops[i-1].rs1==31) { // JALR
7620 alloc_reg(&branch_regs[i-1],i-1,RHASH);
7621 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
7624 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7625 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7628 if((dops[i-1].opcode&0x3E)==4) // BEQ/BNE
7630 alloc_cc(&current,i-1);
7631 dirty_reg(&current,CCREG);
7632 if((dops[i-1].rs1&&(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2))||
7633 (dops[i-1].rs2&&(dops[i-1].rs2==dops[i].rt1||dops[i-1].rs2==dops[i].rt2))) {
7634 // The delay slot overwrote one of our conditions
7635 // Delay slot goes after the test (in order)
7636 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7638 delayslot_alloc(&current,i);
7643 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i-1].rs1)|(1LL<<dops[i-1].rs2));
7644 // Alloc the branch condition registers
7645 if(dops[i-1].rs1) alloc_reg(&current,i-1,dops[i-1].rs1);
7646 if(dops[i-1].rs2) alloc_reg(&current,i-1,dops[i-1].rs2);
7648 memcpy(&branch_regs[i-1],&current,sizeof(current));
7649 branch_regs[i-1].isconst=0;
7650 branch_regs[i-1].wasconst=0;
7651 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7652 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7655 if((dops[i-1].opcode&0x3E)==6) // BLEZ/BGTZ
7657 alloc_cc(&current,i-1);
7658 dirty_reg(&current,CCREG);
7659 if(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2) {
7660 // The delay slot overwrote the branch condition
7661 // Delay slot goes after the test (in order)
7662 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7664 delayslot_alloc(&current,i);
7669 current.u=branch_unneeded_reg[i-1]&~(1LL<<dops[i-1].rs1);
7670 // Alloc the branch condition register
7671 alloc_reg(&current,i-1,dops[i-1].rs1);
7673 memcpy(&branch_regs[i-1],&current,sizeof(current));
7674 branch_regs[i-1].isconst=0;
7675 branch_regs[i-1].wasconst=0;
7676 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7677 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7682 alloc_cc(&current,i-1);
7683 dirty_reg(&current,CCREG);
7684 if(dops[i-1].rs1==dops[i].rt1||dops[i-1].rs1==dops[i].rt2) {
7685 // The delay slot overwrote the branch condition
7686 // Delay slot goes after the test (in order)
7687 current.u=branch_unneeded_reg[i-1]&~((1LL<<dops[i].rs1)|(1LL<<dops[i].rs2));
7689 delayslot_alloc(&current,i);
7694 current.u=branch_unneeded_reg[i-1]&~(1LL<<dops[i-1].rs1);
7695 // Alloc the branch condition register
7696 alloc_reg(&current,i-1,dops[i-1].rs1);
7698 memcpy(&branch_regs[i-1],&current,sizeof(current));
7699 branch_regs[i-1].isconst=0;
7700 branch_regs[i-1].wasconst=0;
7701 memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7702 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7707 if (dops[i-1].is_ujump)
7709 if(dops[i-1].rt1==31) // JAL/JALR
7711 // Subroutine call will return here, don't alloc any registers
7713 clear_all_regs(current.regmap);
7714 alloc_reg(&current,i,CCREG);
7715 dirty_reg(&current,CCREG);
7719 // Internal branch will jump here, match registers to caller
7721 clear_all_regs(current.regmap);
7722 alloc_reg(&current,i,CCREG);
7723 dirty_reg(&current,CCREG);
7726 if(cinfo[j].ba==start+i*4+4) {
7727 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
7728 current.dirty=branch_regs[j].dirty;
7733 if(cinfo[j].ba==start+i*4+4) {
7734 for(hr=0;hr<HOST_REGS;hr++) {
7735 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
7736 current.regmap[hr]=-1;
7738 current.dirty&=branch_regs[j].dirty;
7747 // Count cycles in between branches
7748 cinfo[i].ccadj = CLOCK_ADJUST(cc);
7749 if (i > 0 && (dops[i-1].is_jump || dops[i].is_exception))
7753 #if !defined(DRC_DBG)
7754 else if(dops[i].itype==C2OP&&gte_cycletab[source[i]&0x3f]>2)
7756 // this should really be removed since the real stalls have been implemented,
7757 // but doing so causes sizeable perf regression against the older version
7758 u_int gtec = gte_cycletab[source[i] & 0x3f];
7759 cc += HACK_ENABLED(NDHACK_NO_STALLS) ? gtec/2 : 2;
7761 else if(i>1&&dops[i].itype==STORE&&dops[i-1].itype==STORE&&dops[i-2].itype==STORE&&!dops[i].bt)
7765 else if(dops[i].itype==C2LS)
7767 // same as with C2OP
7768 cc += HACK_ENABLED(NDHACK_NO_STALLS) ? 4 : 2;
7776 if(!dops[i].is_ds) {
7777 regs[i].dirty=current.dirty;
7778 regs[i].isconst=current.isconst;
7779 memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
7781 for(hr=0;hr<HOST_REGS;hr++) {
7782 if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
7783 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
7784 regs[i].wasconst&=~(1<<hr);
7788 //regs[i].waswritten=current.waswritten;
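/*
 * Pass 4 below works on host registers rather than MIPS registers: walking
 * backwards it collects, in nr, the set of host regs whose contents are
 * still needed (sources, branch-target entries, ROREG/INVCP for memory ops,
 * CCREG at branches), then drops the mapping, dirty and const bits of every
 * other host reg so later passes don't try to preserve dead values.
 */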
7792 static noinline void pass4_cull_unused_regs(void)
7794 u_int last_needed_regs[4] = {0,0,0,0};
7798 for (i=slen-1;i>=0;i--)
7801 __builtin_prefetch(regs[i-2].regmap);
7804 if(cinfo[i].ba<start || cinfo[i].ba>=(start+slen*4))
7806 // Branch out of this block, don't need anything
7812 // Need whatever matches the target
7814 int t=(cinfo[i].ba-start)>>2;
7815 for(hr=0;hr<HOST_REGS;hr++)
7817 if(regs[i].regmap_entry[hr]>=0) {
7818 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
7822 // Conditional branch may need registers for following instructions
7823 if (!dops[i].is_ujump)
7826 nr |= last_needed_regs[(i+2) & 3];
7827 for(hr=0;hr<HOST_REGS;hr++)
7829 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
7830 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
7834 // Don't need stuff which is overwritten
7835 //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7836 //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7837 // Merge in delay slot
7838 if (dops[i+1].rt1) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt1);
7839 if (dops[i+1].rt2) nr &= ~get_regm(regs[i].regmap, dops[i+1].rt2);
7840 nr |= get_regm(regmap_pre[i], dops[i+1].rs1);
7841 nr |= get_regm(regmap_pre[i], dops[i+1].rs2);
7842 nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs1);
7843 nr |= get_regm(regs[i].regmap_entry, dops[i+1].rs2);
7844 if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store)) {
7845 nr |= get_regm(regmap_pre[i], ROREG);
7846 nr |= get_regm(regs[i].regmap_entry, ROREG);
7848 if (dops[i+1].is_store) {
7849 nr |= get_regm(regmap_pre[i], INVCP);
7850 nr |= get_regm(regs[i].regmap_entry, INVCP);
7853 else if (dops[i].is_exception)
7855 // SYSCALL instruction, etc
7861 for(hr=0;hr<HOST_REGS;hr++) {
7862 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
7863 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
7864 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7865 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7869 // Overwritten registers are not needed
7870 if (dops[i].rt1) nr &= ~get_regm(regs[i].regmap, dops[i].rt1);
7871 if (dops[i].rt2) nr &= ~get_regm(regs[i].regmap, dops[i].rt2);
7872 nr &= ~get_regm(regs[i].regmap, FTEMP);
7873 // Source registers are needed
7874 nr |= get_regm(regmap_pre[i], dops[i].rs1);
7875 nr |= get_regm(regmap_pre[i], dops[i].rs2);
7876 nr |= get_regm(regs[i].regmap_entry, dops[i].rs1);
7877 nr |= get_regm(regs[i].regmap_entry, dops[i].rs2);
7878 if (ram_offset && (dops[i].is_load || dops[i].is_store)) {
7879 nr |= get_regm(regmap_pre[i], ROREG);
7880 nr |= get_regm(regs[i].regmap_entry, ROREG);
7882 if (dops[i].is_store) {
7883 nr |= get_regm(regmap_pre[i], INVCP);
7884 nr |= get_regm(regs[i].regmap_entry, INVCP);
7887 if (i > 0 && !dops[i].bt && regs[i].wasdirty)
7888 for(hr=0;hr<HOST_REGS;hr++)
7890 // Don't store a register immediately after writing it,
7891 // may prevent dual-issue.
7892 // But do so if this is a branch target, otherwise we
7893 // might have to load the register before the branch.
7894 if((regs[i].wasdirty>>hr)&1) {
7895 if((regmap_pre[i][hr]>0&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
7896 if(dops[i-1].rt1==regmap_pre[i][hr]) nr|=1<<hr;
7897 if(dops[i-1].rt2==regmap_pre[i][hr]) nr|=1<<hr;
7899 if((regs[i].regmap_entry[hr]>0&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
7900 if(dops[i-1].rt1==regs[i].regmap_entry[hr]) nr|=1<<hr;
7901 if(dops[i-1].rt2==regs[i].regmap_entry[hr]) nr|=1<<hr;
7905 // Cycle count is needed at branches. Assume it is needed at the target too.
7906 if (i == 0 || dops[i].bt || dops[i].may_except || dops[i].itype == CJUMP) {
7907 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7908 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7911 last_needed_regs[i & 3] = nr;
7913 // Deallocate unneeded registers
7914 for(hr=0;hr<HOST_REGS;hr++)
7917 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
7920 int map1 = 0, map2 = 0, temp = 0; // or -1 ??
7921 if (dops[i+1].is_load || dops[i+1].is_store)
7923 if (dops[i+1].is_store)
7925 if(dops[i+1].itype==LOADLR || dops[i+1].itype==STORELR || dops[i+1].itype==C2LS)
7927 if(regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
7928 regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
7929 regs[i].regmap[hr]!=dops[i+1].rt1 && regs[i].regmap[hr]!=dops[i+1].rt2 &&
7930 regs[i].regmap[hr]!=dops[i+1].rs1 && regs[i].regmap[hr]!=dops[i+1].rs2 &&
7931 regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=PTEMP &&
7932 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
7933 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
7934 regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2)
7936 regs[i].regmap[hr]=-1;
7937 regs[i].isconst&=~(1<<hr);
7938 regs[i].dirty&=~(1<<hr);
7939 regs[i+1].wasdirty&=~(1<<hr);
7940 if(branch_regs[i].regmap[hr]!=dops[i].rs1 && branch_regs[i].regmap[hr]!=dops[i].rs2 &&
7941 branch_regs[i].regmap[hr]!=dops[i].rt1 && branch_regs[i].regmap[hr]!=dops[i].rt2 &&
7942 branch_regs[i].regmap[hr]!=dops[i+1].rt1 && branch_regs[i].regmap[hr]!=dops[i+1].rt2 &&
7943 branch_regs[i].regmap[hr]!=dops[i+1].rs1 && branch_regs[i].regmap[hr]!=dops[i+1].rs2 &&
7944 branch_regs[i].regmap[hr]!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
7945 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
7946 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
7947 branch_regs[i].regmap[hr]!=map1 && branch_regs[i].regmap[hr]!=map2)
7949 branch_regs[i].regmap[hr]=-1;
7950 branch_regs[i].regmap_entry[hr]=-1;
7951 if (!dops[i].is_ujump)
7954 regmap_pre[i+2][hr]=-1;
7955 regs[i+2].wasconst&=~(1<<hr);
7966 int map1 = -1, map2 = -1, temp=-1;
7967 if (dops[i].is_load || dops[i].is_store)
7969 if (dops[i].is_store)
7971 if (dops[i].itype==LOADLR || dops[i].itype==STORELR || dops[i].itype==C2LS)
7973 if(regs[i].regmap[hr]!=dops[i].rt1 && regs[i].regmap[hr]!=dops[i].rt2 &&
7974 regs[i].regmap[hr]!=dops[i].rs1 && regs[i].regmap[hr]!=dops[i].rs2 &&
7975 regs[i].regmap[hr]!=temp && regs[i].regmap[hr]!=map1 && regs[i].regmap[hr]!=map2 &&
7976 //(dops[i].itype!=SPAN||regs[i].regmap[hr]!=CCREG)
7977 regs[i].regmap[hr] != CCREG)
7979 if(i<slen-1&&!dops[i].is_ds) {
7980 assert(regs[i].regmap[hr]<64);
7981 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]>0)
7982 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
7984 SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
7985 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
7987 regmap_pre[i+1][hr]=-1;
7988 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
7989 regs[i+1].wasconst&=~(1<<hr);
7991 regs[i].regmap[hr]=-1;
7992 regs[i].isconst&=~(1<<hr);
7993 regs[i].dirty&=~(1<<hr);
7994 regs[i+1].wasdirty&=~(1<<hr);
8003 // If a register is allocated during a loop, try to allocate it for the
8004 // entire loop, if possible. This avoids loading/storing registers
8005 // inside of the loop.
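/*
 * Illustrative case (not from the original comments): in a loop such as
 *
 *   loop: lw    $t0,0($a0)
 *         addu  $v0,$v0,$t0
 *         bne   $a0,$a1,loop
 *         addiu $a0,$a0,4
 *
 * $a0, $a1 and $v0 would otherwise be reloaded/spilled around the backward
 * branch on every iteration; extending their host-register mapping back to
 * the loop head (when enough free registers exist) avoids that traffic.
 */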
8006 static noinline void pass5a_preallocate1(void)
8009 signed char f_regmap[HOST_REGS];
8010 clear_all_regs(f_regmap);
8011 for(i=0;i<slen-1;i++)
8013 if(dops[i].itype==UJUMP||dops[i].itype==CJUMP||dops[i].itype==SJUMP)
8015 if(cinfo[i].ba>=start && cinfo[i].ba<(start+i*4))
8016 if(dops[i+1].itype==NOP||dops[i+1].itype==MOV||dops[i+1].itype==ALU
8017 ||dops[i+1].itype==SHIFTIMM||dops[i+1].itype==IMM16||dops[i+1].itype==LOAD
8018 ||dops[i+1].itype==STORE||dops[i+1].itype==STORELR
8019 ||dops[i+1].itype==SHIFT
8020 ||dops[i+1].itype==COP2||dops[i+1].itype==C2LS||dops[i+1].itype==C2OP)
8022 int t=(cinfo[i].ba-start)>>2;
8023 if(t > 0 && !dops[t-1].is_jump) // loop_preload can't handle jumps into delay slots
8024 if(t<2||(dops[t-2].itype!=UJUMP&&dops[t-2].itype!=RJUMP)||dops[t-2].rt1!=31) // call/ret assumes no registers allocated
8025 for(hr=0;hr<HOST_REGS;hr++)
8027 if(regs[i].regmap[hr]>=0) {
8028 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8029 // dealloc old register
8031 for(n=0;n<HOST_REGS;n++)
8033 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8035 // and alloc new one
8036 f_regmap[hr]=regs[i].regmap[hr];
8039 if(branch_regs[i].regmap[hr]>=0) {
8040 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
8041 // dealloc old register
8043 for(n=0;n<HOST_REGS;n++)
8045 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
8047 // and alloc new one
8048 f_regmap[hr]=branch_regs[i].regmap[hr];
8052 if(count_free_regs(regs[i].regmap)<=cinfo[i+1].min_free_regs)
8053 f_regmap[hr]=branch_regs[i].regmap[hr];
8055 if(count_free_regs(branch_regs[i].regmap)<=cinfo[i+1].min_free_regs)
8056 f_regmap[hr]=branch_regs[i].regmap[hr];
8058 // Avoid dirty->clean transition
8059 #ifdef DESTRUCTIVE_WRITEBACK
8060 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
8062 // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
8063 // case above, however it's always a good idea. We can't hoist the
8064 // load if the register was already allocated, so there's no point
8065 // wasting time analyzing most of these cases. It only "succeeds"
8066 // when the mapping was different and the load can be replaced with
8067 // a mov, which is of negligible benefit. So such cases are
8069 if(f_regmap[hr]>0) {
8070 if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
8074 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,cinfo[i].ba,start+j*4,hr,r);
8075 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
8077 if(regs[j].regmap[hr]==f_regmap[hr]&&f_regmap[hr]<TEMPREG) {
8078 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,cinfo[i].ba,start+j*4,hr,r);
8080 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
8081 if(get_reg(regs[i].regmap,f_regmap[hr])>=0) break;
8082 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
8084 while(k>1&&regs[k-1].regmap[hr]==-1) {
8085 if(count_free_regs(regs[k-1].regmap)<=cinfo[k-1].min_free_regs) {
8086 //printf("no free regs for store %x\n",start+(k-1)*4);
8089 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
8090 //printf("no-match due to different register\n");
8093 if (dops[k-2].is_jump) {
8094 //printf("no-match due to branch\n");
8097 // call/ret fast path assumes no registers allocated
8098 if(k>2&&(dops[k-3].itype==UJUMP||dops[k-3].itype==RJUMP)&&dops[k-3].rt1==31) {
8103 if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
8104 //printf("Extend r%d, %x ->\n",hr,start+k*4);
8106 regs[k].regmap_entry[hr]=f_regmap[hr];
8107 regs[k].regmap[hr]=f_regmap[hr];
8108 regmap_pre[k+1][hr]=f_regmap[hr];
8109 regs[k].wasdirty&=~(1<<hr);
8110 regs[k].dirty&=~(1<<hr);
8111 regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
8112 regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
8113 regs[k].wasconst&=~(1<<hr);
8114 regs[k].isconst&=~(1<<hr);
8119 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
8122 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
8123 if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
8124 //printf("OK fill %x (r%d)\n",start+i*4,hr);
8125 regs[i].regmap_entry[hr]=f_regmap[hr];
8126 regs[i].regmap[hr]=f_regmap[hr];
8127 regs[i].wasdirty&=~(1<<hr);
8128 regs[i].dirty&=~(1<<hr);
8129 regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
8130 regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
8131 regs[i].wasconst&=~(1<<hr);
8132 regs[i].isconst&=~(1<<hr);
8133 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
8134 branch_regs[i].wasdirty&=~(1<<hr);
8135 branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
8136 branch_regs[i].regmap[hr]=f_regmap[hr];
8137 branch_regs[i].dirty&=~(1<<hr);
8138 branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
8139 branch_regs[i].wasconst&=~(1<<hr);
8140 branch_regs[i].isconst&=~(1<<hr);
8141 if (!dops[i].is_ujump) {
8142 regmap_pre[i+2][hr]=f_regmap[hr];
8143 regs[i+2].wasdirty&=~(1<<hr);
8144 regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
8149 // Alloc register clean at beginning of loop,
8150 // but may dirty it in pass 6
8151 regs[k].regmap_entry[hr]=f_regmap[hr];
8152 regs[k].regmap[hr]=f_regmap[hr];
8153 regs[k].dirty&=~(1<<hr);
8154 regs[k].wasconst&=~(1<<hr);
8155 regs[k].isconst&=~(1<<hr);
8156 if (dops[k].is_jump) {
8157 branch_regs[k].regmap_entry[hr]=f_regmap[hr];
8158 branch_regs[k].regmap[hr]=f_regmap[hr];
8159 branch_regs[k].dirty&=~(1<<hr);
8160 branch_regs[k].wasconst&=~(1<<hr);
8161 branch_regs[k].isconst&=~(1<<hr);
8162 if (!dops[k].is_ujump) {
8163 regmap_pre[k+2][hr]=f_regmap[hr];
8164 regs[k+2].wasdirty&=~(1<<hr);
8169 regmap_pre[k+1][hr]=f_regmap[hr];
8170 regs[k+1].wasdirty&=~(1<<hr);
8173 if(regs[j].regmap[hr]==f_regmap[hr])
8174 regs[j].regmap_entry[hr]=f_regmap[hr];
8178 if(regs[j].regmap[hr]>=0)
8180 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
8181 //printf("no-match due to different register\n");
8184 if (dops[j].is_ujump)
8186 // Stop on unconditional branch
8189 if(dops[j].itype==CJUMP||dops[j].itype==SJUMP)
8192 if(count_free_regs(regs[j].regmap)<=cinfo[j+1].min_free_regs)
8195 if(count_free_regs(branch_regs[j].regmap)<=cinfo[j+1].min_free_regs)
8198 if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
8199 //printf("no-match due to different register (branch)\n");
8203 if(count_free_regs(regs[j].regmap)<=cinfo[j].min_free_regs) {
8204 //printf("No free regs for store %x\n",start+j*4);
8207 assert(f_regmap[hr]<64);
8214 // Non branch or undetermined branch target
8215 for(hr=0;hr<HOST_REGS;hr++)
8217 if(hr!=EXCLUDE_REG) {
8218 if(regs[i].regmap[hr]>=0) {
8219 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8220 // dealloc old register
8222 for(n=0;n<HOST_REGS;n++)
8224 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8226 // and alloc new one
8227 f_regmap[hr]=regs[i].regmap[hr];
8232 // Try to restore cycle count at branch targets
8234 for(j=i;j<slen-1;j++) {
8235 if(regs[j].regmap[HOST_CCREG]!=-1) break;
8236 if(count_free_regs(regs[j].regmap)<=cinfo[j].min_free_regs) {
8237 //printf("no free regs for store %x\n",start+j*4);
8241 if(regs[j].regmap[HOST_CCREG]==CCREG) {
8243 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
8245 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8246 regs[k].regmap[HOST_CCREG]=CCREG;
8247 regmap_pre[k+1][HOST_CCREG]=CCREG;
8248 regs[k+1].wasdirty|=1<<HOST_CCREG;
8249 regs[k].dirty|=1<<HOST_CCREG;
8250 regs[k].wasconst&=~(1<<HOST_CCREG);
8251 regs[k].isconst&=~(1<<HOST_CCREG);
8254 regs[j].regmap_entry[HOST_CCREG]=CCREG;
8256 // Work backwards from the branch target
8257 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
8259 //printf("Extend backwards\n");
8262 while(regs[k-1].regmap[HOST_CCREG]==-1) {
8263 if(count_free_regs(regs[k-1].regmap)<=cinfo[k-1].min_free_regs) {
8264 //printf("no free regs for store %x\n",start+(k-1)*4);
8269 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
8270 //printf("Extend CC, %x ->\n",start+k*4);
8272 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8273 regs[k].regmap[HOST_CCREG]=CCREG;
8274 regmap_pre[k+1][HOST_CCREG]=CCREG;
8275 regs[k+1].wasdirty|=1<<HOST_CCREG;
8276 regs[k].dirty|=1<<HOST_CCREG;
8277 regs[k].wasconst&=~(1<<HOST_CCREG);
8278 regs[k].isconst&=~(1<<HOST_CCREG);
8283 //printf("Fail Extend CC, %x ->\n",start+k*4);
8287 if(dops[i].itype!=STORE&&dops[i].itype!=STORELR&&dops[i].itype!=SHIFT&&
8288 dops[i].itype!=NOP&&dops[i].itype!=MOV&&dops[i].itype!=ALU&&dops[i].itype!=SHIFTIMM&&
8289 dops[i].itype!=IMM16&&dops[i].itype!=LOAD)
8291 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
8297 // This allocates registers (if possible) one instruction prior
8298 // to use, which can avoid a load-use penalty on certain CPUs.
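/*
 * Rough illustration: given
 *
 *   addu $v0,$v0,$t1
 *   lw   $t2,0($a0)
 *
 * if the host register picked for $a0 at the lw is still free during the
 * addu, the mapping is extended back one instruction so the value can be
 * loaded early and the lw itself doesn't pay a load-use stall.
 */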
8299 static noinline void pass5b_preallocate2(void)
8302 for(i=0;i<slen-1;i++)
8304 if (!i || !dops[i-1].is_jump)
8308 int j, can_steal = 1;
8309 for (j = i; j < i + 2; j++) {
8311 if (cinfo[j].min_free_regs == 0)
8313 for (hr = 0; hr < HOST_REGS; hr++)
8314 if (hr != EXCLUDE_REG && regs[j].regmap[hr] < 0)
8316 if (free_regs <= cinfo[j].min_free_regs) {
8323 if(dops[i].itype==ALU||dops[i].itype==MOV||dops[i].itype==LOAD||dops[i].itype==SHIFTIMM||dops[i].itype==IMM16
8324 ||(dops[i].itype==COP2&&dops[i].opcode2<3))
8327 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rs1))>=0)
8329 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8331 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8332 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8333 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8334 regs[i].isconst&=~(1<<hr);
8335 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8336 constmap[i][hr]=constmap[i+1][hr];
8337 regs[i+1].wasdirty&=~(1<<hr);
8338 regs[i].dirty&=~(1<<hr);
8343 if((hr=get_reg(regs[i+1].regmap,dops[i+1].rs2))>=0)
8345 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8347 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8348 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8349 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8350 regs[i].isconst&=~(1<<hr);
8351 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8352 constmap[i][hr]=constmap[i+1][hr];
8353 regs[i+1].wasdirty&=~(1<<hr);
8354 regs[i].dirty&=~(1<<hr);
8358 // Preload target address for load instruction (non-constant)
8359 if(dops[i+1].itype==LOAD&&dops[i+1].rs1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8360 if((hr=get_reg_w(regs[i+1].regmap, dops[i+1].rt1))>=0)
8362 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8364 regs[i].regmap[hr]=dops[i+1].rs1;
8365 regmap_pre[i+1][hr]=dops[i+1].rs1;
8366 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8367 regs[i].isconst&=~(1<<hr);
8368 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8369 constmap[i][hr]=constmap[i+1][hr];
8370 regs[i+1].wasdirty&=~(1<<hr);
8371 regs[i].dirty&=~(1<<hr);
8375 // Load source into target register
8376 if(dops[i+1].use_lt1&&get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8377 if((hr=get_reg_w(regs[i+1].regmap, dops[i+1].rt1))>=0)
8379 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8381 regs[i].regmap[hr]=dops[i+1].rs1;
8382 regmap_pre[i+1][hr]=dops[i+1].rs1;
8383 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8384 regs[i].isconst&=~(1<<hr);
8385 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8386 constmap[i][hr]=constmap[i+1][hr];
8387 regs[i+1].wasdirty&=~(1<<hr);
8388 regs[i].dirty&=~(1<<hr);
8392 // Address for store instruction (non-constant)
8393 if (dops[i+1].is_store) { // SB/SH/SW/SWC2
8394 if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8395 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
8396 if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
8398 regs[i+1].regmap[hr]=AGEN1+((i+1)&1);
8399 regs[i+1].isconst&=~(1<<hr);
8400 regs[i+1].dirty&=~(1<<hr);
8401 regs[i+2].wasdirty&=~(1<<hr);
8404 #if 0 // what is this for? double allocs $0 in ps1_rom.bin
8405 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8407 regs[i].regmap[hr]=dops[i+1].rs1;
8408 regmap_pre[i+1][hr]=dops[i+1].rs1;
8409 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8410 regs[i].isconst&=~(1<<hr);
8411 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8412 constmap[i][hr]=constmap[i+1][hr];
8413 regs[i+1].wasdirty&=~(1<<hr);
8414 regs[i].dirty&=~(1<<hr);
8419 if (dops[i+1].itype == LOADLR || dops[i+1].opcode == 0x32) { // LWC2
8420 if(get_reg(regs[i+1].regmap,dops[i+1].rs1)<0) {
8422 hr=get_reg(regs[i+1].regmap,FTEMP);
8424 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8426 regs[i].regmap[hr]=dops[i+1].rs1;
8427 regmap_pre[i+1][hr]=dops[i+1].rs1;
8428 regs[i+1].regmap_entry[hr]=dops[i+1].rs1;
8429 regs[i].isconst&=~(1<<hr);
8430 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8431 constmap[i][hr]=constmap[i+1][hr];
8432 regs[i+1].wasdirty&=~(1<<hr);
8433 regs[i].dirty&=~(1<<hr);
8435 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
8437 // move it to another register
8438 regs[i+1].regmap[hr]=-1;
8439 regmap_pre[i+2][hr]=-1;
8440 regs[i+1].regmap[nr]=FTEMP;
8441 regmap_pre[i+2][nr]=FTEMP;
8442 regs[i].regmap[nr]=dops[i+1].rs1;
8443 regmap_pre[i+1][nr]=dops[i+1].rs1;
8444 regs[i+1].regmap_entry[nr]=dops[i+1].rs1;
8445 regs[i].isconst&=~(1<<nr);
8446 regs[i+1].isconst&=~(1<<nr);
8447 regs[i].dirty&=~(1<<nr);
8448 regs[i+1].wasdirty&=~(1<<nr);
8449 regs[i+1].dirty&=~(1<<nr);
8450 regs[i+2].wasdirty&=~(1<<nr);
8454 if(dops[i+1].itype==LOAD||dops[i+1].itype==LOADLR||dops[i+1].itype==STORE||dops[i+1].itype==STORELR/*||dops[i+1].itype==C2LS*/) {
8456 if(dops[i+1].itype==LOAD)
8457 hr=get_reg_w(regs[i+1].regmap, dops[i+1].rt1);
8458 if (dops[i+1].itype == LOADLR || dops[i+1].opcode == 0x32) // LWC2
8459 hr=get_reg(regs[i+1].regmap,FTEMP);
8460 if (dops[i+1].is_store) {
8461 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
8462 if(hr<0) hr=get_reg_temp(regs[i+1].regmap);
8464 if(hr>=0&&regs[i].regmap[hr]<0) {
8465 int rs=get_reg(regs[i+1].regmap,dops[i+1].rs1);
8466 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
8467 regs[i].regmap[hr]=AGEN1+((i+1)&1);
8468 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
8469 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
8470 regs[i].isconst&=~(1<<hr);
8471 regs[i+1].wasdirty&=~(1<<hr);
8472 regs[i].dirty&=~(1<<hr);
8482 // Write back dirty registers as soon as we will no longer modify them,
8483 // so that we don't end up with lots of writes at the branches.
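/*
 * will_dirty/wont_dirty below are per-instruction host-register bitmasks
 * propagated backwards: a register that is guaranteed to be dirtied again
 * before the next writeback point may stay dirty, anything else can be
 * written back (or marked clean) early so a branch doesn't have to flush a
 * pile of registers at once.
 */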
8484 static noinline void pass6_clean_registers(int istart, int iend, int wr)
8486 static u_int wont_dirty[MAXBLOCK];
8487 static u_int will_dirty[MAXBLOCK];
8490 u_int will_dirty_i,will_dirty_next,temp_will_dirty;
8491 u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
8493 will_dirty_i=will_dirty_next=0;
8494 wont_dirty_i=wont_dirty_next=0;
8496 will_dirty_i=will_dirty_next=will_dirty[iend+1];
8497 wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
8499 for (i=iend;i>=istart;i--)
8501 signed char rregmap_i[RRMAP_SIZE];
8502 u_int hr_candirty = 0;
8503 assert(HOST_REGS < 32);
8504 make_rregs(regs[i].regmap, rregmap_i, &hr_candirty);
8505 __builtin_prefetch(regs[i-1].regmap);
8508 signed char branch_rregmap_i[RRMAP_SIZE];
8509 u_int branch_hr_candirty = 0;
8510 make_rregs(branch_regs[i].regmap, branch_rregmap_i, &branch_hr_candirty);
8511 if(cinfo[i].ba<start || cinfo[i].ba>=(start+slen*4))
8513 // Branch out of this block, flush all regs
8515 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8516 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8517 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8518 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8519 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8520 will_dirty_i &= branch_hr_candirty;
8521 if (dops[i].is_ujump)
8523 // Unconditional branch
8525 // Merge in delay slot (will dirty)
8526 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8527 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8528 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8529 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8530 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8531 will_dirty_i &= hr_candirty;
8535 // Conditional branch
8536 wont_dirty_i = wont_dirty_next;
8537 // Merge in delay slot (will dirty)
8538 // (the original code had no explanation why these 2 are commented out)
8539 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8540 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8541 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8542 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8543 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8544 will_dirty_i &= hr_candirty;
8546 // Merge in delay slot (wont dirty)
8547 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8548 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8549 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8550 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8551 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8552 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8553 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8554 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8555 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8556 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8557 wont_dirty_i &= ~(1u << 31);
8559 #ifndef DESTRUCTIVE_WRITEBACK
8560 branch_regs[i].dirty&=wont_dirty_i;
8562 branch_regs[i].dirty|=will_dirty_i;
8568 if(cinfo[i].ba<=start+i*4) {
8570 if (dops[i].is_ujump)
8572 // Unconditional branch
8575 // Merge in delay slot (will dirty)
8576 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8577 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8578 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8579 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8580 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8581 temp_will_dirty &= branch_hr_candirty;
8582 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8583 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8584 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8585 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8586 temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8587 temp_will_dirty &= hr_candirty;
8589 // Conditional branch (not taken case)
8590 temp_will_dirty=will_dirty_next;
8591 temp_wont_dirty=wont_dirty_next;
8592 // Merge in delay slot (will dirty)
8593 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8594 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8595 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8596 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8597 temp_will_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8598 temp_will_dirty &= branch_hr_candirty;
8599 //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8600 //temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8601 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8602 temp_will_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8603 temp_will_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8604 temp_will_dirty &= hr_candirty;
8606 // Merge in delay slot (wont dirty)
8607 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8608 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8609 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8610 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8611 temp_wont_dirty |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8612 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8613 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8614 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8615 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8616 temp_wont_dirty |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8617 temp_wont_dirty &= ~(1u << 31);
8618 // Deal with changed mappings
8620 for(r=0;r<HOST_REGS;r++) {
8621 if(r!=EXCLUDE_REG) {
8622 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
8623 temp_will_dirty&=~(1<<r);
8624 temp_wont_dirty&=~(1<<r);
8625 if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
8626 temp_will_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8627 temp_wont_dirty|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8629 temp_will_dirty|=1<<r;
8630 temp_wont_dirty|=1<<r;
8637 will_dirty[i]=temp_will_dirty;
8638 wont_dirty[i]=temp_wont_dirty;
8639 pass6_clean_registers((cinfo[i].ba-start)>>2,i-1,0);
8641 // Limit recursion. It can take an excessive amount
8642 // of time if there are a lot of nested loops.
8643 will_dirty[(cinfo[i].ba-start)>>2]=0;
8644 wont_dirty[(cinfo[i].ba-start)>>2]=-1;
8649 if (dops[i].is_ujump)
8651 // Unconditional branch
8654 //if(cinfo[i].ba>start+i*4) { // Disable recursion (for debugging)
8655 for(r=0;r<HOST_REGS;r++) {
8656 if(r!=EXCLUDE_REG) {
8657 if(branch_regs[i].regmap[r]==regs[(cinfo[i].ba-start)>>2].regmap_entry[r]) {
8658 will_dirty_i|=will_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8659 wont_dirty_i|=wont_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8661 if(branch_regs[i].regmap[r]>=0) {
8662 will_dirty_i|=((unneeded_reg[(cinfo[i].ba-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
8663 wont_dirty_i|=((unneeded_reg[(cinfo[i].ba-start)>>2]>>branch_regs[i].regmap[r])&1)<<r;
8668 // Merge in delay slot
8669 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8670 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8671 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8672 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8673 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8674 will_dirty_i &= branch_hr_candirty;
8675 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8676 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8677 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8678 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8679 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8680 will_dirty_i &= hr_candirty;
8682 // Conditional branch
8683 will_dirty_i=will_dirty_next;
8684 wont_dirty_i=wont_dirty_next;
8685 //if(cinfo[i].ba>start+i*4) // Disable recursion (for debugging)
8686 for(r=0;r<HOST_REGS;r++) {
8687 if(r!=EXCLUDE_REG) {
8688 signed char target_reg=branch_regs[i].regmap[r];
8689 if(target_reg==regs[(cinfo[i].ba-start)>>2].regmap_entry[r]) {
8690 will_dirty_i&=will_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8691 wont_dirty_i|=wont_dirty[(cinfo[i].ba-start)>>2]&(1<<r);
8693 else if(target_reg>=0) {
8694 will_dirty_i&=((unneeded_reg[(cinfo[i].ba-start)>>2]>>target_reg)&1)<<r;
8695 wont_dirty_i|=((unneeded_reg[(cinfo[i].ba-start)>>2]>>target_reg)&1)<<r;
8699 // Merge in delay slot
8700 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8701 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8702 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8703 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8704 will_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8705 will_dirty_i &= branch_hr_candirty;
8706 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8707 //will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8708 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8709 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8710 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8711 will_dirty_i &= hr_candirty;
8713 // Merge in delay slot (won't dirty)
8714 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8715 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8716 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt1) & 31);
8717 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i+1].rt2) & 31);
8718 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8719 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt1) & 31);
8720 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i].rt2) & 31);
8721 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt1) & 31);
8722 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, dops[i+1].rt2) & 31);
8723 wont_dirty_i |= 1u << (get_rreg(branch_rregmap_i, CCREG) & 31);
8724 wont_dirty_i &= ~(1u << 31);
8726 #ifndef DESTRUCTIVE_WRITEBACK
8727 branch_regs[i].dirty&=wont_dirty_i;
8729 branch_regs[i].dirty|=will_dirty_i;
8734 else if (dops[i].is_exception)
8736 // SYSCALL instruction, etc
8740 will_dirty_next=will_dirty_i;
8741 wont_dirty_next=wont_dirty_i;
8742 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8743 will_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8744 will_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8745 will_dirty_i &= hr_candirty;
8746 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt1) & 31);
8747 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i].rt2) & 31);
8748 wont_dirty_i |= 1u << (get_rreg(rregmap_i, CCREG) & 31);
8749 wont_dirty_i &= ~(1u << 31);
8750 if (i > istart && !dops[i].is_jump) {
8751 // Don't store a register immediately after writing it,
8752 // may prevent dual-issue.
8753 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt1) & 31);
8754 wont_dirty_i |= 1u << (get_rreg(rregmap_i, dops[i-1].rt2) & 31);
8757 will_dirty[i]=will_dirty_i;
8758 wont_dirty[i]=wont_dirty_i;
8759 // Mark registers that won't be dirtied as not dirty
8761 regs[i].dirty|=will_dirty_i;
8762 #ifndef DESTRUCTIVE_WRITEBACK
8763 regs[i].dirty&=wont_dirty_i;
8766 if (i < iend-1 && !dops[i].is_ujump) {
8767 for(r=0;r<HOST_REGS;r++) {
8768 if(r!=EXCLUDE_REG) {
8769 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
8770 regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
8771 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
8779 for(r=0;r<HOST_REGS;r++) {
8780 if(r!=EXCLUDE_REG) {
8781 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
8782 regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
8783 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
8790 // Deal with changed mappings
8791 temp_will_dirty=will_dirty_i;
8792 temp_wont_dirty=wont_dirty_i;
8793 for(r=0;r<HOST_REGS;r++) {
8794 if(r!=EXCLUDE_REG) {
8796 if(regs[i].regmap[r]==regmap_pre[i][r]) {
8798 #ifndef DESTRUCTIVE_WRITEBACK
8799 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
8801 regs[i].wasdirty|=will_dirty_i&(1<<r);
8804 else if(regmap_pre[i][r]>=0&&(nr=get_rreg(rregmap_i,regmap_pre[i][r]))>=0) {
8805 // Register moved to a different register
8806 will_dirty_i&=~(1<<r);
8807 wont_dirty_i&=~(1<<r);
8808 will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
8809 wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
8811 #ifndef DESTRUCTIVE_WRITEBACK
8812 regs[i].wasdirty&=wont_dirty_i|~(1<<r);
8814 regs[i].wasdirty|=will_dirty_i&(1<<r);
8818 will_dirty_i&=~(1<<r);
8819 wont_dirty_i&=~(1<<r);
8820 if(regmap_pre[i][r]>0 && regmap_pre[i][r]<34) {
8821 will_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8822 wont_dirty_i|=((unneeded_reg[i]>>regmap_pre[i][r])&1)<<r;
8825 /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
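/*
 * pass10_expire_blocks() below reclaims translation-cache space ahead of the
 * output pointer: expirep advances in fixed-size steps towards
 * (out + EXPIRITY_OFFSET), and for each step the blocks and linked jumps
 * whose code lives in the MAX_OUTPUT_BLOCK_SIZE-sized region about to be
 * reused are removed/unlinked.  With the current defines the step size is
 * MAX_OUTPUT_BLOCK_SIZE / PAGE_COUNT / 2 = 262144 / 1024 / 2 = 128 bytes.
 */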
8833 static noinline void pass10_expire_blocks(void)
8835 u_int step = MAX_OUTPUT_BLOCK_SIZE / PAGE_COUNT / 2;
8836 // not sizeof(ndrc->translation_cache) due to vita hack
8837 u_int step_mask = ((1u << TARGET_SIZE_2) - 1u) & ~(step - 1u);
8838 u_int end = (out - ndrc->translation_cache + EXPIRITY_OFFSET) & step_mask;
8839 u_int base_shift = __builtin_ctz(MAX_OUTPUT_BLOCK_SIZE);
8842 for (; expirep != end; expirep = ((expirep + step) & step_mask))
8844 u_int base_offs = expirep & ~(MAX_OUTPUT_BLOCK_SIZE - 1);
8845 u_int block_i = expirep / step & (PAGE_COUNT - 1);
8846 u_int phase = (expirep >> (base_shift - 1)) & 1u;
8847 if (!(expirep & (MAX_OUTPUT_BLOCK_SIZE / 2 - 1))) {
8848 inv_debug("EXP: base_offs %x/%lx phase %u\n", base_offs,
8849 (long)(out - ndrc->translation_cache), phase);
8853 hit = blocks_remove_matching_addrs(&blocks[block_i], base_offs, base_shift);
8857 memset(mini_ht, -1, sizeof(mini_ht));
8862 unlink_jumps_tc_range(jumps[block_i], base_offs, base_shift);
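/*
 * new_block_info() below allocates the block descriptor and its jump_in[]
 * array in a single allocation and inserts it into the per-page blocks[]
 * list, which is kept sorted by (mirror-unmasked) start address.
 */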
8866 static struct block_info *new_block_info(u_int start, u_int len,
8867 const void *source, const void *copy, u_char *beginning, u_short jump_in_count)
8869 struct block_info **b_pptr;
8870 struct block_info *block;
8871 u_int page = get_page(start);
8873 block = malloc(sizeof(*block) + jump_in_count * sizeof(block->jump_in[0]));
8875 assert(jump_in_count > 0);
8876 block->source = source;
8878 block->start = start;
8880 block->reg_sv_flags = 0;
8881 block->tc_offs = beginning - ndrc->translation_cache;
8882 //block->tc_len = out - beginning;
8883 block->is_dirty = 0;
8884 block->inv_near_misses = 0;
8885 block->jump_in_cnt = jump_in_count;
8887 // insert sorted by start mirror-unmasked vaddr
8888 for (b_pptr = &blocks[page]; ; b_pptr = &((*b_pptr)->next)) {
8889 if (*b_pptr == NULL || (*b_pptr)->start >= start) {
8890 block->next = *b_pptr;
8895 stat_inc(stat_blocks);
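/*
 * new_recompile_block() is the driver: it handles a couple of special-cased
 * entry points (the HLE call and F1/fastbios hacks), then runs the
 * disassembly, register-allocation and cleanup passes listed below,
 * assembles the block, resolves links, and records the block with one
 * jump_in entry per branch-target entry point that got its own host code.
 */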
8899 static int new_recompile_block(u_int addr)
8901 u_int pagelimit = 0;
8902 u_int state_rflags = 0;
8905 assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
8908 if (addr != hack_addr) {
8909 SysPrintf("game crash @%08x, ra=%08x\n", addr, psxRegs.GPR.n.ra);
8915 // this is just for speculation
8916 for (i = 1; i < 32; i++) {
8917 if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
8918 state_rflags |= 1 << i;
8922 new_dynarec_did_compile=1;
8923 if (Config.HLE && start == 0x80001000) // hlecall
8925 void *beginning = start_block();
8927 emit_movimm(start,0);
8928 emit_writeword(0,&pcaddr);
8929 emit_far_jump(new_dyna_leave);
8931 end_block(beginning);
8932 struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
8933 block->jump_in[0].vaddr = start;
8934 block->jump_in[0].addr = beginning;
8937 else if (f1_hack && hack_addr == 0) {
8938 void *beginning = start_block();
8939 emit_movimm(start, 0);
8940 emit_writeword(0, &hack_addr);
8941 emit_readword(&psxRegs.GPR.n.sp, 0);
8942 emit_readptr(&mem_rtab, 1);
8943 emit_shrimm(0, 12, 2);
8944 emit_readptr_dualindexedx_ptrlen(1, 2, 1);
8945 emit_addimm(0, 0x18, 0);
8946 emit_adds_ptr(1, 1, 1);
8947 emit_ldr_dualindexed(1, 0, 0);
8948 emit_writeword(0, &psxRegs.GPR.r[26]); // lw k0, 0x18(sp)
8949 emit_far_call(ndrc_get_addr_ht);
8950 emit_jmpreg(0); // jr k0
8952 end_block(beginning);
8954 struct block_info *block = new_block_info(start, 4, NULL, NULL, beginning, 1);
8955 block->jump_in[0].vaddr = start;
8956 block->jump_in[0].addr = beginning;
8957 SysPrintf("F1 hack to %08x\n", start);
8961 cycle_multiplier_active = Config.cycle_multiplier_override && Config.cycle_multiplier == CYCLE_MULT_DEFAULT
8962 ? Config.cycle_multiplier_override : Config.cycle_multiplier;
8964 source = get_source_start(start, &pagelimit);
8965 if (source == NULL) {
8966 if (addr != hack_addr) {
8967 SysPrintf("Compile at bogus memory address: %08x\n", addr);
8974 /* Pass 1: disassemble */
8975 /* Pass 2: register dependencies, branch targets */
8976 /* Pass 3: register allocation */
8977 /* Pass 4: branch dependencies */
8978 /* Pass 5: pre-alloc */
8979 /* Pass 6: optimize clean/dirty state */
8980 /* Pass 7: flag 32-bit registers */
8981 /* Pass 8: assembly */
8982 /* Pass 9: linker */
8983 /* Pass 10: garbage collection / free memory */
8985 /* Pass 1 disassembly */
8987 pass1_disassemble(pagelimit);
8989 int clear_hack_addr = apply_hacks();
8991 /* Pass 2 - Register dependencies and branch targets */
8993 pass2_unneeded_regs(0,slen-1,0);
8995 pass2a_unneeded_other();
8997 /* Pass 3 - Register allocation */
8999 pass3_register_alloc(addr);
9001 /* Pass 4 - Cull unused host registers */
9003 pass4_cull_unused_regs();
9005 /* Pass 5 - Pre-allocate registers */
9007 pass5a_preallocate1();
9008 pass5b_preallocate2();
9010 /* Pass 6 - Optimize clean/dirty state */
9011 pass6_clean_registers(0, slen-1, 1);
9014 for (i=slen-1;i>=0;i--)
9016 if(dops[i].itype==CJUMP||dops[i].itype==SJUMP)
9018 // Conditional branch
9019 if((source[i]>>16)!=0x1000&&i<slen-2) {
9020 // Mark this address as a branch target since it may be called
9021 // upon return from interrupt
9027 /* Pass 8 - Assembly */
9028 linkcount=0;stubcount=0;
9031 void *beginning=start_block();
9032 void *instr_addr0_override = NULL;
9035 if (start == 0x80030000) {
9036 // nasty hack for the fastbios thing
9037 // override block entry to this code
9038 instr_addr0_override = out;
9039 emit_movimm(start,0);
9040 // abuse io address var as a flag that we
9041 // have already returned here once
9042 emit_readword(&address,1);
9043 emit_writeword(0,&pcaddr);
9044 emit_writeword(0,&address);
9047 emit_jeq(out + 4*2);
9048 emit_far_jump(new_dyna_leave);
9050 emit_jne(new_dyna_leave);
9055 __builtin_prefetch(regs[i+1].regmap);
9056 check_regmap(regmap_pre[i]);
9057 check_regmap(regs[i].regmap_entry);
9058 check_regmap(regs[i].regmap);
9059 //if(ds) printf("ds: ");
9060 disassemble_inst(i);
9062 ds=0; // Skip delay slot
9063 if(dops[i].bt) assem_debug("OOPS - branch into delay slot\n");
9064 instr_addr[i] = NULL;
9066 speculate_register_values(i);
9067 #ifndef DESTRUCTIVE_WRITEBACK
9068 if (i < 2 || !dops[i-2].is_ujump)
9070 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
9072 if((dops[i].itype==CJUMP||dops[i].itype==SJUMP)) {
9073 dirty_pre=branch_regs[i].dirty;
9075 dirty_pre=regs[i].dirty;
9079 if (i < 2 || !dops[i-2].is_ujump)
9081 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
9082 loop_preload(regmap_pre[i],regs[i].regmap_entry);
9084 // branch target entry point
9085 instr_addr[i] = out;
9086 assem_debug("<->\n");
9087 drc_dbg_emit_do_cmp(i, cinfo[i].ccadj);
9088 if (clear_hack_addr) {
9090 emit_writeword(0, &hack_addr);
9091 clear_hack_addr = 0;
9095 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
9096 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
9097 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i].rs1,dops[i].rs2);
9098 address_generation(i,&regs[i],regs[i].regmap_entry);
9099 load_consts(regmap_pre[i],regs[i].regmap,i);
9102 // Load the delay slot registers if necessary
9103 if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2&&(dops[i+1].rs1!=dops[i].rt1||dops[i].rt1==0))
9104 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
9105 if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2&&(dops[i+1].rs2!=dops[i].rt1||dops[i].rt1==0))
9106 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
9107 if (ram_offset && (dops[i+1].is_load || dops[i+1].is_store))
9108 load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
9109 if (dops[i+1].is_store)
9110 load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
9114 // Preload registers for following instruction
9115 if(dops[i+1].rs1!=dops[i].rs1&&dops[i+1].rs1!=dops[i].rs2)
9116 if(dops[i+1].rs1!=dops[i].rt1&&dops[i+1].rs1!=dops[i].rt2)
9117 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs1,dops[i+1].rs1);
9118 if(dops[i+1].rs2!=dops[i+1].rs1&&dops[i+1].rs2!=dops[i].rs1&&dops[i+1].rs2!=dops[i].rs2)
9119 if(dops[i+1].rs2!=dops[i].rt1&&dops[i+1].rs2!=dops[i].rt2)
9120 load_regs(regs[i].regmap_entry,regs[i].regmap,dops[i+1].rs2,dops[i+1].rs2);
9122 // TODO: if(is_ooo(i)) address_generation(i+1);
9123 if (!dops[i].is_jump || dops[i].itype == CJUMP)
9124 load_reg(regs[i].regmap_entry,regs[i].regmap,CCREG);
9125 if (ram_offset && (dops[i].is_load || dops[i].is_store))
9126 load_reg(regs[i].regmap_entry,regs[i].regmap,ROREG);
9127 if (dops[i].is_store)
9128 load_reg(regs[i].regmap_entry,regs[i].regmap,INVCP);
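    // ROREG (the RAM offset base) is needed by loads/stores whenever
    // ram_offset is in effect, and INVCP (the invalid-code pointer used by
    // store handling) by stores, so they are preloaded here whenever the
    // current or delay-slot instruction will use them.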
    ds = assemble(i, &regs[i], cinfo[i].ccadj);
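    // assemble() returns nonzero in ds when it also emitted the branch delay
    // slot, so the next loop iteration only marks that slot as skipped (see
    // the "Skip delay slot" handling above).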
    drc_dbg_emit_wb_dirtys(i, &regs[i]);
    if (dops[i].is_ujump)
      literal_pool_jumpover(256);
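    // Flush pending literals periodically so they stay within reach of
    // PC-relative loads on targets that use a literal pool.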
  if (slen > 0 && dops[slen-1].itype == INTCALL) {
    // no ending needed for this block since INTCALL never returns

  // If the block did not end with an unconditional branch,
  // add a jump to the next instruction.
    if (!dops[i-2].is_ujump) {
      assert(!dops[i-1].is_jump);
      if(dops[i-2].itype!=CJUMP&&dops[i-2].itype!=SJUMP) {
        store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
        if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
          emit_loadreg(CCREG,HOST_CCREG);
        emit_addimm(HOST_CCREG, cinfo[i-1].ccadj + CLOCK_ADJUST(1), HOST_CCREG);

        store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
        assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
      add_to_linker(out,start+i*4,0);

    assert(!dops[i-1].is_jump);
    store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
    if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
      emit_loadreg(CCREG,HOST_CCREG);
    emit_addimm(HOST_CCREG, cinfo[i-1].ccadj + CLOCK_ADJUST(1), HOST_CCREG);
    add_to_linker(out,start+i*4,0);
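  // Emit the out-of-line stubs (memory access, exception and alignment
  // handlers) recorded in stubs[] while the block body was assembled.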
  for(i = 0; i < stubcount; i++)
    switch(stubs[i].type)
        do_readstub(i);break;
        do_writestub(i);break;
        do_invstub(i);break;
        do_unalignedwritestub(i);break;
        do_overflowstub(i); break;
      case ALIGNMENT_STUB:
        do_alignmentstub(i); break;

  if (instr_addr0_override)
    instr_addr[0] = instr_addr0_override;

  /* check for improper expiration */
  for (i = 0; i < ARRAY_SIZE(jumps); i++) {
    for (j = 0; j < jumps[i]->count; j++)
      assert(jumps[i]->e[j].stub < beginning || (u_char *)jumps[i]->e[j].stub > out);
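  // No recorded jump-out stub may lie inside the region just emitted; if one
  // did, a block overlapping this area was expired without unlinking its
  // outgoing jumps.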
  /* Pass 9 - Linker */
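  // Branches internal to this block are patched to point directly at the
  // corresponding instr_addr[] entry; external targets go through
  // check_addr()/emit_extjump() and are registered with ndrc_add_jump_out()
  // so the outgoing link can be tracked.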
  for(i=0;i<linkcount;i++)
    assem_debug("%p -> %8x\n",link_addr[i].addr,link_addr[i].target);

    if (!link_addr[i].internal)
      void *addr = check_addr(link_addr[i].target);
      emit_extjump(link_addr[i].addr, link_addr[i].target);

      set_jump_target(link_addr[i].addr, addr);
      ndrc_add_jump_out(link_addr[i].target,stub);

      set_jump_target(link_addr[i].addr, stub);

      int target=(link_addr[i].target-start)>>2;
      assert(target>=0&&target<slen);
      assert(instr_addr[target]);
      //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
      //set_jump_target_fillslot(link_addr[i].addr,instr_addr[target],link_addr[i].ext>>1);
      set_jump_target(link_addr[i].addr, instr_addr[target]);
  u_int source_len = slen*4;
  if (dops[slen-1].itype == INTCALL && source_len > 4)
    // no need to treat the last instruction as compiled
    // as the interpreter fully handles it

  if ((u_char *)copy + source_len > (u_char *)shadow + sizeof(shadow))
  // External Branch Targets (jump_in)
  int jump_in_count = 1;
  assert(instr_addr[0]);
  for (i = 1; i < slen; i++)
    if (dops[i].bt && instr_addr[i])

  struct block_info *block =
    new_block_info(start, slen * 4, source, copy, beginning, jump_in_count);
  block->reg_sv_flags = state_rflags;

  for (i = 0; i < slen; i++)
    if ((i == 0 || dops[i].bt) && instr_addr[i])
      assem_debug("%p (%d) <- %8x\n", instr_addr[i], i, start + i*4);
      u_int vaddr = start + i*4;

      entry = instr_addr[i];
      emit_jmp(instr_addr[i]);

      block->jump_in[jump_in_i].vaddr = vaddr;
      block->jump_in[jump_in_i].addr = entry;

  assert(jump_in_i == jump_in_count);
  hash_table_add(block->jump_in[0].vaddr, block->jump_in[0].addr);
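  // The block's primary entry point also goes into the hash table used for
  // fast block lookup by virtual address.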
  // Write out the literal pool if necessary

  #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
  if(((u_int)out)&7) emit_addnop(13);

  assert(out - (u_char *)beginning < MAX_OUTPUT_BLOCK_SIZE);
  //printf("shadow buffer: %p-%p\n",copy,(u_char *)copy+slen*4);
  memcpy(copy, source, source_len);
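  // The copied source words let the block's original code later be compared
  // against RAM to detect whether it was modified after compilation.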
  end_block(beginning);

  // If we're within 256K of the end of the buffer,
  // start over from the beginning. (Is 256K enough?)
  if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
    out = ndrc->translation_cache;

  // Trap writes to any of the pages we compiled
  mark_invalid_code(start, slen*4, 0);

  /* Pass 10 - Free memory by expiring oldest blocks */
  pass10_expire_blocks();

  stat_inc(stat_bc_direct);

// vim:shiftwidth=2:expandtab