1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2 * Mupen64plus - new_dynarec.c *
3 * Copyright (C) 2009-2011 Ari64 *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
19 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
22 #include <stdint.h> //include for uint64_t
27 #include <libkern/OSCacheControl.h>
30 #include <3ds_utils.h>
33 #include <psp2/kernel/sysmem.h>
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h"
39 #include "../psxinterpreter.h"
41 #include "emu_if.h" // emulator interface
43 #define noinline __attribute__((noinline,noclone))
45 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
49 //#define assem_debug printf
50 //#define inv_debug printf
51 #define assem_debug(...)
52 #define inv_debug(...)
55 #include "assem_x86.h"
58 #include "assem_x64.h"
61 #include "assem_arm.h"
64 #include "assem_arm64.h"
67 #define RAM_SIZE 0x200000
69 #define MAX_OUTPUT_BLOCK_SIZE 262144
73 u_char translation_cache[1 << TARGET_SIZE_2];
76 struct tramp_insns ops[2048 / sizeof(struct tramp_insns)];
77 const void *f[2048 / sizeof(void *)];
81 #ifdef BASE_ADDR_DYNAMIC
82 static struct ndrc_mem *ndrc;
84 static struct ndrc_mem ndrc_ __attribute__((aligned(4096)));
85 static struct ndrc_mem *ndrc = &ndrc_;
108 signed char regmap_entry[HOST_REGS];
109 signed char regmap[HOST_REGS];
115 u_int loadedconst; // host regs that have constants loaded
116 u_int waswritten; // MIPS regs that were used as store base before
119 // note: asm depends on this layout
125 struct ll_entry *next;
155 struct ht_entry hash_table[65536] __attribute__((aligned(16)));
156 struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
157 struct ll_entry *jump_dirty[4096];
159 static struct ll_entry *jump_out[4096];
161 static u_int *source;
162 static char insn[MAXBLOCK][10];
163 static u_char itype[MAXBLOCK];
164 static u_char opcode[MAXBLOCK];
165 static u_char opcode2[MAXBLOCK];
166 static u_char bt[MAXBLOCK];
167 static u_char rs1[MAXBLOCK];
168 static u_char rs2[MAXBLOCK];
169 static u_char rt1[MAXBLOCK];
170 static u_char rt2[MAXBLOCK];
171 static u_char dep1[MAXBLOCK];
172 static u_char dep2[MAXBLOCK];
173 static u_char lt1[MAXBLOCK];
174 static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
175 static uint64_t gte_rt[MAXBLOCK];
176 static uint64_t gte_unneeded[MAXBLOCK];
177 static u_int smrv[32]; // speculated MIPS register values
178 static u_int smrv_strong; // mask or regs that are likely to have correct values
179 static u_int smrv_weak; // same, but somewhat less likely
180 static u_int smrv_strong_next; // same, but after current insn executes
181 static u_int smrv_weak_next;
182 static int imm[MAXBLOCK];
183 static u_int ba[MAXBLOCK];
184 static char likely[MAXBLOCK];
185 static char is_ds[MAXBLOCK];
186 static char ooo[MAXBLOCK];
187 static uint64_t unneeded_reg[MAXBLOCK];
188 static uint64_t branch_unneeded_reg[MAXBLOCK];
189 static signed char regmap_pre[MAXBLOCK][HOST_REGS]; // pre-instruction i?
190 // contains 'real' consts at [i] insn, but may differ from what's actually
191 // loaded in host reg as 'final' value is always loaded, see get_final_value()
192 static uint32_t current_constmap[HOST_REGS];
193 static uint32_t constmap[MAXBLOCK][HOST_REGS];
194 static struct regstat regs[MAXBLOCK];
195 static struct regstat branch_regs[MAXBLOCK];
196 static signed char minimum_free_regs[MAXBLOCK];
197 static u_int needed_reg[MAXBLOCK];
198 static u_int wont_dirty[MAXBLOCK];
199 static u_int will_dirty[MAXBLOCK];
200 static int ccadj[MAXBLOCK];
202 static void *instr_addr[MAXBLOCK];
203 static struct link_entry link_addr[MAXBLOCK];
204 static int linkcount;
205 static struct code_stub stubs[MAXBLOCK*3];
206 static int stubcount;
207 static u_int literals[1024][2];
208 static int literalcount;
209 static int is_delayslot;
210 static char shadow[1048576] __attribute__((aligned(16)));
213 static u_int stop_after_jal;
215 static uintptr_t ram_offset;
217 static const uintptr_t ram_offset=0;
220 int new_dynarec_hacks;
221 int new_dynarec_hacks_pergame;
222 int new_dynarec_did_compile;
224 #define HACK_ENABLED(x) ((new_dynarec_hacks | new_dynarec_hacks_pergame) & (x))
226 extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
227 extern int last_count; // last absolute target, often = next_interupt
229 extern int pending_exception;
230 extern int branch_target;
231 extern uintptr_t mini_ht[32][2];
232 extern u_char restore_candidate[512];
234 /* registers that may be allocated */
236 #define LOREG 32 // lo
237 #define HIREG 33 // hi
238 //#define FSREG 34 // FPU status (FCSR)
239 #define CSREG 35 // Coprocessor status
240 #define CCREG 36 // Cycle count
241 #define INVCP 37 // Pointer to invalid_code
242 //#define MMREG 38 // Pointer to memory_map
243 //#define ROREG 39 // ram offset (if rdram!=0x80000000)
245 #define FTEMP 40 // FPU temporary register
246 #define PTEMP 41 // Prefetch temporary register
247 //#define TLREG 42 // TLB mapping offset
248 #define RHASH 43 // Return address hash
249 #define RHTBL 44 // Return address hash table address
250 #define RTEMP 45 // JR/JALR address register
252 #define AGEN1 46 // Address generation temporary register
253 //#define AGEN2 47 // Address generation temporary register
254 //#define MGEN1 48 // Maptable address generation temporary register
255 //#define MGEN2 49 // Maptable address generation temporary register
256 #define BTREG 50 // Branch target temporary register
258 /* instruction types */
259 #define NOP 0 // No operation
260 #define LOAD 1 // Load
261 #define STORE 2 // Store
262 #define LOADLR 3 // Unaligned load
263 #define STORELR 4 // Unaligned store
264 #define MOV 5 // Move
265 #define ALU 6 // Arithmetic/logic
266 #define MULTDIV 7 // Multiply/divide
267 #define SHIFT 8 // Shift by register
268 #define SHIFTIMM 9// Shift by immediate
269 #define IMM16 10 // 16-bit immediate
270 #define RJUMP 11 // Unconditional jump to register
271 #define UJUMP 12 // Unconditional jump
272 #define CJUMP 13 // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
273 #define SJUMP 14 // Conditional branch (regimm format)
274 #define COP0 15 // Coprocessor 0
275 #define COP1 16 // Coprocessor 1
276 #define C1LS 17 // Coprocessor 1 load/store
277 //#define FJUMP 18 // Conditional branch (floating point)
278 //#define FLOAT 19 // Floating point unit
279 //#define FCONV 20 // Convert integer to float
280 //#define FCOMP 21 // Floating point compare (sets FSREG)
281 #define SYSCALL 22// SYSCALL
282 #define OTHER 23 // Other
283 #define SPAN 24 // Branch/delay slot spans 2 pages
284 #define NI 25 // Not implemented
285 #define HLECALL 26// PCSX fake opcodes for HLE
286 #define COP2 27 // Coprocessor 2 move
287 #define C2LS 28 // Coprocessor 2 load/store
288 #define C2OP 29 // Coprocessor 2 operation
289 #define INTCALL 30// Call interpreter to handle rare corner cases
296 #define DJT_1 (void *)1l // no function, just a label in assem_debug log
297 #define DJT_2 (void *)2l
300 int new_recompile_block(u_int addr);
301 void *get_addr_ht(u_int vaddr);
302 void invalidate_block(u_int block);
303 void invalidate_addr(u_int addr);
304 void remove_hash(int vaddr);
306 void dyna_linker_ds();
308 void verify_code_ds();
311 void fp_exception_ds();
312 void jump_to_new_pc();
313 void call_gteStall();
314 void new_dyna_leave();
316 // Needed by assembler
317 static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
318 static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
319 static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
320 static void load_all_regs(signed char i_regmap[]);
321 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
322 static void load_regs_entry(int t);
323 static void load_all_consts(signed char regmap[],u_int dirty,int i);
324 static u_int get_host_reglist(const signed char *regmap);
326 static int verify_dirty(const u_int *ptr);
327 static int get_final_value(int hr, int i, int *value);
328 static void add_stub(enum stub_type type, void *addr, void *retaddr,
329 u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
330 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
331 int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist);
332 static void add_to_linker(void *addr, u_int target, int ext);
333 static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
334 static void *get_direct_memhandler(void *table, u_int addr,
335 enum stub_type type, uintptr_t *addr_host);
336 static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist);
337 static void pass_args(int a0, int a1);
338 static void emit_far_jump(const void *f);
339 static void emit_far_call(const void *f);
// Toggle the translation cache range [start, end) between writable
// (is_x==0) and executable (is_x==1).  Platform-specific paths (Vita
// kernel VM-domain calls vs. POSIX mprotect) are selected by
// preprocessor conditionals.
static void mprotect_w_x(void *start, void *end, int is_x)
  // *Open* enables write on all memory that was
  // allocated by sceKernelAllocMemBlockForVM()?
  sceKernelCloseVMDomain();
  sceKernelOpenVMDomain();
  // POSIX path: round start down to a 4K page boundary, then apply a
  // W^X policy — READ plus either EXEC or WRITE, never both.
  u_long mstart = (u_long)start & ~4095ul;
  u_long mend = (u_long)end;
  if (mprotect((void *)mstart, mend - mstart,
    PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
    SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
// Make [start, end) of the translation cache writable before
// emitting or patching code there.
static void start_tcache_write(void *start, void *end)
  mprotect_w_x(start, end, 0);
// Finish writing code to [start, end): flush/invalidate the instruction
// cache on architectures that need it, then make the range executable
// again.  The per-platform flush calls below are chosen by preprocessor
// conditionals (only partially visible here).
static void end_tcache_write(void *start, void *end)
#if defined(__arm__) || defined(__aarch64__)
  size_t len = (char *)end - (char *)start;
#if defined(__BLACKBERRY_QNX__)
  msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
#elif defined(__MACH__)
  sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
  // Vita: the kernel syncs the VM domain's caches for us
  sceKernelSyncVMDomain(sceBlock, start, len);
  // 3DS cache maintenance
  ctr_flush_invalidate_cache();
#elif defined(__aarch64__)
  // as of 2021, __clear_cache() is still broken on arm64
  // so here is a custom one :(
  clear_cache_arm64(start, end);
  // generic fallback: compiler builtin
  __clear_cache(start, end);
  // restore the executable (non-writable) protection — see mprotect_w_x
  mprotect_w_x(start, end, 1);
// Begin emitting a new block at the current output pointer 'out':
// open up to MAX_OUTPUT_BLOCK_SIZE bytes of the translation cache for
// writing, clamped so we never unprotect past the end of the cache.
static void *start_block(void)
  u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
  if (end > ndrc->translation_cache + sizeof(ndrc->translation_cache))
    end = ndrc->translation_cache + sizeof(ndrc->translation_cache);
  start_tcache_write(out, end);
// Close the block begun at 'start': everything emitted up to the
// current output pointer 'out' is flushed and re-protected executable.
static void end_block(void *start)
  end_tcache_write(start, out);
405 // also takes care of w^x mappings when patching code
406 static u_int needs_clear_cache[1<<(TARGET_SIZE_2-17)];
// Record that the 4K page containing 'target' was patched and will
// need an icache flush.  needs_clear_cache holds one bit per 4K page
// of the translation cache, 32 pages per word.
static void mark_clear_cache(void *target)
  uintptr_t offset = (u_char *)target - ndrc->translation_cache;
  u_int mask = 1u << ((offset >> 12) & 31);
  if (!(needs_clear_cache[offset >> 17] & mask)) {
    // first patch on this page since the last flush: make the page
    // writable now, flush happens later in do_clear_cache()
    char *start = (char *)((uintptr_t)target & ~4095l);
    start_tcache_write(start, start + 4095);
    needs_clear_cache[offset >> 17] |= mask;
419 // Clearing the cache is rather slow on ARM Linux, so mark the areas
420 // that need to be cleared, and then only clear these areas once.
// Flush all pages previously marked by mark_clear_cache(), merging
// runs of consecutive dirty pages into single end_tcache_write() calls.
static void do_clear_cache(void)
  // each needs_clear_cache[i] word covers a 128K (32 x 4K) chunk
  for (i = 0; i < (1<<(TARGET_SIZE_2-17)); i++)
    u_int bitmap = needs_clear_cache[i];
    for (j = 0; j < 32; j++)
      if (!(bitmap & (1<<j)))
      // page j is the start of a dirty run in chunk i
      start = ndrc->translation_cache + i*131072 + j*4096;
      // extend the run across consecutive dirty pages
      for (j++; j < 32; j++) {
        if (!(bitmap & (1<<j)))
      // flush the whole contiguous run at once
      end_tcache_write(start, end);
    needs_clear_cache[i] = 0;
448 //#define DEBUG_CYCLE_COUNT 1
450 #define NO_CYCLE_PENALTY_THR 12
452 int cycle_multiplier; // 100 for 1.0
453 int cycle_multiplier_override;
// Scale a cycle count by the configured multiplier (percent, 100=1.0).
// A per-game override takes precedence over the global setting.
static int CLOCK_ADJUST(int x)
  int m = cycle_multiplier_override
    ? cycle_multiplier_override : cycle_multiplier;
  // 's' is the rounding bias term — NOTE(review): its declaration is
  // not visible in this view; presumably +/-1 matching the sign of x
  // so the division rounds away from zero.  Verify against full source.
  return (x * m + s * 50) / 100;
463 // is the op an unconditional jump?
464 static int is_ujump(int i)
466 return itype[i] == UJUMP || itype[i] == RJUMP
467 || (source[i] >> 16) == 0x1000; // beq r0, r0, offset // b offset
470 static int is_jump(int i)
472 return itype[i] == RJUMP || itype[i] == UJUMP || itype[i] == CJUMP || itype[i] == SJUMP;
// Map a guest virtual address to a jump-table page index.
static u_int get_page(u_int vaddr)
  // clear the top three bits to fold the KSEG address mirrors together
  u_int page=vaddr&~0xe0000000;
  if (page < 0x1000000)
    page &= ~0x0e00000; // RAM mirrors
  // non-RAM addresses are hashed into the 2048..4095 page range
  if(page>2048) page=2048+(page&2047);
485 // no virtual mem in PCSX
static u_int get_vpage(u_int vaddr)
  // with no virtual memory the "virtual" page equals the physical one
  return get_page(vaddr);
491 static struct ht_entry *hash_table_get(u_int vaddr)
493 return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
496 static void hash_table_add(struct ht_entry *ht_bin, u_int vaddr, void *tcaddr)
498 ht_bin->vaddr[1] = ht_bin->vaddr[0];
499 ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
500 ht_bin->vaddr[0] = vaddr;
501 ht_bin->tcaddr[0] = tcaddr;
504 // some messy ari64's code, seems to rely on unsigned 32bit overflow
// Will code at tcaddr survive a while before the circular translation
// cache overwrites it?  Distance from the emit pointer is scaled so
// the whole cache spans the 32-bit range; unsigned wraparound is
// intentional (see the comment above).
static int doesnt_expire_soon(void *tcaddr)
  u_int diff = (u_int)((u_char *)tcaddr - out) << (32-TARGET_SIZE_2);
  return diff > (u_int)(0x60000000 + (MAX_OUTPUT_BLOCK_SIZE << (32-TARGET_SIZE_2)));
511 // Get address from virtual address
512 // This is called from the recompiled JR/JALR instructions
// Resolve a guest address to compiled host code, compiling on demand.
// Order: clean block list -> dirty list (with re-validation) -> compile.
void noinline *get_addr(u_int vaddr)
  u_int page=get_page(vaddr);
  u_int vpage=get_vpage(vaddr);
  struct ll_entry *head;
  //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
  // clean-list hit: publish it in the hash table and return it
  if(head->vaddr==vaddr) {
    //printf("TRACE: count=%d next=%d (get_addr match %x: %p)\n",Count,next_interupt,vaddr,head->addr);
    hash_table_add(hash_table_get(vaddr), vaddr, head->addr);
  // no clean block — try the dirty list and verify it is unmodified
  head=jump_dirty[vpage];
  if(head->vaddr==vaddr) {
    //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %p)\n",Count,next_interupt,vaddr,head->addr);
    // Don't restore blocks which are about to expire from the cache
    if (doesnt_expire_soon(head->addr))
    if (verify_dirty(head->addr)) {
      //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
      // block verified clean: mark the page valid again
      invalid_code[vaddr>>12]=0;
      inv_code_start=inv_code_end=~0;
      restore_candidate[vpage>>3]|=1<<(vpage&7);
    else restore_candidate[page>>3]|=1<<(page&7);
    struct ht_entry *ht_bin = hash_table_get(vaddr);
    if (ht_bin->vaddr[0] == vaddr)
      ht_bin->tcaddr[0] = head->addr; // Replace existing entry
    hash_table_add(ht_bin, vaddr, head->addr);
  // nothing compiled for this address yet — compile it now
  //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
  int r=new_recompile_block(vaddr);
  if(r==0) return get_addr(vaddr);
  // Execute in unmapped page, generate pagefault execption
  // (COP0 exception state is set up and control goes to the vector)
  Cause=(vaddr<<31)|0x8;
  EPC=(vaddr&1)?vaddr-5:vaddr;
  Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
  EntryHi=BadVAddr&0xFFFFE000;
  return get_addr_ht(0x80000000);
565 // Look up address in hash table first
566 void *get_addr_ht(u_int vaddr)
568 //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
569 const struct ht_entry *ht_bin = hash_table_get(vaddr);
570 if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];
571 if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];
572 return get_addr(vaddr);
575 void clear_all_regs(signed char regmap[])
578 for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
581 static signed char get_reg(const signed char regmap[],int r)
584 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&®map[hr]==r) return hr;
588 // Find a register that is available for two consecutive cycles
589 static signed char get_reg2(signed char regmap1[], const signed char regmap2[], int r)
592 for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&®map1[hr]==r&®map2[hr]==r) return hr;
// Count host registers (excluding EXCLUDE_REG) with no guest register
// mapped, i.e. regmap entries below zero.
int count_free_regs(signed char regmap[])
  for(hr=0;hr<HOST_REGS;hr++)
    if(hr!=EXCLUDE_REG) {
      if(regmap[hr]<0) count++;
// Mark every host register holding guest register 'reg' as dirty
// (modified, needs writeback).  &63 strips flag bits from the mapping.
void dirty_reg(struct regstat *cur,signed char reg)
  for (hr=0;hr<HOST_REGS;hr++) {
    if((cur->regmap[hr]&63)==reg) {
// Record that guest register 'reg' holds the known constant 'value'.
// The value is cached per host register in current_constmap.
static void set_const(struct regstat *cur, signed char reg, uint32_t value)
  for (hr=0;hr<HOST_REGS;hr++) {
    if(cur->regmap[hr]==reg) {
      current_constmap[hr]=value;
// Forget any known-constant status for guest register 'reg'
// by clearing the per-host-register isconst bits.
static void clear_const(struct regstat *cur, signed char reg)
  for (hr=0;hr<HOST_REGS;hr++) {
    if((cur->regmap[hr]&63)==reg) {
      cur->isconst&=~(1<<hr);
// Is guest register 'reg' currently tracked as a known constant in
// some host register?
static int is_const(struct regstat *cur, signed char reg)
  for (hr=0;hr<HOST_REGS;hr++) {
    if((cur->regmap[hr]&63)==reg) {
      return (cur->isconst>>hr)&1;
// Fetch the cached constant value for guest register 'reg'.
// Logs a diagnostic if no host register maps it.
static uint32_t get_const(struct regstat *cur, signed char reg)
  for (hr=0;hr<HOST_REGS;hr++) {
    if(cur->regmap[hr]==reg) {
      return current_constmap[hr];
  SysPrintf("Unknown constant in r%d\n",reg);
669 // Least soon needed registers
670 // Look at the next ten instructions and see which registers
671 // will be used. Try not to reallocate these.
// "Least soon needed": fill hsn[] with, for each guest register, how
// many instructions ahead of i it is next used (smaller = needed
// sooner), so the allocator avoids evicting soon-needed registers.
void lsn(u_char hsn[], int i, int *preferred_reg)
  // Don't go past an unconditonal jump
  // record the distance j at which each source/target reg is touched
  if(rs1[i+j]) hsn[rs1[i+j]]=j;
  if(rs2[i+j]) hsn[rs2[i+j]]=j;
  if(rt1[i+j]) hsn[rt1[i+j]]=j;
  if(rt2[i+j]) hsn[rt2[i+j]]=j;
  if(itype[i+j]==STORE || itype[i+j]==STORELR) {
  // Stores can allocate zero
  // On some architectures stores need invc_ptr
  #if defined(HOST_IMM8)
  if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
  // stop the scan at a branch; remember where it was (b)
  if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
  // if the branch target is inside this block, keep scanning there
  if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
    // Follow first branch
    int t=(ba[i+b]-start)>>2;
    j=7-b;if(t+j>=slen) j=slen-t-1;
    // +b+2 penalty: uses past the branch are less urgent
    if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
    if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
    //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
    //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
  // TODO: preferred register based on backward branch
  // Delay slot should preferably not overwrite branch conditions or cycle count
  if (i > 0 && is_jump(i-1)) {
    if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
    if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
  // Coprocessor load/store needs FTEMP, even if not declared
  if(itype[i]==C1LS||itype[i]==C2LS) {
  // Load L/R also uses FTEMP as a temporary register
  if(itype[i]==LOADLR) {
  // Also SWL/SWR/SDL/SDR
  if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
  // Don't remove the miniht registers
  if(itype[i]==UJUMP||itype[i]==RJUMP)
758 // We only want to allocate registers if we're going to use them again soon
// Will guest register r be read again soon after instruction i?
// Returns a small value when a nearby use is found (worth keeping the
// register allocated), larger/zero when it is not needed.
int needed_again(int r, int i)
  if (i > 0 && is_ujump(i-1))
    // after an unconditional jump out of the block nothing is needed
    if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
      return 0; // Don't need any registers if exiting the block
  // Don't go past an unconditonal jump
  // 0x0d is a SYSCALL-class encoding; these end the scan too
  if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
  // rn = distance to the next read of r
  if(rs1[i+j]==r) rn=j;
  if(rs2[i+j]==r) rn=j;
  if((unneeded_reg[i+j]>>r)&1) rn=10;
  if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
  // if the branch target stays in this block, follow it
  if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
    // Follow first branch
    int t=(ba[i+b]-start)>>2;
    j=7-b;if(t+j>=slen) j=slen-t-1;
    if(!((unneeded_reg[t+j]>>r)&1)) {
      // uses past the branch carry a +b+2 distance penalty
      if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
      if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
821 // Try to match register allocations at the end of a loop with those
// When instruction i sits in a loop, prefer the host register that
// already maps guest register r at the loop entry, so allocations
// match across the back edge.  Falls back to the caller's hint hr.
int loop_reg(int i, int r, int hr)
  // Don't go past an unconditonal jump
  if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)
  if((unneeded_reg[i+k]>>r)&1) return hr;
  if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP))
  // backward branch into this block = loop back edge
  if(ba[i+k]>=start && ba[i+k]<(start+i*4))
    int t=(ba[i+k]-start)>>2;
    // reuse the mapping at the branch target if one exists
    int reg=get_reg(regs[t].regmap_entry,r);
    if(reg>=0) return reg;
    //reg=get_reg(regs[t+1].regmap_entry,r);
    //if(reg>=0) return reg;
864 // Allocate every register, preserving source/target regs
// Free every host register not holding a source or target of
// instruction i, making them all available for allocation.
void alloc_all(struct regstat *cur,int i)
  for(hr=0;hr<HOST_REGS;hr++) {
    if(hr!=EXCLUDE_REG) {
      // keep registers still needed by this instruction (&63 strips flags)
      if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
        ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
        cur->dirty&=~(1<<hr);
      // mapping 0 (guest r0 / always-zero) is also droppable
      if((cur->regmap[hr]&63)==0)
        cur->dirty&=~(1<<hr);
// Debug tracking of the host scratch register: acquire asserts that
// nobody else currently holds it, release hands it back.  The empty
// variants at the bottom are the non-debug build (selected by a
// preprocessor conditional).
static int host_tempreg_in_use;

static void host_tempreg_acquire(void)
  assert(!host_tempreg_in_use);
  host_tempreg_in_use = 1;

static void host_tempreg_release(void)
  host_tempreg_in_use = 0;

static void host_tempreg_acquire(void) {}
static void host_tempreg_release(void) {}
906 extern void gen_interupt();
907 extern void do_insn_cmp();
908 #define FUNCNAME(f) { f, " " #f }
909 static const struct {
912 } function_names[] = {
913 FUNCNAME(cc_interrupt),
914 FUNCNAME(gen_interupt),
915 FUNCNAME(get_addr_ht),
917 FUNCNAME(jump_handler_read8),
918 FUNCNAME(jump_handler_read16),
919 FUNCNAME(jump_handler_read32),
920 FUNCNAME(jump_handler_write8),
921 FUNCNAME(jump_handler_write16),
922 FUNCNAME(jump_handler_write32),
923 FUNCNAME(invalidate_addr),
924 FUNCNAME(jump_to_new_pc),
925 FUNCNAME(call_gteStall),
926 FUNCNAME(new_dyna_leave),
928 FUNCNAME(pcsx_mtc0_ds),
929 FUNCNAME(do_insn_cmp),
931 FUNCNAME(verify_code),
// Map a code address to a human-readable name (for logging) using the
// function_names table; the macro below is the stub for builds without
// the table (preprocessor-selected) and always yields "".
static const char *func_name(const void *a)
  for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
    if (function_names[i].addr == a)
      return function_names[i].name;
#define func_name(x) ""
948 #include "assem_x86.c"
951 #include "assem_x64.c"
954 #include "assem_arm.c"
957 #include "assem_arm64.c"
// Return an in-cache trampoline for far target f, allocating a slot
// in the trampoline table on first use.  ops[i] is the stub code that
// loads and jumps through tramp.f[i].
static void *get_trampoline(const void *f)
  // find f, or the first free (NULL) slot
  for (i = 0; i < ARRAY_SIZE(ndrc->tramp.f); i++) {
    if (ndrc->tramp.f[i] == f || ndrc->tramp.f[i] == NULL)
  if (i == ARRAY_SIZE(ndrc->tramp.f)) {
    SysPrintf("trampoline table is full, last func %p\n", f);
  if (ndrc->tramp.f[i] == NULL) {
    // publish the target pointer (the table lives in W^X memory)
    start_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
    ndrc->tramp.f[i] = f;
    end_tcache_write(&ndrc->tramp.f[i], &ndrc->tramp.f[i + 1]);
  return &ndrc->tramp.ops[i];
// Emit a jump to f, routing through a trampoline when f is outside
// the architecture's direct branch range.
static void emit_far_jump(const void *f)
  if (can_jump_or_call(f)) {
  f = get_trampoline(f);
// Emit a call to f, routing through a trampoline when f is outside
// the architecture's direct call range.
static void emit_far_call(const void *f)
  if (can_jump_or_call(f)) {
  f = get_trampoline(f);
1002 // Add virtual address mapping to linked list
// Push a new vaddr -> compiled-code mapping onto the front of the
// given linked list.  The entry is heap-allocated and only freed by
// the list-removal routines; there is no recovery path on OOM.
void ll_add(struct ll_entry **head,int vaddr,void *addr)
  struct ll_entry *new_entry;
  new_entry=malloc(sizeof(struct ll_entry));
  assert(new_entry!=NULL);
  new_entry->vaddr=vaddr;
  new_entry->reg_sv_flags=0;
  new_entry->addr=addr;
  // link at the front; callers rely on *head being the newest entry
  new_entry->next=*head;
1015 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
1017 ll_add(head,vaddr,addr);
1018 (*head)->reg_sv_flags=reg_sv_flags;
1021 // Check if an address is already compiled
1022 // but don't return addresses which are about to expire from the cache
// Check if an address is already compiled
// but don't return addresses which are about to expire from the cache
void *check_addr(u_int vaddr)
  // fast path: hash table, but only clean, long-lived entries
  struct ht_entry *ht_bin = hash_table_get(vaddr);
  for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
    if (ht_bin->vaddr[i] == vaddr)
      if (doesnt_expire_soon((u_char *)ht_bin->tcaddr[i] - MAX_OUTPUT_BLOCK_SIZE))
        if (isclean(ht_bin->tcaddr[i]))
          return ht_bin->tcaddr[i];
  // slow path: walk the page's block list
  u_int page=get_page(vaddr);
  struct ll_entry *head;
  while (head != NULL) {
    if (head->vaddr == vaddr) {
      if (doesnt_expire_soon(head->addr)) {
        // Update existing entry with current address
        if (ht_bin->vaddr[0] == vaddr) {
          ht_bin->tcaddr[0] = head->addr;
        if (ht_bin->vaddr[1] == vaddr) {
          ht_bin->tcaddr[1] = head->addr;
        // Insert into hash table with low priority.
        // Don't evict existing entries, as they are probably
        // addresses that are being accessed frequently.
        if (ht_bin->vaddr[0] == -1) {
          ht_bin->vaddr[0] = vaddr;
          ht_bin->tcaddr[0] = head->addr;
        else if (ht_bin->vaddr[1] == -1) {
          ht_bin->vaddr[1] = vaddr;
          ht_bin->tcaddr[1] = head->addr;
1067 void remove_hash(int vaddr)
1069 //printf("remove hash: %x\n",vaddr);
1070 struct ht_entry *ht_bin = hash_table_get(vaddr);
1071 if (ht_bin->vaddr[1] == vaddr) {
1072 ht_bin->vaddr[1] = -1;
1073 ht_bin->tcaddr[1] = NULL;
1075 if (ht_bin->vaddr[0] == vaddr) {
1076 ht_bin->vaddr[0] = ht_bin->vaddr[1];
1077 ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
1078 ht_bin->vaddr[1] = -1;
1079 ht_bin->tcaddr[1] = NULL;
// Unlink (and unhash) every list entry whose compiled code falls in
// the (addr >> shift) region — used when that region of the
// translation cache is about to be reused.
void ll_remove_matching_addrs(struct ll_entry **head,uintptr_t addr,int shift)
  struct ll_entry *next;
  // also match blocks that merely extend into the region
  if(((uintptr_t)((*head)->addr)>>shift)==(addr>>shift) ||
    ((uintptr_t)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
    inv_debug("EXP: Remove pointer to %p (%x)\n",(*head)->addr,(*head)->vaddr);
    remove_hash((*head)->vaddr);
  head=&((*head)->next);
1103 // Remove all entries from linked list
// Remove all entries from linked list
void ll_clear(struct ll_entry **head)
  struct ll_entry *cur;
  struct ll_entry *next;
1118 // Dereference the pointers and remove if it matches
// For each entry, dereference the outgoing jump and, if it points into
// the (addr >> shift) region being reclaimed, redirect the jump back
// to its own stub so the link is re-resolved next time.
static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift)
  uintptr_t ptr = (uintptr_t)get_pointer(head->addr);
  inv_debug("EXP: Lookup pointer to %lx at %p (%x)\n",(long)ptr,head->addr,head->vaddr);
  if(((ptr>>shift)==(addr>>shift)) ||
    (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
    inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
    // patch the jump to target its own resolver stub again
    void *host_addr=find_extjump_insn(head->addr);
    mark_clear_cache(host_addr);
    set_jump_target(host_addr, head->addr);
1136 // This is called when we write to a compiled block (see do_invstub)
// Drop all compiled blocks registered on one 4K guest page and unlink
// any external jumps into it.
// This is called when we write to a compiled block (see do_invstub)
static void invalidate_page(u_int page)
  struct ll_entry *head;
  struct ll_entry *next;
  // remove each block's hash entries
  inv_debug("INVALIDATE: %x\n",head->vaddr);
  remove_hash(head->vaddr);
  // detach outgoing links pointing at this page's blocks
  head=jump_out[page];
  inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
  void *host_addr=find_extjump_insn(head->addr);
  mark_clear_cache(host_addr);
  set_jump_target(host_addr, head->addr);
// Invalidate the page containing 'block' plus the adjacent pages
// [first, last] that the block's code actually spans.
static void invalidate_block_range(u_int block, u_int first, u_int last)
  u_int page=get_page(block<<12);
  //printf("first=%d last=%d\n",first,last);
  invalidate_page(page);
  assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
  assert(last<page+5);
  // Invalidate the adjacent pages if a block crosses a 4K boundary
  invalidate_page(first);
  for(first=page+1;first<last;first++) {
    invalidate_page(first);
  // Don't trap writes
  invalid_code[block]=1;
  // the mini hash table may hold stale targets now
  memset(mini_ht,-1,sizeof(mini_ht));
// Invalidate all compiled code on guest page 'block' (page index =
// address >> 12), widening the range to the pages actually covered by
// any dirty block that spans this one.
void invalidate_block(u_int block)
  u_int page=get_page(block<<12);
  u_int vpage=get_vpage(block<<12);
  inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
  //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
  struct ll_entry *head;
  head=jump_dirty[vpage];
  //printf("page=%d vpage=%d\n",page,vpage);
  if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
    u_char *start, *end;
    // bounds of the guest code this block was compiled from
    get_bounds(head->addr, &start, &end);
    //printf("start: %p end: %p\n", start, end);
    if (page < 2048 && start >= rdram && end < rdram+RAM_SIZE) {
      if (((start-rdram)>>12) <= page && ((end-1-rdram)>>12) >= page) {
        // expand [first, last] to cover the whole source range
        if ((((start-rdram)>>12)&2047) < first) first = ((start-rdram)>>12)&2047;
        if ((((end-1-rdram)>>12)&2047) > last) last = ((end-1-rdram)>>12)&2047;
  invalidate_block_range(block,first,last);
// Invalidate compiled code overlapping a single written guest address,
// and maintain [inv_code_start, inv_code_end] — the range known to
// contain no compiled code, used by callers to skip future checks.
void invalidate_addr(u_int addr)
  // this check is done by the caller
  //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
  u_int page=get_vpage(addr);
  if(page<2048) { // RAM
    struct ll_entry *head;
    u_int addr_min=~0, addr_max=0;
    u_int mask=RAM_SIZE-1;
    // canonical KSEG0 form of the written address
    u_int addr_main=0x80000000|(addr&mask);
    inv_code_start=addr_main&~0xfff;
    inv_code_end=addr_main|0xfff;
    // must check previous page too because of spans..
    inv_code_start-=0x1000;
    for(;pg1<=page;pg1++) {
      for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
        u_char *start_h, *end_h;
        get_bounds(head->addr, &start_h, &end_h);
        start = (uintptr_t)start_h - ram_offset;
        end = (uintptr_t)end_h - ram_offset;
        if(start<=addr_main&&addr_main<end) {
          // hit: grow the range of code we must invalidate
          if(start<addr_min) addr_min=start;
          if(end>addr_max) addr_max=end;
        else if(addr_main<start) {
          // miss: shrink the known-code-free window instead
          if(start<inv_code_end)
            inv_code_end=start-1;
          if(end>inv_code_start)
    inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
    inv_code_start=inv_code_end=~0;
    invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
    // no hit: just record the code-free window for the fast path
    inv_code_start=(addr&~mask)|(inv_code_start&mask);
    inv_code_end=(addr&~mask)|(inv_code_end&mask);
    inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
  // non-RAM: fall back to whole-page invalidation
  invalidate_block(addr>>12);
1273 // This is called when loading a save state.
1274 // Anything could have changed, so invalidate everything.
// This is called when loading a save state.
// Anything could have changed, so invalidate everything.
void invalidate_all_pages(void)
  for(page=0;page<4096;page++)
    invalidate_page(page);
  // mark every still-valid page as a candidate for later restoration
  for(page=0;page<1048576;page++)
    if(!invalid_code[page]) {
      restore_candidate[(page&2047)>>3]|=1<<(page&7);
      restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
  // wipe the JR/JALR mini hash table too
  memset(mini_ht,-1,sizeof(mini_ht));
// Emit the out-of-line invalidation stub n: restore the register
// context, call invalidate_addr() for the written address, and return
// to the interrupted compiled code.
static void do_invstub(int n)
  u_int reglist=stubs[n].a;
  set_jump_target(stubs[n].addr, out);
  // stub.b, when set, holds the register carrying the written address
  if(stubs[n].b!=0) emit_mov(stubs[n].b,0);
  emit_far_call(invalidate_addr);
  restore_regs(reglist);
  emit_jmp(stubs[n].retaddr); // return address
1303 // Add an entry to jump_out after making a link
1304 // src should point to code by emit_extjump2()
1305 void add_link(u_int vaddr,void *src)
1307 u_int page=get_page(vaddr);
1308 inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
1309 check_extjump2(src);
1310 ll_add(jump_out+page,vaddr,src);
1311 //void *ptr=get_pointer(src);
1312 //inv_debug("add_link: Pointer is to %p\n",ptr);
1315 // If a code block was found to be unmodified (bit was set in
1316 // restore_candidate) and it remains unmodified (bit is clear
1317 // in invalid_code) then move the entries for that 4K page from
1318 // the dirty list to the clean list.
// If a code block was found to be unmodified (bit was set in
// restore_candidate) and it remains unmodified (bit is clear
// in invalid_code) then move the entries for that 4K page from
// the dirty list to the clean list.
void clean_blocks(u_int page)
  struct ll_entry *head;
  inv_debug("INV: clean_blocks page=%d\n",page);
  head=jump_dirty[page];
  if(!invalid_code[head->vaddr>>12]) {
    // Don't restore blocks which are about to expire from the cache
    if (doesnt_expire_soon(head->addr)) {
      if(verify_dirty(head->addr)) {
        u_char *start, *end;
        //printf("Possibly Restore %x (%p)\n",head->vaddr, head->addr);
        get_bounds(head->addr, &start, &end);
        if (start - rdram < RAM_SIZE) {
          // every source page of the block must still be valid
          for (i = (start-rdram+0x80000000)>>12; i <= (end-1-rdram+0x80000000)>>12; i++) {
            inv|=invalid_code[i];
        else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
        // patched entry point that skips the dirty-verify prologue
        void *clean_addr = get_clean_addr(head->addr);
        if (doesnt_expire_soon(clean_addr)) {
          inv_debug("INV: Restored %x (%p/%p)\n",head->vaddr, head->addr, clean_addr);
          //printf("page=%x, addr=%x\n",page,head->vaddr);
          //assert(head->vaddr>>12==(page|0x80000));
          ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
          struct ht_entry *ht_bin = hash_table_get(head->vaddr);
          if (ht_bin->vaddr[0] == head->vaddr)
            ht_bin->tcaddr[0] = clean_addr; // Replace existing entry
          if (ht_bin->vaddr[1] == head->vaddr)
            ht_bin->tcaddr[1] = clean_addr; // Replace existing entry
1364 /* Register allocation */
1366 // Note: registers are allocated clean (unmodified state)
1367 // if you intend to modify the register, you must call dirty_reg().
1368 static void alloc_reg(struct regstat *cur,int i,signed char reg)
1371 int preferred_reg = (reg&7);
1372 if(reg==CCREG) preferred_reg=HOST_CCREG;
1373 if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
1375 // Don't allocate unused registers
1376 if((cur->u>>reg)&1) return;
1378 // see if it's already allocated
1379 for(hr=0;hr<HOST_REGS;hr++)
1381 if(cur->regmap[hr]==reg) return;
1384 // Keep the same mapping if the register was already allocated in a loop
1385 preferred_reg = loop_reg(i,reg,preferred_reg);
1387 // Try to allocate the preferred register
1388 if(cur->regmap[preferred_reg]==-1) {
1389 cur->regmap[preferred_reg]=reg;
1390 cur->dirty&=~(1<<preferred_reg);
1391 cur->isconst&=~(1<<preferred_reg);
1394 r=cur->regmap[preferred_reg];
1397 cur->regmap[preferred_reg]=reg;
1398 cur->dirty&=~(1<<preferred_reg);
1399 cur->isconst&=~(1<<preferred_reg);
1403 // Clear any unneeded registers
1404 // We try to keep the mapping consistent, if possible, because it
1405 // makes branches easier (especially loops). So we try to allocate
1406 // first (see above) before removing old mappings. If this is not
1407 // possible then go ahead and clear out the registers that are no
1409 for(hr=0;hr<HOST_REGS;hr++)
1414 if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
1417 // Try to allocate any available register, but prefer
1418 // registers that have not been used recently.
1420 for(hr=0;hr<HOST_REGS;hr++) {
1421 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1422 if(regs[i-1].regmap[hr]!=rs1[i-1]&®s[i-1].regmap[hr]!=rs2[i-1]&®s[i-1].regmap[hr]!=rt1[i-1]&®s[i-1].regmap[hr]!=rt2[i-1]) {
1423 cur->regmap[hr]=reg;
1424 cur->dirty&=~(1<<hr);
1425 cur->isconst&=~(1<<hr);
1431 // Try to allocate any available register
1432 for(hr=0;hr<HOST_REGS;hr++) {
1433 if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1434 cur->regmap[hr]=reg;
1435 cur->dirty&=~(1<<hr);
1436 cur->isconst&=~(1<<hr);
1441 // Ok, now we have to evict someone
1442 // Pick a register we hopefully won't need soon
1443 u_char hsn[MAXREG+1];
1444 memset(hsn,10,sizeof(hsn));
1446 lsn(hsn,i,&preferred_reg);
1447 //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
1448 //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1450 // Don't evict the cycle count at entry points, otherwise the entry
1451 // stub will have to write it.
1452 if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
1453 if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
1456 // Alloc preferred register if available
1457 if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
1458 for(hr=0;hr<HOST_REGS;hr++) {
1459 // Evict both parts of a 64-bit register
1460 if((cur->regmap[hr]&63)==r) {
1462 cur->dirty&=~(1<<hr);
1463 cur->isconst&=~(1<<hr);
1466 cur->regmap[preferred_reg]=reg;
1469 for(r=1;r<=MAXREG;r++)
1471 if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
1472 for(hr=0;hr<HOST_REGS;hr++) {
1473 if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1474 if(cur->regmap[hr]==r) {
1475 cur->regmap[hr]=reg;
1476 cur->dirty&=~(1<<hr);
1477 cur->isconst&=~(1<<hr);
1488 for(r=1;r<=MAXREG;r++)
1491 for(hr=0;hr<HOST_REGS;hr++) {
1492 if(cur->regmap[hr]==r) {
1493 cur->regmap[hr]=reg;
1494 cur->dirty&=~(1<<hr);
1495 cur->isconst&=~(1<<hr);
1502 SysPrintf("This shouldn't happen (alloc_reg)");abort();
// Allocate a temporary register. This is done without regard to
// dirty status or whether the register we request is on the unneeded list
// Note: This will only allocate one register, even if called multiple times
//
// Search order is top-down (unlike alloc_reg) so temporaries tend to land
// in the high host registers; falls back to an unneeded register, then to
// evicting via the same hsn heuristic as alloc_reg.
static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
  int preferred_reg = -1;

  // see if it's already allocated
  for(hr=0;hr<HOST_REGS;hr++)
    if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;

  // Try to allocate any available register
  for(hr=HOST_REGS-1;hr>=0;hr--) {
    if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
      cur->regmap[hr]=reg;
      cur->dirty&=~(1<<hr);
      cur->isconst&=~(1<<hr);

  // Find an unneeded register
  for(hr=HOST_REGS-1;hr>=0;hr--)
      // A register whose guest value was unneeded after the previous
      // instruction can be taken over without a writeback.
      if(i==0||((unneeded_reg[i-1]>>r)&1)) {
        cur->regmap[hr]=reg;
        cur->dirty&=~(1<<hr);
        cur->isconst&=~(1<<hr);

  // Ok, now we have to evict someone
  // Pick a register we hopefully won't need soon
  // TODO: we might want to follow unconditional jumps here
  // TODO: get rid of dupe code and make this into a function
  u_char hsn[MAXREG+1];
  memset(hsn,10,sizeof(hsn));
  lsn(hsn,i,&preferred_reg);
  //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);

  // Don't evict the cycle count at entry points, otherwise the entry
  // stub will have to write it.
  if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
  if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;

  // Evict a register whose hsn distance matches, avoiding registers the
  // previous instruction still reads or writes.
  for(r=1;r<=MAXREG;r++)
    if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
      for(hr=0;hr<HOST_REGS;hr++) {
        if(hr!=HOST_CCREG||hsn[CCREG]>2) {
          if(cur->regmap[hr]==r) {
            cur->regmap[hr]=reg;
            cur->dirty&=~(1<<hr);
            cur->isconst&=~(1<<hr);
  // Last resort: evict anything at all.
  for(r=1;r<=MAXREG;r++)
      for(hr=0;hr<HOST_REGS;hr++) {
        if(cur->regmap[hr]==r) {
          cur->regmap[hr]=reg;
          cur->dirty&=~(1<<hr);
          cur->isconst&=~(1<<hr);
  SysPrintf("This shouldn't happen");abort();
// Register allocation for move-type instructions (e.g. MFHI/MFLO style
// moves): only the destination needs a host register; constants are
// invalidated on both source and destination.
static void mov_alloc(struct regstat *current,int i)
  // Note: Don't need to actually alloc the source registers
  //alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,rt1[i]);
  clear_const(current,rs1[i]);
  clear_const(current,rt1[i]);
  dirty_reg(current,rt1[i]);
// Register allocation for shift-by-immediate instructions.
// For the 32-bit forms (SLL/SRL/SRA) the result is constant-propagated
// when the source value is known at compile time.
static void shiftimm_alloc(struct regstat *current,int i)
  if(opcode2[i]<=0x3) // SLL/SRL/SRA
    if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
    alloc_reg(current,i,rt1[i]);
    dirty_reg(current,rt1[i]);
    if(is_const(current,rs1[i])) {
      int v=get_const(current,rs1[i]);
      // Fold the shift at compile time: <<, logical >>, arithmetic >>.
      if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
      if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
      if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
    else clear_const(current,rt1[i]);
    clear_const(current,rs1[i]);
    clear_const(current,rt1[i]);
  // 64-bit doubleword shifts (not applicable on this 32-bit target;
  // handling not visible here — TODO confirm against full source)
  if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
  if(opcode2[i]==0x3c) // DSLL32
  if(opcode2[i]==0x3e) // DSRL32
  if(opcode2[i]==0x3f) // DSRA32
// Register allocation for variable shifts (shift amount in a register).
static void shift_alloc(struct regstat *current,int i)
  if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
    if(rs1[i]) alloc_reg(current,i,rs1[i]);
    if(rs2[i]) alloc_reg(current,i,rs2[i]);
    alloc_reg(current,i,rt1[i]);
    // Destination aliases the shift-amount register: a scratch register
    // is needed so the amount isn't clobbered mid-operation.
    if(rt1[i]==rs2[i]) {
      alloc_reg_temp(current,i,-1);
      minimum_free_regs[i]=1;
  } else { // DSLLV/DSRLV/DSRAV
  clear_const(current,rs1[i]);
  clear_const(current,rs2[i]);
  clear_const(current,rt1[i]);
  dirty_reg(current,rt1[i]);
// Register allocation for three-register ALU operations.
// When only one source is live, the other is allocated only if the
// guest register is needed again later (needed_again).
static void alu_alloc(struct regstat *current,int i)
  if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
    if(rs1[i]&&rs2[i]) {
      alloc_reg(current,i,rs1[i]);
      alloc_reg(current,i,rs2[i]);
    if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
    if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
    alloc_reg(current,i,rt1[i]);
  if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
    alloc_reg(current,i,rs1[i]);
    alloc_reg(current,i,rs2[i]);
    alloc_reg(current,i,rt1[i]);
  if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
    if(rs1[i]&&rs2[i]) {
      alloc_reg(current,i,rs1[i]);
      alloc_reg(current,i,rs2[i]);
    if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
    if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
    alloc_reg(current,i,rt1[i]);
  // 64-bit doubleword arithmetic (handling not visible here)
  if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
  clear_const(current,rs1[i]);
  clear_const(current,rs2[i]);
  clear_const(current,rt1[i]);
  dirty_reg(current,rt1[i]);
// Register allocation for immediate-operand instructions (ADDI/ANDI/
// ORI/XORI/SLTI/LUI...).  Performs constant propagation when the source
// register value is known.
static void imm16_alloc(struct regstat *current,int i)
  if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  if(rt1[i]) alloc_reg(current,i,rt1[i]);
  if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
  else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
    clear_const(current,rs1[i]);
    clear_const(current,rt1[i]);
  else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
    if(is_const(current,rs1[i])) {
      int v=get_const(current,rs1[i]);
      // Fold the bitwise op with the known source constant.
      if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
      if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
      if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
    else clear_const(current,rt1[i]);
  else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
    if(is_const(current,rs1[i])) {
      int v=get_const(current,rs1[i]);
      set_const(current,rt1[i],v+imm[i]);
    else clear_const(current,rt1[i]);
  // LUI result is always a known constant.
  set_const(current,rt1[i],imm[i]<<16); // LUI
  dirty_reg(current,rt1[i]);
// Register allocation for load instructions.  Loads to r0 or to an
// unneeded destination are treated as dummy loads, but a register is
// still required for address generation (page-fault/IO semantics).
static void load_alloc(struct regstat *current,int i)
  clear_const(current,rt1[i]);
  //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
  if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  if(rt1[i]&&!((current->u>>rt1[i])&1)) {
    alloc_reg(current,i,rt1[i]);
    assert(get_reg(current->regmap,rt1[i])>=0);
    if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
    else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
    dirty_reg(current,rt1[i]);
    // LWL/LWR need a temporary register for the old value
    if(opcode[i]==0x22||opcode[i]==0x26)
      alloc_reg(current,i,FTEMP);
      alloc_reg_temp(current,i,-1);
      minimum_free_regs[i]=1;
  // Load to r0 or unneeded register (dummy load)
  // but we still need a register to calculate the address
  if(opcode[i]==0x22||opcode[i]==0x26)
    alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
  if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
// Register allocation for store instructions: needs the data register,
// optionally the base register, and a temporary for address generation.
void store_alloc(struct regstat *current,int i)
  clear_const(current,rs2[i]);
  if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,rs2[i]);
  if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
  #if defined(HOST_IMM8)
  // On CPUs without 32-bit immediates we need a pointer to invalid_code
  else alloc_reg(current,i,INVCP);
  if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWL/SDL/SDR
    alloc_reg(current,i,FTEMP);
  // We need a temporary register for address generation
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
// Register allocation for coprocessor-1 loads/stores (LWC1/SWC1 etc.):
// needs the COP status register, FTEMP for the data, and an address temp.
void c1ls_alloc(struct regstat *current,int i)
  //clear_const(current,rs1[i]); // FIXME
  clear_const(current,rt1[i]);
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,CSREG); // Status
  alloc_reg(current,i,FTEMP);
  if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
  #if defined(HOST_IMM8)
  // On CPUs without 32-bit immediates we need a pointer to invalid_code
  else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
    alloc_reg(current,i,INVCP);
  // We need a temporary register for address generation
  alloc_reg_temp(current,i,-1);
// Register allocation for coprocessor-2 (GTE) loads/stores (LWC2/SWC2):
// FTEMP holds the transferred value; an address temp is always required.
void c2ls_alloc(struct regstat *current,int i)
  clear_const(current,rt1[i]);
  if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
  alloc_reg(current,i,FTEMP);
#if defined(HOST_IMM8)
  // On CPUs without 32-bit immediates we need a pointer to invalid_code
  if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
    alloc_reg(current,i,INVCP);
  // We need a temporary register for address generation
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
#ifndef multdiv_alloc
// Register allocation for MULT/MULTU/DIV/DIVU: the results go to the
// HI and LO guest registers.  Overridable per-target via the
// multdiv_alloc macro.
void multdiv_alloc(struct regstat *current,int i)
  // case 0x1D: DMULTU
  clear_const(current,rs1[i]);
  clear_const(current,rs2[i]);
  if((opcode2[i]&4)==0) // 32-bit
    // HI/LO are written, so they must be considered needed.
    current->u&=~(1LL<<HIREG);
    current->u&=~(1LL<<LOREG);
    alloc_reg(current,i,HIREG);
    alloc_reg(current,i,LOREG);
    alloc_reg(current,i,rs1[i]);
    alloc_reg(current,i,rs2[i]);
    dirty_reg(current,HIREG);
    dirty_reg(current,LOREG);
  // Multiply by zero is zero.
  // MIPS does not have a divide by zero exception.
  // The result is undefined, we return zero.
  alloc_reg(current,i,HIREG);
  alloc_reg(current,i,LOREG);
  dirty_reg(current,HIREG);
  dirty_reg(current,LOREG);
// Register allocation for coprocessor-0 operations.  MFC0/MTC0 spill all
// registers (alloc_all) because the C handler may touch anything; TLB/ERET
// style ops reserve every host register.
void cop0_alloc(struct regstat *current,int i)
  if(opcode2[i]==0) // MFC0
    clear_const(current,rt1[i]);
    alloc_all(current,i);
    alloc_reg(current,i,rt1[i]);
    dirty_reg(current,rt1[i]);
  else if(opcode2[i]==4) // MTC0
    clear_const(current,rs1[i]);
    alloc_reg(current,i,rs1[i]);
    alloc_all(current,i);
  alloc_all(current,i); // FIXME: Keep r0
  alloc_reg(current,i,0);
  // TLBR/TLBWI/TLBWR/TLBP/ERET
  assert(opcode2[i]==0x10);
  alloc_all(current,i);
  minimum_free_regs[i]=HOST_REGS;
// Register allocation for coprocessor-2 (GTE) register moves.
// Allocates the cycle counter for GTE stall accounting.
static void cop2_alloc(struct regstat *current,int i)
  if (opcode2[i] < 3) // MFC2/CFC2
    alloc_cc(current,i); // for stalls
    dirty_reg(current,CCREG);
    clear_const(current,rt1[i]);
    alloc_reg(current,i,rt1[i]);
    dirty_reg(current,rt1[i]);
  else if (opcode2[i] > 3) // MTC2/CTC2
    clear_const(current,rs1[i]);
    alloc_reg(current,i,rs1[i]);
    // Source is r0: still need a zero-valued register for the transfer.
    alloc_reg(current,i,0);
  alloc_reg_temp(current,i,-1);
  minimum_free_regs[i]=1;
// Register allocation for GTE compute operations: only the cycle counter
// (for stall accounting) and one scratch register are needed.
void c2op_alloc(struct regstat *current,int i)
  alloc_cc(current,i); // for stalls
  dirty_reg(current,CCREG);
  alloc_reg_temp(current,i,-1);
// Register allocation for SYSCALL: everything is spilled since control
// transfers to the exception handler; no host registers stay free.
void syscall_alloc(struct regstat *current,int i)
  alloc_cc(current,i);
  dirty_reg(current,CCREG);
  alloc_all(current,i);
  minimum_free_regs[i]=HOST_REGS;
// Register allocation for the instruction in a branch delay slot:
// dispatches to the per-type alloc routine.  A branch in a delay slot
// is not supported (speculative precompilation is disabled instead).
void delayslot_alloc(struct regstat *current,int i)
  assem_debug("jump in the delay slot. this shouldn't happen.\n");//abort();
  SysPrintf("Disabled speculative precompilation\n");
  imm16_alloc(current,i);
  load_alloc(current,i);
  store_alloc(current,i);
  alu_alloc(current,i);
  shift_alloc(current,i);
  multdiv_alloc(current,i);
  shiftimm_alloc(current,i);
  mov_alloc(current,i);
  cop0_alloc(current,i);
  cop2_alloc(current,i);
  c1ls_alloc(current,i);
  c2ls_alloc(current,i);
  c2op_alloc(current,i);
// Special case where a branch and delay slot span two pages in virtual memory
// All registers are reserved: the stub generated for this case may need
// any of them.
static void pagespan_alloc(struct regstat *current,int i)
  current->wasconst=0;
  minimum_free_regs[i]=HOST_REGS;
  alloc_all(current,i);
  alloc_cc(current,i);
  dirty_reg(current,CCREG);
  if(opcode[i]==3) // JAL
    // Link register r31 receives the return address.
    alloc_reg(current,i,31);
    dirty_reg(current,31);
  if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
    alloc_reg(current,i,rs1[i]);
    alloc_reg(current,i,rt1[i]);
    dirty_reg(current,rt1[i]);
  if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
    if(rs1[i]) alloc_reg(current,i,rs1[i]);
    if(rs2[i]) alloc_reg(current,i,rs2[i]);
  if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
    if(rs1[i]) alloc_reg(current,i,rs1[i]);
// Append an out-of-line stub record to the global stubs[] array; the
// stub code itself is emitted later.  a..e are type-specific parameters.
static void add_stub(enum stub_type type, void *addr, void *retaddr,
  u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
  assert(stubcount < ARRAY_SIZE(stubs));
  stubs[stubcount].type = type;
  stubs[stubcount].addr = addr;      // branch location to patch
  stubs[stubcount].retaddr = retaddr; // where the stub returns to
  stubs[stubcount].a = a;
  stubs[stubcount].b = b;
  stubs[stubcount].c = c;
  stubs[stubcount].d = d;
  stubs[stubcount].e = e;
// Convenience wrapper for add_stub with the common parameter layout:
// instruction index, address register, register-state snapshot, cycle
// adjustment and host register list.
static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
  int i, int addr_reg, const struct regstat *i_regs, int ccadj, u_int reglist)
  add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
// Write out a single register
// Stores the host register currently mapped to guest register r back to
// the guest register file.  The &63 comparison also matches 64-bit
// upper-half mappings.
static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
  for(hr=0;hr<HOST_REGS;hr++) {
    if(hr!=EXCLUDE_REG) {
      if((regmap[hr]&63)==r) {
        assert(regmap[hr]<64);
        emit_storereg(r,hr);
// Write back guest registers that were dirty before (dirty_pre) but are
// no longer tracked as dirty (dirty), provided the register is still
// needed (not in the unneeded mask u).
static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
  //if(dirty_pre==dirty) return;
  for(hr=0;hr<HOST_REGS;hr++) {
    if(hr!=EXCLUDE_REG) {
      if(((~u)>>(reg&63))&1) {
        // Dirty before but not after: value must be flushed to memory.
        if(((dirty_pre&~dirty)>>hr)&1) {
          emit_storereg(reg,hr);
// Move values from host registers a0/a1 into argument registers 0 and 1
// for a C function call, handling the swap case where the registers
// would overwrite each other.
static void pass_args(int a0, int a1)
  // Swap case: rotate through register 2 to avoid clobbering.
  emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
  else if(a0!=0&&a1==0) {
  if (a0>=0) emit_mov(a0,0);
  // Straightforward case: move each value only if not already in place
  // (a negative register number means "no value to pass").
  if(a0>=0&&a0!=0) emit_mov(a0,0);
  if(a1>=0&&a1!=1) emit_mov(a1,1);
// Emit host code for three-register ALU instructions (ADD/SUB family,
// SLT/SLTU, and bitwise AND/OR/XOR/NOR), using the register mapping in
// i_regs.  A negative get_reg result means the guest register is not in
// a host register.
static void alu_assemble(int i,struct regstat *i_regs)
  if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
    signed char s1,s2,t;
    t=get_reg(i_regs->regmap,rt1[i]);
    s1=get_reg(i_regs->regmap,rs1[i]);
    s2=get_reg(i_regs->regmap,rs2[i]);
    if(rs1[i]&&rs2[i]) {
      if(opcode2[i]&2) emit_sub(s1,s2,t);
      else emit_add(s1,s2,t);
      // Only one source: result is a copy (add) or negation (sub).
      if(s1>=0) emit_mov(s1,t);
      else emit_loadreg(rs1[i],t);
      if(opcode2[i]&2) emit_neg(s2,t);
      else emit_mov(s2,t);
      emit_loadreg(rs2[i],t);
      if(opcode2[i]&2) emit_neg(t,t);
    else emit_zeroreg(t); // both sources are r0
  if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
  if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
    signed char s1l,s2l,t;
    t=get_reg(i_regs->regmap,rt1[i]);
    s1l=get_reg(i_regs->regmap,rs1[i]);
    s2l=get_reg(i_regs->regmap,rs2[i]);
    if(rs2[i]==0) // rx<r0
      if(opcode2[i]==0x2a&&rs1[i]!=0) { // SLT
        // Sign bit of rs1 is exactly "rs1 < 0".
        emit_shrimm(s1l,31,t);
      else // SLTU (unsigned can not be less than zero, 0<0)
    else if(rs1[i]==0) // r0<rx
      if(opcode2[i]==0x2a) // SLT
        emit_set_gz32(s2l,t);
      else // SLTU (set if not zero)
        emit_set_nz32(s2l,t);
      assert(s1l>=0);assert(s2l>=0);
      if(opcode2[i]==0x2a) // SLT
        emit_set_if_less32(s1l,s2l,t);
        emit_set_if_carry32(s1l,s2l,t);
  if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
    signed char s1l,s2l,tl;
    tl=get_reg(i_regs->regmap,rt1[i]);
    s1l=get_reg(i_regs->regmap,rs1[i]);
    s2l=get_reg(i_regs->regmap,rs2[i]);
    if(rs1[i]&&rs2[i]) {
      if(opcode2[i]==0x24) { // AND
        emit_and(s1l,s2l,tl);
      if(opcode2[i]==0x25) { // OR
        emit_or(s1l,s2l,tl);
      if(opcode2[i]==0x26) { // XOR
        emit_xor(s1l,s2l,tl);
      if(opcode2[i]==0x27) { // NOR
        emit_or(s1l,s2l,tl);
      // One source is r0:
      if(opcode2[i]==0x24) { // AND
      if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
        // OR/XOR with zero is a plain copy of the non-zero source.
        if(s1l>=0) emit_mov(s1l,tl);
        else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
        if(s2l>=0) emit_mov(s2l,tl);
        else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
      else emit_zeroreg(tl);
      if(opcode2[i]==0x27) { // NOR
        // NOR with zero is a bitwise NOT of the other source.
        if(s1l>=0) emit_not(s1l,tl);
        emit_loadreg(rs1[i],tl);
        if(s2l>=0) emit_not(s2l,tl);
        emit_loadreg(rs2[i],tl);
      else emit_movimm(-1,tl); // NOR of r0,r0 is all ones
// Emit host code for immediate-operand instructions (LUI, ADDI(U),
// SLTI(U), ANDI/ORI/XORI).  Registers marked constant in i_regs->isconst
// are skipped — their value is materialized elsewhere from constmap.
void imm16_assemble(int i,struct regstat *i_regs)
  if (opcode[i]==0x0f) { // LUI
    t=get_reg(i_regs->regmap,rt1[i]);
    if(!((i_regs->isconst>>t)&1))
      emit_movimm(imm[i]<<16,t);
  if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
    t=get_reg(i_regs->regmap,rt1[i]);
    s=get_reg(i_regs->regmap,rs1[i]);
    if(!((i_regs->isconst>>t)&1)) {
      // Source not in a host register: reload it into t first.
      if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
      emit_addimm(t,imm[i],t);
      if(!((i_regs->wasconst>>s)&1))
        emit_addimm(s,imm[i],t);
      // Source was a known constant: fold the add at compile time.
      emit_movimm(constmap[i][s]+imm[i],t);
    if(!((i_regs->isconst>>t)&1))
      emit_movimm(imm[i],t);
  if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
    tl=get_reg(i_regs->regmap,rt1[i]);
    sl=get_reg(i_regs->regmap,rs1[i]);
    emit_addimm(sl,imm[i],tl);
    emit_movimm(imm[i],tl);
  else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
    //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
    t=get_reg(i_regs->regmap,rt1[i]);
    sl=get_reg(i_regs->regmap,rs1[i]);
    if(opcode[i]==0x0a) { // SLTI
      if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
      emit_slti32(t,imm[i],t);
      emit_slti32(sl,imm[i],t);
      if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
      emit_sltiu32(t,imm[i],t);
      emit_sltiu32(sl,imm[i],t);
    // SLTI(U) with r0 is just stupid,
    // nonetheless examples can be found
    if(opcode[i]==0x0a) // SLTI
      if(0<imm[i]) emit_movimm(1,t);
      else emit_zeroreg(t);
    if(imm[i]) emit_movimm(1,t);
    else emit_zeroreg(t);
  else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
    tl=get_reg(i_regs->regmap,rt1[i]);
    sl=get_reg(i_regs->regmap,rs1[i]);
    if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
      if(opcode[i]==0x0c) //ANDI
        if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
        emit_andimm(tl,imm[i],tl);
        if(!((i_regs->wasconst>>sl)&1))
          emit_andimm(sl,imm[i],tl);
          emit_movimm(constmap[i][sl]&imm[i],tl);
      if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
      if(opcode[i]==0x0d) { // ORI
        emit_orimm(tl,imm[i],tl);
        if(!((i_regs->wasconst>>sl)&1))
          emit_orimm(sl,imm[i],tl);
          emit_movimm(constmap[i][sl]|imm[i],tl);
      if(opcode[i]==0x0e) { // XORI
        emit_xorimm(tl,imm[i],tl);
        if(!((i_regs->wasconst>>sl)&1))
          emit_xorimm(sl,imm[i],tl);
          emit_movimm(constmap[i][sl]^imm[i],tl);
      emit_movimm(imm[i],tl);
// Emit host code for shift-by-immediate instructions (SLL/SRL/SRA).
// A zero shift amount degrades to a plain register move.
void shiftimm_assemble(int i,struct regstat *i_regs)
  if(opcode2[i]<=0x3) // SLL/SRL/SRA
    t=get_reg(i_regs->regmap,rt1[i]);
    s=get_reg(i_regs->regmap,rs1[i]);
    if(t>=0&&!((i_regs->isconst>>t)&1)){
      // Source not mapped: reload it into the destination register.
      if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
      if(opcode2[i]==0) // SLL
        emit_shlimm(s<0?t:s,imm[i],t);
      if(opcode2[i]==2) // SRL
        emit_shrimm(s<0?t:s,imm[i],t);
      if(opcode2[i]==3) // SRA
        emit_sarimm(s<0?t:s,imm[i],t);
      // Shift by zero: just copy if source and destination differ.
      if(s>=0 && s!=t) emit_mov(s,t);
  //emit_storereg(rt1[i],t); //DEBUG
  // 64-bit doubleword shifts (handling not visible here)
  if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
  if(opcode2[i]==0x3c) // DSLL32
  if(opcode2[i]==0x3e) // DSRL32
  if(opcode2[i]==0x3f) // DSRA32
#ifndef shift_assemble
// Emit host code for variable shifts (SLLV/SRLV/SRAV).  The shift amount
// is masked to 5 bits (MIPS semantics) via HOST_TEMPREG.  Overridable
// per-target via the shift_assemble macro.
static void shift_assemble(int i,struct regstat *i_regs)
  signed char s,t,shift;
  assert(opcode2[i]<=0x07); // SLLV/SRLV/SRAV
  t = get_reg(i_regs->regmap, rt1[i]);
  s = get_reg(i_regs->regmap, rs1[i]);
  shift = get_reg(i_regs->regmap, rs2[i]);
  else if(rs2[i]==0) {
    // Shift amount register is r0: plain copy.
    if(s!=t) emit_mov(s,t);
  host_tempreg_acquire();
  emit_andimm(shift,31,HOST_TEMPREG); // MIPS uses only the low 5 bits
  switch(opcode2[i]) {
    emit_shl(s,HOST_TEMPREG,t);
    emit_shr(s,HOST_TEMPREG,t);
    emit_sar(s,HOST_TEMPREG,t);
  host_tempreg_release();
// Classify a guest address into a memory-mirror type (used to pick the
// fast-path address transform in emit_fastpath_cmp_jump).
static int get_ptr_mem_type(u_int a)
  if(a < 0x00200000) {
    if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
      // return wrong, must use memhandler for BIOS self-test to pass
      // 007 does similar stuff from a00 mirror, weird stuff
  if(0x1f800000 <= a && a < 0x1f801000)  // scratchpad
  if(0x80200000 <= a && a < 0x80800000)  // RAM mirror above 2MB
  if(0xa0000000 <= a && a < 0xa0200000)  // uncached RAM mirror
// Emit the fast-path address check for a load/store.  Based on the SMRV
// (speculated memory region value) info, rewrite the address into the
// canonical RAM range when a known mirror is used, then emit the bounds
// compare whose (returned) branch jumps to the slow-path handler.
// May set *addr_reg_override to HOST_TEMPREG if the address was rewritten.
static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
  if(((smrv_strong|smrv_weak)>>mr)&1) {
    type=get_ptr_mem_type(smrv[mr]);
    //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
    // No speculation info: use the mirror we are running on
    type=get_ptr_mem_type(start);
    //printf("set nospec @%08x r%d %d\n", start+i*4, mr, type);
  if(type==MTYPE_8020) { // RAM 80200000+ mirror
    host_tempreg_acquire();
    emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
    addr=*addr_reg_override=HOST_TEMPREG;
  else if(type==MTYPE_0000) { // RAM 0 mirror
    host_tempreg_acquire();
    emit_orimm(addr,0x80000000,HOST_TEMPREG);
    addr=*addr_reg_override=HOST_TEMPREG;
  else if(type==MTYPE_A000) { // RAM A mirror
    host_tempreg_acquire();
    emit_andimm(addr,~0x20000000,HOST_TEMPREG);
    addr=*addr_reg_override=HOST_TEMPREG;
  else if(type==MTYPE_1F80) { // scratchpad
    if (psxH == (void *)0x1f800000) {
      host_tempreg_acquire();
      emit_xorimm(addr,0x1f800000,HOST_TEMPREG);
      emit_cmpimm(HOST_TEMPREG,0x1000);
      host_tempreg_release();
  // do the usual RAM check, jump will go to the right handler
  emit_cmpimm(addr,RAM_SIZE);
#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
  // Hint to branch predictor that the branch is unlikely to be taken
  emit_jno_unlikely(0);
  // Non-zero ram_offset: bias the address before the direct access.
  host_tempreg_acquire();
  emit_addimm(addr,ram_offset,HOST_TEMPREG);
  addr=*addr_reg_override=HOST_TEMPREG;
// return memhandler, or get directly accessable address and return 0
// Two-level lookup: the page table entry's top bit distinguishes a
// directly mapped page (address stored shifted right by 1) from a
// pointer to a per-page handler table indexed by access size.
static void *get_direct_memhandler(void *table, u_int addr,
  enum stub_type type, uintptr_t *addr_host)
  uintptr_t l1, l2 = 0;
  l1 = ((uintptr_t *)table)[addr>>12];
  if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) {
    // Direct page: recover the base address (stored >>1) and add offset.
    uintptr_t v = l1 << 1;
    *addr_host = v + addr;
  // Handler table: pick the sub-table matching the access width.
  if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
    l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
  else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
    l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
  l2=((uintptr_t *)l1)[(addr&0xfff)/4];
  if ((l2 & (1<<31)) == 0) {
    uintptr_t v = l2 << 1;
    *addr_host = v + (addr&0xfff);
  return (void *)(l2 << 1); // memhandler function pointer
// Build a bitmask of host registers currently holding a guest register
// (EXCLUDE_REG and unmapped slots are skipped).
static u_int get_host_reglist(const signed char *regmap)
  u_int reglist = 0, hr;
  for (hr = 0; hr < HOST_REGS; hr++) {
    if (hr != EXCLUDE_REG && regmap[hr] >= 0)
// Clear the bits for host registers r1 and r2 in a register-list mask
// (used to avoid saving registers that a stub will overwrite anyway).
static u_int reglist_exclude(u_int reglist, int r1, int r2)
  reglist &= ~(1u << r1);
  reglist &= ~(1u << r2);
// Emit host code for load instructions (LB/LH/LW/LBU/LHU).  The fast
// path reads RAM directly; the slow path goes through a LOADx_STUB.
// Loads whose target address is a known constant outside RAM are
// inlined as direct read-stub calls.
static void load_assemble(int i, const struct regstat *i_regs)
  int memtarget=0,c=0;
  int fastio_reg_override=-1;
  u_int reglist=get_host_reglist(i_regs->regmap);
  tl=get_reg(i_regs->regmap,rt1[i]);
  s=get_reg(i_regs->regmap,rs1[i]);
  if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
  c=(i_regs->wasconst>>s)&1;
  // Known-constant address: decide at compile time whether it hits RAM.
  memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
  //printf("load_assemble: c=%d\n",c);
  //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
  // FIXME: Even if the load is a NOP, we should check for pagefaults...
  if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
    // could be FIFO, must perform the read
    assem_debug("(forced read)\n");
    tl=get_reg(i_regs->regmap,-1);
  if(offset||s<0||c) addr=tl;
  //if(tl<0) tl=get_reg(i_regs->regmap,-1);
  //printf("load_assemble: c=%d\n",c);
  //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
  assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
  // Strmnnrmn's speed hack
  if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
    jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
  else if(ram_offset&&memtarget) {
    host_tempreg_acquire();
    emit_addimm(addr,ram_offset,HOST_TEMPREG);
    fastio_reg_override=HOST_TEMPREG;
  int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
  if (opcode[i]==0x20) { // LB
    if(fastio_reg_override>=0) a=fastio_reg_override;
    emit_movsbl_indexed(x,a,tl);
    add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
    inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
  if (opcode[i]==0x21) { // LH
    if(fastio_reg_override>=0) a=fastio_reg_override;
    emit_movswl_indexed(x,a,tl);
    add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
    inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
  if (opcode[i]==0x23) { // LW
    if(fastio_reg_override>=0) a=fastio_reg_override;
    emit_readword_indexed(0,a,tl);
    add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
    inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
  if (opcode[i]==0x24) { // LBU
    if(fastio_reg_override>=0) a=fastio_reg_override;
    emit_movzbl_indexed(x,a,tl);
    add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
    inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
  if (opcode[i]==0x25) { // LHU
    if(fastio_reg_override>=0) a=fastio_reg_override;
    emit_movzwl_indexed(x,a,tl);
    add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
    inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
  if (opcode[i]==0x27) { // LWU
  if (opcode[i]==0x37) { // LD
  if (fastio_reg_override == HOST_TEMPREG)
    host_tempreg_release();
#ifndef loadlr_assemble
// Emit host code for unaligned loads LWL/LWR: read the aligned word,
// then shift and merge it with the destination's preserved bytes.
// Overridable per-target via the loadlr_assemble macro.
static void loadlr_assemble(int i, const struct regstat *i_regs)
  int s,tl,temp,temp2,addr;
  int memtarget=0,c=0;
  int fastio_reg_override=-1;
  u_int reglist=get_host_reglist(i_regs->regmap);
  tl=get_reg(i_regs->regmap,rt1[i]);
  s=get_reg(i_regs->regmap,rs1[i]);
  temp=get_reg(i_regs->regmap,-1);
  temp2=get_reg(i_regs->regmap,FTEMP);
  addr=get_reg(i_regs->regmap,AGEN1+(i&1));
  if(offset||s<0||c) addr=temp2;
  c=(i_regs->wasconst>>s)&1;
  memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
  // temp = byte offset within the word, in bits (addr*8).
  emit_shlimm(addr,3,temp);
  if (opcode[i]==0x22||opcode[i]==0x26) {
    emit_andimm(addr,0xFFFFFFFC,temp2); // LWL/LWR
    emit_andimm(addr,0xFFFFFFF8,temp2); // LDL/LDR
  jaddr=emit_fastpath_cmp_jump(i,temp2,&fastio_reg_override);
  if(ram_offset&&memtarget) {
    host_tempreg_acquire();
    emit_addimm(temp2,ram_offset,HOST_TEMPREG);
    fastio_reg_override=HOST_TEMPREG;
  // Constant address: the shift amount is known at compile time.
  if (opcode[i]==0x22||opcode[i]==0x26) {
    emit_movimm(((constmap[i][s]+offset)<<3)&24,temp); // LWL/LWR
    emit_movimm(((constmap[i][s]+offset)<<3)&56,temp); // LDL/LDR
  if (opcode[i]==0x22||opcode[i]==0x26) { // LWL/LWR
    if(fastio_reg_override>=0) a=fastio_reg_override;
    emit_readword_indexed(0,a,temp2);
    if(fastio_reg_override==HOST_TEMPREG) host_tempreg_release();
    if(jaddr) add_stub_r(LOADW_STUB,jaddr,out,i,temp2,i_regs,ccadj[i],reglist);
    inline_readstub(LOADW_STUB,i,(constmap[i][s]+offset)&0xFFFFFFFC,i_regs->regmap,FTEMP,ccadj[i],reglist);
    // Merge loaded word into tl: shift the word, then mask out the
    // bytes being replaced and OR the rest in.
    emit_andimm(temp,24,temp);
    if (opcode[i]==0x22) // LWL
      emit_xorimm(temp,24,temp);
    host_tempreg_acquire();
    emit_movimm(-1,HOST_TEMPREG);
    if (opcode[i]==0x26) {
      emit_shr(temp2,temp,temp2);
      emit_bic_lsr(tl,HOST_TEMPREG,temp,tl);
      emit_shl(temp2,temp,temp2);
      emit_bic_lsl(tl,HOST_TEMPREG,temp,tl);
    host_tempreg_release();
    emit_or(temp2,tl,tl);
  //emit_storereg(rt1[i],tl); // DEBUG
  if (opcode[i]==0x1A||opcode[i]==0x1B) { // LDL/LDR
// Emit native code for MIPS store instructions SB/SH/SW/SD (opcodes
// 0x28/0x29/0x2B/0x3F): compute the effective address, take a fast path
// for writes that target RAM, fall back to a write stub otherwise, and
// emit the self-modifying-code (invalid_code) check after the store.
// NOTE(review): the embedded original line numbers skip values — this is
// a partial extract; interior lines of the function are missing.
2895 void store_assemble(int i, const struct regstat *i_regs)
2901 enum stub_type type;
2902 int memtarget=0,c=0;
2903 int agr=AGEN1+(i&1);
2904 int fastio_reg_override=-1;
2905 u_int reglist=get_host_reglist(i_regs->regmap);
2906 tl=get_reg(i_regs->regmap,rs2[i]);
2907 s=get_reg(i_regs->regmap,rs1[i]);
// AGEN register holds the precomputed address (see address_generation)
2908 temp=get_reg(i_regs->regmap,agr);
2909 if(temp<0) temp=get_reg(i_regs->regmap,-1);
2912 c=(i_regs->wasconst>>s)&1;
// constant base: decide at compile time whether the target is in RAM
2914 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2919 if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2920 if(offset||s<0||c) addr=temp;
// non-constant address: compare against RAM_SIZE and branch to slow path
2923 jaddr=emit_fastpath_cmp_jump(i,addr,&fastio_reg_override);
2925 else if(ram_offset&&memtarget) {
2926 host_tempreg_acquire();
2927 emit_addimm(addr,ram_offset,HOST_TEMPREG);
2928 fastio_reg_override=HOST_TEMPREG;
2931 if (opcode[i]==0x28) { // SB
2935 if(fastio_reg_override>=0) a=fastio_reg_override;
2936 emit_writebyte_indexed(tl,x,a);
2940 if (opcode[i]==0x29) { // SH
2944 if(fastio_reg_override>=0) a=fastio_reg_override;
2945 emit_writehword_indexed(tl,x,a);
2949 if (opcode[i]==0x2B) { // SW
2952 if(fastio_reg_override>=0) a=fastio_reg_override;
2953 emit_writeword_indexed(tl,0,a);
2957 if (opcode[i]==0x3F) { // SD
2961 if(fastio_reg_override==HOST_TEMPREG)
2962 host_tempreg_release();
2964 // PCSX store handlers don't check invcode again
2966 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
// SMC check: was this address previously written by translated code?
2969 if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
2971 #ifdef DESTRUCTIVE_SHIFT
2972 // The x86 shift operation is 'destructive'; it overwrites the
2973 // source register, so we need to make a copy first and use that.
2976 #if defined(HOST_IMM8)
2977 int ir=get_reg(i_regs->regmap,INVCP);
2979 emit_cmpmem_indexedsr12_reg(ir,addr,1);
2981 emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
2983 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2984 emit_callne(invalidate_addr_reg[addr]);
2988 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),addr,0,0,0);
2992 u_int addr_val=constmap[i][s]+offset;
2994 add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2995 } else if(c&&!memtarget) {
// known non-RAM target: skip fast path entirely, inline the handler call
2996 inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
2998 // basic current block modification detection..
2999 // not looking back as that should be in mips cache already
3000 // (see Spyro2 title->attract mode)
3001 if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3002 SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3003 assert(i_regs->regmap==regs[i].regmap); // not delay slot
3004 if(i_regs->regmap==regs[i].regmap) {
// the block wrote over itself: flush state and exit through get_addr_ht
3005 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3006 wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
3007 emit_movimm(start+i*4+4,0);
3008 emit_writeword(0,&pcaddr);
3009 emit_addimm(HOST_CCREG,2,HOST_CCREG);
3010 emit_far_call(get_addr_ht);
// Emit native code for unaligned stores SWL/SWR (and nominally SDL/SDR):
// branches on the low address bits (4 alignment cases) and performs the
// partial-word write with rotate+narrow-store sequences.
// NOTE(review): partial extract — embedded original line numbers skip
// values, so interior lines (case branches, jumps) are missing here.
3016 static void storelr_assemble(int i, const struct regstat *i_regs)
3022 void *case1, *case2, *case3;
3023 void *done0, *done1, *done2;
3024 int memtarget=0,c=0;
3025 int agr=AGEN1+(i&1);
3026 u_int reglist=get_host_reglist(i_regs->regmap);
3027 tl=get_reg(i_regs->regmap,rs2[i]);
3028 s=get_reg(i_regs->regmap,rs1[i]);
3029 temp=get_reg(i_regs->regmap,agr);
3030 if(temp<0) temp=get_reg(i_regs->regmap,-1);
3033 c=(i_regs->isconst>>s)&1;
3035 memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
// range check against RAM before the fast-path write
3041 emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3042 if(!offset&&s!=temp) emit_mov(s,temp);
3048 if(!memtarget||!rs1[i]) {
3054 emit_addimm_no_flags(ram_offset,temp);
3056 if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
// dispatch on addr&3: test bit 1 then bit 0 to pick one of 4 cases
3060 emit_xorimm(temp,3,temp);
3061 emit_testimm(temp,2);
3064 emit_testimm(temp,1);
// case 0: aligned
3068 if (opcode[i]==0x2A) { // SWL
3069 emit_writeword_indexed(tl,0,temp);
3071 else if (opcode[i]==0x2E) { // SWR
3072 emit_writebyte_indexed(tl,3,temp);
3079 set_jump_target(case1, out);
3080 if (opcode[i]==0x2A) { // SWL
3081 // Write 3 msb into three least significant bytes
3082 if(rs2[i]) emit_rorimm(tl,8,tl);
3083 emit_writehword_indexed(tl,-1,temp);
// rotations restore tl to its original value by the end of the case
3084 if(rs2[i]) emit_rorimm(tl,16,tl);
3085 emit_writebyte_indexed(tl,1,temp);
3086 if(rs2[i]) emit_rorimm(tl,8,tl);
3088 else if (opcode[i]==0x2E) { // SWR
3089 // Write two lsb into two most significant bytes
3090 emit_writehword_indexed(tl,1,temp);
3095 set_jump_target(case2, out);
3096 emit_testimm(temp,1);
3099 if (opcode[i]==0x2A) { // SWL
3100 // Write two msb into two least significant bytes
3101 if(rs2[i]) emit_rorimm(tl,16,tl);
3102 emit_writehword_indexed(tl,-2,temp);
3103 if(rs2[i]) emit_rorimm(tl,16,tl);
3105 else if (opcode[i]==0x2E) { // SWR
3106 // Write 3 lsb into three most significant bytes
3107 emit_writebyte_indexed(tl,-1,temp);
3108 if(rs2[i]) emit_rorimm(tl,8,tl);
3109 emit_writehword_indexed(tl,0,temp);
3110 if(rs2[i]) emit_rorimm(tl,24,tl);
3115 set_jump_target(case3, out);
3116 if (opcode[i]==0x2A) { // SWL
3117 // Write msb into least significant byte
3118 if(rs2[i]) emit_rorimm(tl,24,tl);
3119 emit_writebyte_indexed(tl,-3,temp);
3120 if(rs2[i]) emit_rorimm(tl,8,tl);
3122 else if (opcode[i]==0x2E) { // SWR
3123 // Write entire word
3124 emit_writeword_indexed(tl,-3,temp);
3126 set_jump_target(done0, out);
3127 set_jump_target(done1, out);
3128 set_jump_target(done2, out);
// slow path for non-RAM targets
3130 add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
// SMC check, as in store_assemble; undo ram_offset bias first
3131 if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
3132 emit_addimm_no_flags(-ram_offset,temp);
3133 #if defined(HOST_IMM8)
3134 int ir=get_reg(i_regs->regmap,INVCP);
3136 emit_cmpmem_indexedsr12_reg(ir,temp,1);
3138 emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
3140 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3141 emit_callne(invalidate_addr_reg[temp]);
3145 add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),temp,0,0,0);
// Emit native code for COP0 operations: MFC0 (read cop0 reg), MTC0
// (write cop0 reg, with special handling for Count/Compare/SR/Cause
// because writes can raise or unblock interrupts), and RFE.
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "®_cop0" below is HTML-entity damage for "&reg_cop0" —
// restore the "&reg" sequences before compiling.
3150 static void cop0_assemble(int i,struct regstat *i_regs)
3152 if(opcode2[i]==0) // MFC0
3154 signed char t=get_reg(i_regs->regmap,rt1[i]);
3155 u_int copr=(source[i]>>11)&0x1f;
3156 //assert(t>=0); // Why does this happen? OOT is weird
3157 if(t>=0&&rt1[i]!=0) {
3158 emit_readword(®_cop0[copr],t);
3161 else if(opcode2[i]==4) // MTC0
3163 signed char s=get_reg(i_regs->regmap,rs1[i]);
3164 char copr=(source[i]>>11)&0x1f;
3166 wb_register(rs1[i],i_regs->regmap,i_regs->dirty);
// 9=Count, 11=Compare, 12=SR, 13=Cause: sync the cycle count first
3167 if(copr==9||copr==11||copr==12||copr==13) {
3168 emit_readword(&last_count,HOST_TEMPREG);
3169 emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
3170 emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3171 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3172 emit_writeword(HOST_CCREG,&Count);
3174 // What a mess. The status register (12) can enable interrupts,
3175 // so needs a special case to handle a pending interrupt.
3176 // The interrupt must be taken immediately, because a subsequent
3177 // instruction might disable interrupts again.
3178 if(copr==12||copr==13) {
3180 // burn cycles to cause cc_interrupt, which will
3181 // reschedule next_interupt. Relies on CCREG from above.
3182 assem_debug("MTC0 DS %d\n", copr);
3183 emit_writeword(HOST_CCREG,&last_count);
3184 emit_movimm(0,HOST_CCREG);
3185 emit_storereg(CCREG,HOST_CCREG);
3186 emit_loadreg(rs1[i],1);
3187 emit_movimm(copr,0);
// delay-slot variant of the MTC0 helper
3188 emit_far_call(pcsx_mtc0_ds);
3189 emit_loadreg(rs1[i],s);
3192 emit_movimm(start+i*4+4,HOST_TEMPREG);
3193 emit_writeword(HOST_TEMPREG,&pcaddr);
3194 emit_movimm(0,HOST_TEMPREG);
3195 emit_writeword(HOST_TEMPREG,&pending_exception);
// args for pcsx_mtc0: r0 = cop0 reg index, r1 = new value
3198 emit_loadreg(rs1[i],1);
3201 emit_movimm(copr,0);
3202 emit_far_call(pcsx_mtc0);
3203 if(copr==9||copr==11||copr==12||copr==13) {
// re-derive CCREG from the (possibly rescheduled) Count/next_interupt
3204 emit_readword(&Count,HOST_CCREG);
3205 emit_readword(&next_interupt,HOST_TEMPREG);
3206 emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3207 emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3208 emit_writeword(HOST_TEMPREG,&last_count);
3209 emit_storereg(CCREG,HOST_CCREG);
3211 if(copr==12||copr==13) {
3212 assert(!is_delayslot);
// if the write raised an exception, leave via get_addr_ht
3213 emit_readword(&pending_exception,14);
3217 emit_readword(&pcaddr, 0);
3218 emit_addimm(HOST_CCREG,2,HOST_CCREG);
3219 emit_far_call(get_addr_ht);
3221 set_jump_target(jaddr, out);
3223 emit_loadreg(rs1[i],s);
3227 assert(opcode2[i]==0x10);
3228 //if((source[i]&0x3f)==0x10) // RFE
// RFE: pop the interrupt/kernel-mode bit stack in SR (bits 5:0)
3230 emit_readword(&Status,0);
3231 emit_andimm(0,0x3c,1);
3232 emit_andimm(0,~0xf,0);
3233 emit_orrshr_imm(1,2,0);
3234 emit_writeword(0,&Status);
// PSX has no FPU: any COP1 access raises a "coprocessor unusable"
// exception, routed through an FP_STUB.
3239 static void cop1_unusable(int i,struct regstat *i_regs)
3241 // XXX: should just do the exception instead
3246 add_stub_r(FP_STUB,jaddr,out,i,0,i_regs,is_delayslot,0);
// All COP1 instructions are unusable on PSX — delegate to the stub.
3250 static void cop1_assemble(int i,struct regstat *i_regs)
3252 cop1_unusable(i, i_regs);
// COP1 load/store (LWC1/SWC1 etc.) — also unusable on PSX.
3255 static void c1ls_assemble(int i,struct regstat *i_regs)
3257 cop1_unusable(i, i_regs);
// Out-of-line stub for the coprocessor-unusable exception: write back
// dirty registers, load PC and cycle count, and jump to the exception
// handler (delay-slot variant when needed).
// NOTE(review): partial extract (original line numbers skip values).
3261 static void do_cop1stub(int n)
3264 assem_debug("do_cop1stub %x\n",start+stubs[n].a*4);
3265 set_jump_target(stubs[n].addr, out);
3267 // int rs=stubs[n].b;
3268 struct regstat *i_regs=(struct regstat *)stubs[n].c;
3271 load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3272 //if(i_regs!=&regs[i]) printf("oops: regs[i]=%x i_regs=%x",(int)&regs[i],(int)i_regs);
3274 //else {printf("fp exception in delay slot\n");}
3275 wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
3276 if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
3277 emit_movimm(start+(i-ds)*4,EAX); // Get PC
3278 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right? There should probably be an extra cycle...
3279 emit_far_jump(ds?fp_exception_ds:fp_exception);
3282 // assumes callee-save regs are already saved
// Emit a call to the GTE stall-check helper so that GTE operation
// latency is accounted for. Skipped when the no-stall hack is enabled
// or when CCREG is not currently in its host register.
// Assumes callee-save registers are already saved by the caller.
// NOTE(review): partial extract (original line numbers skip values).
3283 static void cop2_call_stall_check(u_int op, int i, const struct regstat *i_regs, u_int reglist)
3285 if (HACK_ENABLED(NDHACK_GTE_NO_STALL))
3287 //assert(get_reg(i_regs->regmap, CCREG) == HOST_CCREG);
3288 if (get_reg(i_regs->regmap, CCREG) != HOST_CCREG) {
3289 // happens occasionally... cc evicted? Don't bother then
3290 //printf("no cc %08x\n", start + i*4);
3293 assem_debug("cop2_call_stall_check\n");
// args: r0 = this op's GTE cycle cost, r1 = current cycle count
3295 emit_movimm(gte_cycletab[op], 0);
3296 emit_addimm(HOST_CCREG, CLOCK_ADJUST(ccadj[i]), 1);
3297 emit_far_call(call_gteStall);
3298 restore_regs(reglist);
// Read a GTE data register (cop2d[copr]) into host register tl,
// applying the per-register read quirks (sign/zero extension, SXY2
// mirroring, register 29 special case).
// NOTE(review): partial extract — the switch/case structure selecting
// copr is missing from this listing (original line numbers skip).
// NOTE(review): "®_cop2d" is HTML-entity damage for "&reg_cop2d" —
// restore the "&reg" sequences before compiling.
3301 static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
// sign-extended 16-bit registers: normalize the stored value too
3311 emit_readword(®_cop2d[copr],tl);
3312 emit_signextend16(tl,tl);
3313 emit_writeword(tl,®_cop2d[copr]); // hmh
// zero-extended 16-bit registers
3320 emit_readword(®_cop2d[copr],tl);
3321 emit_andimm(tl,0xffff,tl);
3322 emit_writeword(tl,®_cop2d[copr])&#x3B;
// SXYP reads return SXY2
3325 emit_readword(®_cop2d[14],tl); // SXY2
3326 emit_writeword(tl,®_cop2d[copr]);
// register 29 (ORGB/IRGB) needs a computed conversion
3330 c2op_mfc2_29_assemble(tl,temp);
// default: plain read
3333 emit_readword(®_cop2d[copr],tl);
// Write host register sl into GTE data register cop2d[copr], applying
// per-register write side effects (SXY FIFO push, IRGB colour unpack,
// LZCS/LZCR leading-zero count, read-only registers).
// NOTE(review): partial extract — the switch/case structure selecting
// copr is missing from this listing (original line numbers skip).
// NOTE(review): "®_cop2d" is HTML-entity damage for "&reg_cop2d" —
// restore the "&reg" sequences before compiling.
3338 static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
// SXYP write: push the screen-XY FIFO (SXY0<-SXY1<-SXY2<-new)
3342 emit_readword(®_cop2d[13],temp); // SXY1
3343 emit_writeword(sl,®_cop2d[copr]);
3344 emit_writeword(temp,®_cop2d[12]); // SXY0
3345 emit_readword(®_cop2d[14],temp); // SXY2
3346 emit_writeword(sl,®_cop2d[14]);
3347 emit_writeword(temp,®_cop2d[13]); // SXY1
// IRGB write: unpack 5:5:5 colour into IR1/IR2/IR3 (regs 9/10/11)
3350 emit_andimm(sl,0x001f,temp);
3351 emit_shlimm(temp,7,temp);
3352 emit_writeword(temp,®_cop2d[9]);
3353 emit_andimm(sl,0x03e0,temp);
3354 emit_shlimm(temp,2,temp);
3355 emit_writeword(temp,®_cop2d[10]);
3356 emit_andimm(sl,0x7c00,temp);
3357 emit_shrimm(temp,3,temp);
3358 emit_writeword(temp,®_cop2d[11]);
3359 emit_writeword(sl,®_cop2d[28]);
// LZCS write: store value and compute leading-zero/one count into LZCR
3362 emit_xorsar_imm(sl,sl,31,temp);
3363 #if defined(HAVE_ARMV5) || defined(__aarch64__)
3364 emit_clz(temp,temp);
// no CLZ instruction: count bits with a shift loop
3366 emit_movs(temp,HOST_TEMPREG);
3367 emit_movimm(0,temp);
3368 emit_jeq((int)out+4*4);
3369 emit_addpl_imm(temp,1,temp);
3370 emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
3371 emit_jns((int)out-2*4);
3373 emit_writeword(sl,®_cop2d[30]);
3374 emit_writeword(temp,®_cop2d[31]);
// default: plain write
3379 emit_writeword(sl,®_cop2d[copr]);
// Emit native code for GTE load/store LWC2/SWC2 (opcodes 0x32/0x3a):
// address generation, fast RAM path or stub, GTE register transfer via
// cop2_get_dreg/cop2_put_dreg, and the SMC check for SWC2.
// NOTE(review): partial extract (original line numbers skip values).
3384 static void c2ls_assemble(int i, const struct regstat *i_regs)
3389 int memtarget=0,c=0;
3391 enum stub_type type;
3392 int agr=AGEN1+(i&1);
3393 int fastio_reg_override=-1;
3394 u_int reglist=get_host_reglist(i_regs->regmap);
3395 u_int copr=(source[i]>>16)&0x1f;
3396 s=get_reg(i_regs->regmap,rs1[i]);
// FTEMP carries the transferred word
3397 tl=get_reg(i_regs->regmap,FTEMP);
3402 if(i_regs->regmap[HOST_CCREG]==CCREG)
3403 reglist&=~(1<<HOST_CCREG);
3406 if (opcode[i]==0x3a) { // SWC2
3407 ar=get_reg(i_regs->regmap,agr);
3408 if(ar<0) ar=get_reg(i_regs->regmap,-1);
3413 if(s>=0) c=(i_regs->wasconst>>s)&1;
3414 memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3415 if (!offset&&!c&&s>=0) ar=s;
3418 if (opcode[i]==0x3a) { // SWC2
// read the GTE data register before the store; check GTE stall first
3419 cop2_call_stall_check(0, i, i_regs, reglist_exclude(reglist, tl, -1));
3420 cop2_get_dreg(copr,tl,-1);
3428 emit_jmp(0); // inline_readstub/inline_writestub?
3432 jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3434 else if(ram_offset&&memtarget) {
3435 host_tempreg_acquire();
3436 emit_addimm(ar,ram_offset,HOST_TEMPREG);
3437 fastio_reg_override=HOST_TEMPREG;
3439 if (opcode[i]==0x32) { // LWC2
3441 if(fastio_reg_override>=0) a=fastio_reg_override;
3442 emit_readword_indexed(0,a,tl);
3444 if (opcode[i]==0x3a) { // SWC2
3445 #ifdef DESTRUCTIVE_SHIFT
3446 if(!offset&&!c&&s>=0) emit_mov(s,ar);
3449 if(fastio_reg_override>=0) a=fastio_reg_override;
3450 emit_writeword_indexed(tl,0,a);
3453 if(fastio_reg_override==HOST_TEMPREG)
3454 host_tempreg_release();
3456 add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist);
// SWC2 writes memory, so it needs the SMC/invalid_code check too
3457 if(opcode[i]==0x3a) // SWC2
3458 if(!(i_regs->waswritten&(1<<rs1[i])) && !HACK_ENABLED(NDHACK_NO_SMC_CHECK)) {
3459 #if defined(HOST_IMM8)
3460 int ir=get_reg(i_regs->regmap,INVCP);
3462 emit_cmpmem_indexedsr12_reg(ir,ar,1);
3464 emit_cmpmem_indexedsr12_imm(invalid_code,ar,1);
3466 #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3467 emit_callne(invalidate_addr_reg[ar]);
3471 add_stub(INVCODE_STUB,jaddr3,out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3474 if (opcode[i]==0x32) { // LWC2
// store the loaded word into the GTE data register
3475 host_tempreg_acquire();
3476 cop2_put_dreg(copr,tl,HOST_TEMPREG);
3477 host_tempreg_release();
// Emit native code for GTE register transfers MFC2/CFC2/MTC2/CTC2
// (opcode2 0/2/4/6), with a GTE stall check before reads.
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "®_cop2c" is HTML-entity damage for "&reg_cop2c" —
// restore the "&reg" sequences before compiling.
3482 static void cop2_assemble(int i, const struct regstat *i_regs)
3483 u_int copr = (source[i]>>11) & 0x1f;
3484 signed char temp = get_reg(i_regs->regmap, -1);
3486 if (opcode2[i] == 0 || opcode2[i] == 2) { // MFC2/CFC2
// reading a GTE register may have to wait for an in-flight GTE op
3487 if (!HACK_ENABLED(NDHACK_GTE_NO_STALL)) {
3488 signed char tl = get_reg(i_regs->regmap, rt1[i]);
3489 u_int reglist = reglist_exclude(get_host_reglist(i_regs->regmap), tl, temp);
3490 cop2_call_stall_check(0, i, i_regs, reglist);
3493 if (opcode2[i]==0) { // MFC2
3494 signed char tl=get_reg(i_regs->regmap,rt1[i]);
3495 if(tl>=0&&rt1[i]!=0)
3496 cop2_get_dreg(copr,tl,temp);
3498 else if (opcode2[i]==4) { // MTC2
3499 signed char sl=get_reg(i_regs->regmap,rs1[i]);
3500 cop2_put_dreg(copr,sl,temp);
3502 else if (opcode2[i]==2) // CFC2
3504 signed char tl=get_reg(i_regs->regmap,rt1[i]);
3505 if(tl>=0&&rt1[i]!=0)
3506 emit_readword(®_cop2c[copr],tl);
3508 else if (opcode2[i]==6) // CTC2
3510 signed char sl=get_reg(i_regs->regmap,rs1[i]);
// some control registers need massaging on write (sign-extend, flags)
3519 emit_signextend16(sl,temp);
3522 c2op_ctc2_31_assemble(sl,temp);
3528 emit_writeword(temp,®_cop2c[copr]);
// Out-of-line stub for unaligned writes (SWL/SWR) that fall outside the
// fast RAM path: either calls the dedicated jump_handle_swl/swr helpers,
// or does a read-modify-write through the memory handler tables.
// NOTE(review): partial extract (original line numbers skip values) —
// the branch between the two strategies is not visible here.
3533 static void do_unalignedwritestub(int n)
3535 assem_debug("do_unalignedwritestub %x\n",start+stubs[n].a*4);
3537 set_jump_target(stubs[n].addr, out);
3540 struct regstat *i_regs=(struct regstat *)stubs[n].c;
3541 int addr=stubs[n].b;
3542 u_int reglist=stubs[n].e;
3543 signed char *i_regmap=i_regs->regmap;
3544 int temp2=get_reg(i_regmap,FTEMP);
3546 rt=get_reg(i_regmap,rs2[i]);
3549 assert(opcode[i]==0x2a||opcode[i]==0x2e); // SWL/SWR only implemented
3551 reglist&=~(1<<temp2);
3554 // don't bother with it and call write handler
3557 int cc=get_reg(i_regmap,CCREG);
3559 emit_loadreg(CCREG,2);
// pass cycle count in r2, adjusted for this instruction
3560 emit_addimm(cc<0?2:cc,CLOCK_ADJUST((int)stubs[n].d+1),2);
3561 emit_far_call((opcode[i]==0x2a?jump_handle_swl:jump_handle_swr));
3562 emit_addimm(0,-CLOCK_ADJUST((int)stubs[n].d+1),cc<0?2:cc);
3564 emit_storereg(CCREG,2);
3565 restore_regs(reglist);
3566 emit_jmp(stubs[n].retaddr); // return address
// read-modify-write path: fetch the containing aligned word first
3568 emit_andimm(addr,0xfffffffc,temp2);
3569 emit_writeword(temp2,&address);
3572 emit_shrimm(addr,16,1);
3573 int cc=get_reg(i_regmap,CCREG);
3575 emit_loadreg(CCREG,2);
3577 emit_movimm((u_int)readmem,0);
3578 emit_addimm(cc<0?2:cc,2*stubs[n].d+2,2);
3579 emit_call((int)&indirect_jump_indexed);
3580 restore_regs(reglist);
3582 emit_readword(&readmem_dword,temp2);
3583 int temp=addr; //hmh
// compute the byte shift (addr&3)*3... i.e. (addr<<3)&24
3584 emit_shlimm(addr,3,temp);
3585 emit_andimm(temp,24,temp);
3586 if (opcode[i]==0x2a) // SWL
3587 emit_xorimm(temp,24,temp);
3588 emit_movimm(-1,HOST_TEMPREG);
// merge rt into the word: clear target bytes, OR in shifted source
3589 if (opcode[i]==0x2a) { // SWL
3590 emit_bic_lsr(temp2,HOST_TEMPREG,temp,temp2);
3591 emit_orrshr(rt,temp,temp2);
3593 emit_bic_lsl(temp2,HOST_TEMPREG,temp,temp2);
3594 emit_orrshl(rt,temp,temp2);
3596 emit_readword(&address,addr);
3597 emit_writeword(temp2,&word);
3598 //save_regs(reglist); // don't need to, no state changes
// write the merged word back via the write handler table
3599 emit_shrimm(addr,16,1);
3600 emit_movimm((u_int)writemem,0);
3601 //emit_call((int)&indirect_jump_indexed);
3603 emit_readword_dualindexedx4(0,1,15);
// resynchronize the cycle counter after the handler call
3604 emit_readword(&Count,HOST_TEMPREG);
3605 emit_readword(&next_interupt,2);
3606 emit_addimm(HOST_TEMPREG,-2*stubs[n].d-2,HOST_TEMPREG);
3607 emit_writeword(2,&last_count);
3608 emit_sub(HOST_TEMPREG,2,cc<0?HOST_TEMPREG:cc);
3610 emit_storereg(CCREG,HOST_TEMPREG);
3612 restore_regs(reglist);
3613 emit_jmp(stubs[n].retaddr); // return address
3617 #ifndef multdiv_assemble
// Fallback when no architecture-specific multdiv_assemble is provided
// (guarded by #ifndef multdiv_assemble above) — only warns at runtime.
3618 void multdiv_assemble(int i,struct regstat *i_regs)
3620 printf("Need multdiv_assemble for this architecture.\n");
// Emit MFHI/MFLO/MTHI/MTLO as a simple register move into rt1[i].
// NOTE(review): partial extract (original line numbers skip values).
3625 static void mov_assemble(int i,struct regstat *i_regs)
3627 //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3628 //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3631 tl=get_reg(i_regs->regmap,rt1[i]);
3634 sl=get_reg(i_regs->regmap,rs1[i]);
// source in a host register: move directly; otherwise load from memory
3635 if(sl>=0) emit_mov(sl,tl);
3636 else emit_loadreg(rs1[i],tl);
3641 // call interpreter, exception handler, things that change pc/regs/cycles ...
// Call a C handler that may change pc/regs/cycles (interpreter step,
// exception, HLE call): stores pc and the synchronized cycle count into
// psxRegs, calls func, then leaves the block via jump_to_new_pc.
// NOTE(review): partial extract (original line numbers skip values).
3642 static void call_c_cpu_handler(int i, const struct regstat *i_regs, u_int pc, void *func)
3644 signed char ccreg=get_reg(i_regs->regmap,CCREG);
3645 assert(ccreg==HOST_CCREG);
3646 assert(!is_delayslot);
3649 emit_movimm(pc,3); // Get PC
3650 emit_readword(&last_count,2);
3651 emit_writeword(3,&psxRegs.pc);
3652 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
// psxRegs.cycle = last_count + CCREG (absolute cycle count)
3653 emit_add(2,HOST_CCREG,2);
3654 emit_writeword(2,&psxRegs.cycle);
3655 emit_far_call(func);
3656 emit_far_jump(jump_to_new_pc);
// SYSCALL: raise a PSX exception with cause code 0x20 via psxException.
3659 static void syscall_assemble(int i,struct regstat *i_regs)
3661 emit_movimm(0x20,0); // cause code
3662 emit_movimm(0,1); // not in delay slot
3663 call_c_cpu_handler(i,i_regs,start+i*4,psxException);
// HLE BIOS call: the low 26 bits of the instruction index the psxHLEt
// table; out-of-range codes fall back to psxNULL.
3666 static void hlecall_assemble(int i,struct regstat *i_regs)
3668 void *hlefunc = psxNULL;
3669 uint32_t hleCode = source[i] & 0x03ffffff;
3670 if (hleCode < ARRAY_SIZE(psxHLEt))
3671 hlefunc = psxHLEt[hleCode];
// pc+4: execution resumes after the HLE call instruction
3673 call_c_cpu_handler(i,i_regs,start+i*4+4,hlefunc);
// Fall back to the interpreter for one instruction (execI).
3676 static void intcall_assemble(int i,struct regstat *i_regs)
3678 call_c_cpu_handler(i,i_regs,start+i*4,execI);
// Register-value speculation: propagate rs's speculated value to rt and
// mark rt as strongly known for the next instruction.
// NOTE(review): partial extract (original line numbers skip values).
3681 static void speculate_mov(int rs,int rt)
3684 smrv_strong_next|=1<<rt;
// Same as speculate_mov, but only marks rt as weakly known.
// NOTE(review): partial extract (original line numbers skip values).
3689 static void speculate_mov_weak(int rs,int rt)
3692 smrv_weak_next|=1<<rt;
// Track speculated MIPS register values (smrv[]) across the block so
// loads/stores can guess their target region. "Strong" bits mean the
// value is trusted, "weak" bits mean it is a plausible guess; writes by
// unknown operations clear both.
// NOTE(review): partial extract (original line numbers skip values) —
// the itype[] switch structure is not visible here.
// NOTE(review): "®s[i]" below is HTML-entity damage for "&regs[i]".
3697 static void speculate_register_values(int i)
// at block entry, seed speculation from the real register file
3700 memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
3701 // gp,sp are likely to stay the same throughout the block
3702 smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
3703 smrv_weak_next=~smrv_strong_next;
3704 //printf(" llr %08x\n", smrv[4]);
3706 smrv_strong=smrv_strong_next;
3707 smrv_weak=smrv_weak_next;
// ALU/move: result inherits speculation from whichever source is known
3710 if ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
3711 else if((smrv_strong>>rs2[i])&1) speculate_mov(rs2[i],rt1[i]);
3712 else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
3713 else if((smrv_weak>>rs2[i])&1) speculate_mov_weak(rs2[i],rt1[i]);
3715 smrv_strong_next&=~(1<<rt1[i]);
3716 smrv_weak_next&=~(1<<rt1[i]);
3720 smrv_strong_next&=~(1<<rt1[i]);
3721 smrv_weak_next&=~(1<<rt1[i]);
// constant-propagated result: take the exact value from constmap
3724 if(rt1[i]&&is_const(®s[i],rt1[i])) {
3725 int value,hr=get_reg(regs[i].regmap,rt1[i]);
3727 if(get_final_value(hr,i,&value))
3729 else smrv[rt1[i]]=constmap[i][hr];
3730 smrv_strong_next|=1<<rt1[i];
3734 if ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
3735 else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
3739 if(start<0x2000&&(rt1[i]==26||(smrv[rt1[i]]>>24)==0xa0)) {
3740 // special case for BIOS
3741 smrv[rt1[i]]=0xa0000000;
3742 smrv_strong_next|=1<<rt1[i];
// load result is unknown: clear speculation for the destination
3749 smrv_strong_next&=~(1<<rt1[i]);
3750 smrv_weak_next&=~(1<<rt1[i]);
3754 if(opcode2[i]==0||opcode2[i]==2) { // MFC/CFC
3755 smrv_strong_next&=~(1<<rt1[i]);
3756 smrv_weak_next&=~(1<<rt1[i]);
3760 if (opcode[i]==0x32) { // LWC2
3761 smrv_strong_next&=~(1<<rt1[i]);
3762 smrv_weak_next&=~(1<<rt1[i]);
3768 printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
3769 ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
// Assemble the instruction in a branch delay slot: dispatch on itype
// to the per-type assembler. Jumps are invalid in a delay slot.
// NOTE(review): partial extract — the switch statement and case labels
// are missing from this listing (original line numbers skip).
3773 static void ds_assemble(int i,struct regstat *i_regs)
3775 speculate_register_values(i);
3779 alu_assemble(i,i_regs);break;
3781 imm16_assemble(i,i_regs);break;
3783 shift_assemble(i,i_regs);break;
3785 shiftimm_assemble(i,i_regs);break;
3787 load_assemble(i,i_regs);break;
3789 loadlr_assemble(i,i_regs);break;
3791 store_assemble(i,i_regs);break;
3793 storelr_assemble(i,i_regs);break;
3795 cop0_assemble(i,i_regs);break;
3797 cop1_assemble(i,i_regs);break;
3799 c1ls_assemble(i,i_regs);break;
3801 cop2_assemble(i,i_regs);break;
3803 c2ls_assemble(i,i_regs);break;
3805 c2op_assemble(i,i_regs);break;
3807 multdiv_assemble(i,i_regs);break;
3809 mov_assemble(i,i_regs);break;
3818 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
3823 // Is the branch target a valid internal jump?
// Return nonzero when addr is a valid branch target inside the current
// block (start..start+slen*4-4); odd addresses mark register jumps.
// NOTE(review): partial extract — the return statements after the range
// check are not visible here (original line numbers skip).
3824 static int internal_branch(int addr)
3826 if(addr&1) return 0; // Indirect (register) jump
3827 if(addr>=start && addr<start+slen*4-4)
// Write back registers whose host-register assignment changes between
// 'pre' and 'entry' maps (unless the MIPS reg is unneeded, u-bit set),
// then move values between host registers where only the slot changed.
// NOTE(review): partial extract (original line numbers skip values).
3834 static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t u)
3837 for(hr=0;hr<HOST_REGS;hr++) {
3838 if(hr!=EXCLUDE_REG) {
3839 if(pre[hr]!=entry[hr]) {
// reg is being evicted (no slot for it in 'entry'): store if needed
3842 if(get_reg(entry,pre[hr])<0) {
3844 if(!((u>>pre[hr])&1))
3845 emit_storereg(pre[hr],hr);
3852 // Move from one register to another (no writeback)
3853 for(hr=0;hr<HOST_REGS;hr++) {
3854 if(hr!=EXCLUDE_REG) {
3855 if(pre[hr]!=entry[hr]) {
3856 if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3858 if((nr=get_reg(entry,pre[hr]))>=0) {
3867 // Load the specified registers
3868 // This only loads the registers given as arguments because
3869 // we don't want to load things that will be overwritten
// Load only the MIPS registers rs1/rs2 into their assigned host regs,
// skipping any whose host reg already held them at block entry.
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "&®map" below is HTML-entity damage for "&&regmap".
3870 static void load_regs(signed char entry[],signed char regmap[],int rs1,int rs2)
3874 for(hr=0;hr<HOST_REGS;hr++) {
3875 if(hr!=EXCLUDE_REG&®map[hr]>=0) {
3876 if(entry[hr]!=regmap[hr]) {
3877 if(regmap[hr]==rs1||regmap[hr]==rs2)
3884 emit_loadreg(regmap[hr],hr);
3892 // Load registers prior to the start of a loop
3893 // so that they are not loaded within the loop
// Hoist register loads out of a loop: for each host reg whose mapping
// changes at the loop entry, load the entry value now so it is not
// reloaded on every iteration.
// NOTE(review): partial extract (original line numbers skip values).
3894 static void loop_preload(signed char pre[],signed char entry[])
3897 for(hr=0;hr<HOST_REGS;hr++) {
3898 if(hr!=EXCLUDE_REG) {
3899 if(pre[hr]!=entry[hr]) {
// only preload if the value isn't already live in another host reg
3901 if(get_reg(pre,entry[hr])<0) {
3902 assem_debug("loop preload:\n");
3903 //printf("loop preload: %d\n",hr);
3907 else if(entry[hr]<TEMPREG)
3909 emit_loadreg(entry[hr],hr);
3911 else if(entry[hr]-64<TEMPREG)
3913 emit_loadreg(entry[hr],hr);
3922 // Generate address for load/store instruction
3923 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
// Generate the effective address for a load/store instruction ahead of
// time (into AGEN for writes, FTEMP for LOADLR and cop1/2 loads), and
// preload constant addresses for the NEXT instruction when possible.
// LWL/LWR addresses are masked to ~3, LDL/LDR to ~7.
// NOTE(review): partial extract (original line numbers skip values).
3924 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3926 if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3928 int agr=AGEN1+(i&1);
// pick the destination host register for the computed address
3929 if(itype[i]==LOAD) {
3930 ra=get_reg(i_regs->regmap,rt1[i]);
3931 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3934 if(itype[i]==LOADLR) {
3935 ra=get_reg(i_regs->regmap,FTEMP);
3937 if(itype[i]==STORE||itype[i]==STORELR) {
3938 ra=get_reg(i_regs->regmap,agr);
3939 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3941 if(itype[i]==C1LS||itype[i]==C2LS) {
3942 if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3943 ra=get_reg(i_regs->regmap,FTEMP);
3944 else { // SWC1/SDC1/SWC2/SDC2
3945 ra=get_reg(i_regs->regmap,agr);
3946 if(ra<0) ra=get_reg(i_regs->regmap,-1);
3949 int rs=get_reg(i_regs->regmap,rs1[i]);
3952 int c=(i_regs->wasconst>>rs)&1;
3954 // Using r0 as a base address
3955 if(!entry||entry[ra]!=agr) {
3956 if (opcode[i]==0x22||opcode[i]==0x26) {
3957 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3958 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3959 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3961 emit_movimm(offset,ra);
3963 } // else did it in the previous cycle
// base reg not in a host register: load it from memory
3966 if(!entry||entry[ra]!=rs1[i])
3967 emit_loadreg(rs1[i],ra);
3968 //if(!entry||entry[ra]!=rs1[i])
3969 // printf("poor load scheduling!\n");
// constant base: materialize the final address as an immediate
3972 if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
3973 if(!entry||entry[ra]!=agr) {
3974 if (opcode[i]==0x22||opcode[i]==0x26) {
3975 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3976 }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3977 emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3979 emit_movimm(constmap[i][rs]+offset,ra);
3980 regs[i].loadedconst|=1<<ra;
3982 } // else did it in the previous cycle
3983 } // else load_consts already did it
// non-constant base with offset: add the immediate at runtime
3985 if(offset&&!c&&rs1[i]) {
3987 emit_addimm(rs,offset,ra);
3989 emit_addimm(ra,offset,ra);
3994 // Preload constants for next instruction
3995 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
3998 agr=AGEN1+((i+1)&1);
3999 ra=get_reg(i_regs->regmap,agr);
4001 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4002 int offset=imm[i+1];
4003 int c=(regs[i+1].wasconst>>rs)&1;
4004 if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4005 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4006 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4007 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4008 emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4010 emit_movimm(constmap[i+1][rs]+offset,ra);
4011 regs[i+1].loadedconst|=1<<ra;
4014 else if(rs1[i+1]==0) {
4015 // Using r0 as a base address
4016 if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4017 emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4018 }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4019 emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4021 emit_movimm(offset,ra);
// Look ahead from instruction i to find the last constant value this
// host register will hold while mapped to the same MIPS register.
// Returns nonzero (and sets *value) when the constant should actually
// be loaded; may fold a following load's immediate into the address.
// NOTE(review): partial extract (original line numbers skip values).
4028 static int get_final_value(int hr, int i, int *value)
4030 int reg=regs[i].regmap[hr];
// advance while the mapping and const-ness persist
4032 if(regs[i+1].regmap[hr]!=reg) break;
4033 if(!((regs[i+1].isconst>>hr)&1)) break;
4038 if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4039 *value=constmap[i][hr];
4043 if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4044 // Load in delay slot, out-of-order execution
4045 if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4047 // Precompute load address
4048 *value=constmap[i][hr]+imm[i+2];
4052 if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4054 // Precompute load address
4055 *value=constmap[i][hr]+imm[i+1];
4056 //printf("c=%x imm=%lx\n",(long)constmap[i][hr],imm[i+1]);
4061 *value=constmap[i][hr];
4062 //printf("c=%lx\n",(long)constmap[i][hr]);
4063 if(i==slen-1) return 1;
// skip the load when the next instruction no longer needs this reg
4065 return !((unneeded_reg[i+1]>>reg)&1);
4068 // Load registers with known constants
// Load host registers with their known constant values at instruction
// i, reusing constants already loaded (loadedconst propagation) and
// deriving similar values from already-loaded registers when cheaper.
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "&®map" below is HTML-entity damage for "&&regmap".
4069 static void load_consts(signed char pre[],signed char regmap[],int i)
4072 // propagate loaded constant flags
4074 regs[i].loadedconst=0;
4076 for(hr=0;hr<HOST_REGS;hr++) {
// const survives from i-1 if the same reg stays in the same host reg
4077 if(hr!=EXCLUDE_REG&®map[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
4078 &®map[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
4080 regs[i].loadedconst|=1<<hr;
4085 for(hr=0;hr<HOST_REGS;hr++) {
4086 if(hr!=EXCLUDE_REG&®map[hr]>=0) {
4087 //if(entry[hr]!=regmap[hr]) {
4088 if(!((regs[i].loadedconst>>hr)&1)) {
4089 assert(regmap[hr]<64);
4090 if(((regs[i].isconst>>hr)&1)&®map[hr]>0) {
4091 int value,similar=0;
4092 if(get_final_value(hr,i,&value)) {
4093 // see if some other register has similar value
4094 for(hr2=0;hr2<HOST_REGS;hr2++) {
4095 if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
4096 if(is_similar_value(value,constmap[i][hr2])) {
// derive from the similar register instead of a full immediate load
4104 if(get_final_value(hr2,i,&value2)) // is this needed?
4105 emit_movimm_from(value2,hr2,value,hr);
4107 emit_movimm(value,hr);
4113 emit_movimm(value,hr);
4116 regs[i].loadedconst|=1<<hr;
// Load every dirty, constant-valued register in regmap with its value
// from constmap[i] (used before exiting a block mid-stream).
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "&®map" below is HTML-entity damage for "&&regmap".
4123 void load_all_consts(signed char regmap[], u_int dirty, int i)
4127 for(hr=0;hr<HOST_REGS;hr++) {
4128 if(hr!=EXCLUDE_REG&®map[hr]>=0&&((dirty>>hr)&1)) {
4129 assert(regmap[hr] < 64);
4130 if(((regs[i].isconst>>hr)&1)&®map[hr]>0) {
4131 int value=constmap[i][hr];
4136 emit_movimm(value,hr);
4143 // Write out all dirty registers (except cycle count)
// Write out all dirty registers to memory (except the cycle count,
// CCREG, which is handled separately).
4144 static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty)
4147 for(hr=0;hr<HOST_REGS;hr++) {
4148 if(hr!=EXCLUDE_REG) {
4149 if(i_regmap[hr]>0) {
4150 if(i_regmap[hr]!=CCREG) {
4151 if((i_dirty>>hr)&1) {
4152 assert(i_regmap[hr]<64);
4153 emit_storereg(i_regmap[hr],hr);
4161 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4162 // This writes the registers not written by store_regs_bt
// Write out dirty registers that the branch target (addr) will reload
// (pairs with load_needed_regs); complements store_regs_bt, which
// writes the ones the target does NOT keep in the same host register.
4163 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr)
4166 int t=(addr-start)>>2;
4167 for(hr=0;hr<HOST_REGS;hr++) {
4168 if(hr!=EXCLUDE_REG) {
4169 if(i_regmap[hr]>0) {
4170 if(i_regmap[hr]!=CCREG) {
// only regs the target expects dirty in the same host register
4171 if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) {
4172 if((i_dirty>>hr)&1) {
4173 assert(i_regmap[hr]<64);
4174 emit_storereg(i_regmap[hr],hr);
4183 // Load all registers (except cycle count)
// Load every mapped register from memory (except CCREG); mapping 0
// means the MIPS zero register (handled in the missing branch).
// NOTE(review): partial extract (original line numbers skip values).
4184 void load_all_regs(signed char i_regmap[])
4187 for(hr=0;hr<HOST_REGS;hr++) {
4188 if(hr!=EXCLUDE_REG) {
4189 if(i_regmap[hr]==0) {
4193 if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4195 emit_loadreg(i_regmap[hr],hr);
4201 // Load all current registers also needed by next instruction
// Load only the registers that the next instruction's regmap also
// uses — avoids loading values that would be immediately overwritten.
// NOTE(review): partial extract (original line numbers skip values).
4202 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4205 for(hr=0;hr<HOST_REGS;hr++) {
4206 if(hr!=EXCLUDE_REG) {
4207 if(get_reg(next_regmap,i_regmap[hr])>=0) {
4208 if(i_regmap[hr]==0) {
4212 if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4214 emit_loadreg(i_regmap[hr],hr);
4221 // Load all regs, storing cycle count if necessary
// Load all registers for block-entry point t, adjusting the cycle
// count first (extra cycle for delay-slot entries) and storing CCREG
// to memory if the entry map doesn't keep it in HOST_CCREG.
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "&®s[t]" below is HTML-entity damage for "&&regs[t]".
4222 void load_regs_entry(int t)
4225 if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4226 else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
4227 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4228 emit_storereg(CCREG,HOST_CCREG);
4231 for(hr=0;hr<HOST_REGS;hr++) {
4232 if(regs[t].regmap_entry[hr]>=0&®s[t].regmap_entry[hr]<TEMPREG) {
4233 if(regs[t].regmap_entry[hr]==0) {
4236 else if(regs[t].regmap_entry[hr]!=CCREG)
4238 emit_loadreg(regs[t].regmap_entry[hr],hr);
4244 // Store dirty registers prior to branch
// Store dirty registers before a branch: for an internal branch, only
// those the target will not keep dirty in the same host register (and
// that it still needs); for an external branch, write out everything.
4245 void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4247 if(internal_branch(addr))
4249 int t=(addr-start)>>2;
4251 for(hr=0;hr<HOST_REGS;hr++) {
4252 if(hr!=EXCLUDE_REG) {
4253 if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4254 if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) {
4255 if((i_dirty>>hr)&1) {
4256 assert(i_regmap[hr]<64);
// skip registers the target never reads
4257 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4258 emit_storereg(i_regmap[hr],hr);
4267 // Branch out of this block, write out all dirty regs
4268 wb_dirtys(i_regmap,i_dirty);
4272 // Load all needed registers for branch target
// Load all registers the (internal) branch target expects at entry,
// storing the cycle count first when the target doesn't keep CCREG
// in HOST_CCREG.
// NOTE(review): partial extract (original line numbers skip values).
// NOTE(review): "&®s[t]" below is HTML-entity damage for "&&regs[t]".
4273 static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4275 //if(addr>=start && addr<(start+slen*4))
4276 if(internal_branch(addr))
4278 int t=(addr-start)>>2;
4280 // Store the cycle count before loading something else
4281 if(i_regmap[HOST_CCREG]!=CCREG) {
4282 assert(i_regmap[HOST_CCREG]==-1);
4284 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4285 emit_storereg(CCREG,HOST_CCREG);
4288 for(hr=0;hr<HOST_REGS;hr++) {
4289 if(hr!=EXCLUDE_REG&®s[t].regmap_entry[hr]>=0&®s[t].regmap_entry[hr]<TEMPREG) {
4290 if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4291 if(regs[t].regmap_entry[hr]==0) {
4294 else if(regs[t].regmap_entry[hr]!=CCREG)
4296 emit_loadreg(regs[t].regmap_entry[hr],hr);
// Decide whether the current register state matches the branch target's
// expected entry state closely enough to jump there directly (no
// reconciliation code needed). Returns 0 on any mismatch that matters.
// NOTE(review): partial extract (original line numbers skip values).
4304 static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4306 if(addr>=start && addr<start+slen*4-4)
4308 int t=(addr-start)>>2;
// target must keep the cycle count in HOST_CCREG
4310 if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4311 for(hr=0;hr<HOST_REGS;hr++)
4315 if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4317 if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4324 if(i_regmap[hr]<TEMPREG)
4326 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4329 else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4335 else // Same register but is it 32-bit or dirty?
4338 if(!((regs[t].dirty>>hr)&1))
4342 if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4344 //printf("%x: dirty no match\n",addr);
4352 // Delay slots are not valid branch targets
4353 //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP)) return 0;
4354 // Delay slots require additional processing, so do not match
4355 if(is_ds[t]) return 0;
// external address: only the cycle-count register placement matters
4360 for(hr=0;hr<HOST_REGS;hr++)
4366 if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
// drc_dbg_emit_do_cmp: debug aid — emit a call to do_insn_cmp before
// instruction i so the interpreter can cross-check recompiler state.
// Constants that only exist in constmap (not yet in guest memory) are
// flushed first so the comparison sees the same values the interpreter
// would. The surrounding #if/#else that selects this versus the empty
// macro at the bottom is in elided lines.
4381 static void drc_dbg_emit_do_cmp(int i)
4383 extern void do_insn_cmp();
4385 u_int hr, reglist = get_host_reglist(regs[i].regmap);
4387 assem_debug("//do_insn_cmp %08x\n", start+i*4);
4389 // write out changed consts to match the interpreter
// Skip at block entry points (bt[i]) — no prior state to flush.
4390 if (i > 0 && !bt[i]) {
4391 for (hr = 0; hr < HOST_REGS; hr++) {
4392 int reg = regs[i-1].regmap[hr];
4393 if (hr == EXCLUDE_REG || reg < 0)
4395 if (!((regs[i-1].isconst >> hr) & 1))
// Unchanged constant was already flushed on a previous instruction.
4397 if (i > 1 && reg == regs[i-2].regmap[hr] && constmap[i-1][hr] == constmap[i-2][hr])
4399 emit_movimm(constmap[i-1][hr],0);
4400 emit_storereg(reg, 0);
// Publish the current PC so the comparator knows which insn to check.
4403 emit_movimm(start+i*4,0);
4404 emit_writeword(0,&pcaddr);
4405 emit_far_call(do_insn_cmp);
4406 //emit_readword(&cycle,0);
4407 //emit_addimm(0,2,0);
4408 //emit_writeword(0,&cycle);
4410 restore_regs(reglist);
4411 assem_debug("\\\\do_insn_cmp\n");
// Non-debug builds: compare hook compiles away to nothing.
4414 #define drc_dbg_emit_do_cmp(x)
// ds_assemble_entry: assemble a standalone entry point for the delay slot
// of another branch (branch i targets ba[i], which lands in a delay slot).
// The slot instruction at index t is re-assembled here with its own entry
// register state, then control falls through to ba[i]+4, which must be an
// internal target (asserted below).
// NOTE(review): '&®s'/'®s' below is mojibake for '&&regs'/'&regs', and the
// switch's 'case' labels for the itype dispatch are in elided lines.
4417 // Used when a branch jumps into the delay slot of another branch
4418 static void ds_assemble_entry(int i)
4420 int t=(ba[i]-start)>>2;
// Record where this entry point was emitted.
4422 instr_addr[t] = out;
4423 assem_debug("Assemble delay slot at %x\n",ba[i]);
4424 assem_debug("<->\n");
4425 drc_dbg_emit_do_cmp(t);
// Spill CCREG if it was in HOST_CCREG on entry but isn't kept there.
4426 if(regs[t].regmap_entry[HOST_CCREG]==CCREG&®s[t].regmap[HOST_CCREG]!=CCREG)
4427 wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
4428 load_regs(regs[t].regmap_entry,regs[t].regmap,rs1[t],rs2[t]);
4429 address_generation(t,®s[t],regs[t].regmap_entry);
// Stores (incl. SWC1/SWC2 ranges) need the invalid-code pointer loaded.
4430 if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4431 load_regs(regs[t].regmap_entry,regs[t].regmap,INVCP,INVCP);
// Dispatch on itype[t] — one assembler per instruction class.
4435 alu_assemble(t,®s[t]);break;
4437 imm16_assemble(t,®s[t]);break;
4439 shift_assemble(t,®s[t]);break;
4441 shiftimm_assemble(t,®s[t]);break;
4443 load_assemble(t,®s[t]);break;
4445 loadlr_assemble(t,®s[t]);break;
4447 store_assemble(t,®s[t]);break;
4449 storelr_assemble(t,®s[t]);break;
4451 cop0_assemble(t,®s[t]);break;
4453 cop1_assemble(t,®s[t]);break;
4455 c1ls_assemble(t,®s[t]);break;
4457 cop2_assemble(t,®s[t]);break;
4459 c2ls_assemble(t,®s[t]);break;
4461 c2op_assemble(t,®s[t]);break;
4463 multdiv_assemble(t,®s[t]);break;
4465 mov_assemble(t,®s[t]);break;
// A branch inside a delay slot is architecturally undefined on MIPS.
4474 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
// Fall through to the instruction after the slot (always internal).
4476 store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
4477 load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
4478 if(internal_branch(ba[i]+4))
4479 assem_debug("branch: internal\n");
4481 assem_debug("branch: external\n");
4482 assert(internal_branch(ba[i]+4));
4483 add_to_linker(out,ba[i]+4,internal_branch(ba[i]+4));
// emit_extjump: emit an external jump to guest address `target` at code
// location `addr`, resolved at run time through the normal dynamic linker.
4487 static void emit_extjump(void *addr, u_int target)
4489 emit_extjump2(addr, target, dyna_linker);
// emit_extjump_ds: like emit_extjump, but routes through the delay-slot
// variant of the dynamic linker (target is a branch delay slot).
4492 static void emit_extjump_ds(void *addr, u_int target)
4494 emit_extjump2(addr, target, dyna_linker_ds);
// emit_mov2imm_compact: load imm1 into rt1 and imm2 into rt2, letting the
// backend derive the second constant from the first (emit_movimm_from)
// when that yields shorter code than two independent immediate loads.
4497 // Load 2 immediates optimizing for small code size
4498 static void emit_mov2imm_compact(int imm1,u_int rt1,int imm2,u_int rt2)
4500 emit_movimm(imm1,rt1);
4501 emit_movimm_from(imm1,rt1,imm2,rt2);
// do_cc: emit the cycle-count check for branch i to target addr.
// Detects single-instruction idle loops (branch-to-self with a NOP delay
// slot) and handles them specially; otherwise adds the accumulated cycles
// (or compares against them when the add was already done/inverted) and
// registers a CC_STUB to call out when the count expires. *adj reports how
// many cycles were already applied so the caller can compensate.
// NOTE(review): several interior lines (incl. the `count` computation and
// jaddr capture) are elided in this excerpt.
4504 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4514 //if(ba[i]>=start && ba[i]<(start+slen*4))
4515 if(internal_branch(ba[i]))
4518 if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
// Idle loop: branch to self with an empty (all-zero) delay slot.
4526 if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4528 if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4530 //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
// Burn cycles 4 at a time while idling (keep only the low 2 bits).
4531 emit_andimm(HOST_CCREG,3,HOST_CCREG);
4535 else if(*adj==0||invert) {
4536 int cycles=CLOCK_ADJUST(count+2);
// Short backward branches within the penalty threshold: charge raw cycles.
4541 if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4542 cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4545 emit_addimm_and_set_flags(cycles,HOST_CCREG);
// Cycles were pre-adjusted elsewhere: just compare against the budget.
4551 emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4555 add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
// do_ccstub: emit the out-of-line body for cycle-count stub n. Writes back
// dirty registers, materializes the correct return PC (which for a
// conditional branch depends on the branch direction, reconstructed here
// with compare + conditional-move sequences), calls cc_interrupt, reloads
// the register state for whichever path (TAKEN/NOTTAKEN/NULLDS) the stub
// represents, and jumps back into compiled code.
// NOTE(review): heavily elided excerpt — braces, #else arms, and several
// statements are missing; comments describe only what is visible.
4558 static void do_ccstub(int n)
4561 assem_debug("do_ccstub %x\n",start+(u_int)stubs[n].b*4);
// Patch the in-line jump to land here.
4562 set_jump_target(stubs[n].addr, out);
4564 if(stubs[n].d==NULLDS) {
4565 // Delay slot instruction is nullified ("likely" branch)
4566 wb_dirtys(regs[i].regmap,regs[i].dirty);
4568 else if(stubs[n].d!=TAKEN) {
4569 wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
4572 if(internal_branch(ba[i]))
4573 wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4577 // Save PC as return address
4578 emit_movimm(stubs[n].c,EAX);
4579 emit_writeword(EAX,&pcaddr);
4583 // Return address depends on which way the branch goes
4584 if(itype[i]==CJUMP||itype[i]==SJUMP)
4586 int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4587 int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4598 #ifdef DESTRUCTIVE_WRITEBACK
// NOTE(review): '&&1' here, vs '&1' on the same test below — '&&1' is true
// whenever ANY bit >= s1l is dirty; likely should be '&1'. Confirm upstream.
4600 if((branch_regs[i].dirty>>s1l)&&1)
4601 emit_loadreg(rs1[i],s1l);
// NOTE(review): this reloads rs2 into s1l under an s1l dirty check —
// looks like elided-context garble or a copy/paste bug; verify upstream.
4604 if((branch_regs[i].dirty>>s1l)&1)
4605 emit_loadreg(rs2[i],s1l);
4608 if((branch_regs[i].dirty>>s2l)&1)
4609 emit_loadreg(rs2[i],s2l);
// Pick scratch host registers not holding the branch operands.
4612 int addr=-1,alt=-1,ntaddr=-1;
4615 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4616 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4617 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4625 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4626 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4627 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4633 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4637 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4638 (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4639 (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4645 assert(hr<HOST_REGS);
// Recompute the branch outcome into `addr` (taken PC vs fall-through PC).
4647 if((opcode[i]&0x2f)==4) // BEQ
4649 #ifdef HAVE_CMOV_IMM
4650 if(s2l>=0) emit_cmp(s1l,s2l);
4651 else emit_test(s1l,s1l);
4652 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4654 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4655 if(s2l>=0) emit_cmp(s1l,s2l);
4656 else emit_test(s1l,s1l);
4657 emit_cmovne_reg(alt,addr);
4660 if((opcode[i]&0x2f)==5) // BNE
4662 #ifdef HAVE_CMOV_IMM
4663 if(s2l>=0) emit_cmp(s1l,s2l);
4664 else emit_test(s1l,s1l);
4665 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4667 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4668 if(s2l>=0) emit_cmp(s1l,s2l);
4669 else emit_test(s1l,s1l);
4670 emit_cmovne_reg(alt,addr);
4673 if((opcode[i]&0x2f)==6) // BLEZ
4675 //emit_movimm(ba[i],alt);
4676 //emit_movimm(start+i*4+8,addr);
4677 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4679 emit_cmovl_reg(alt,addr);
4681 if((opcode[i]&0x2f)==7) // BGTZ
4683 //emit_movimm(ba[i],addr);
4684 //emit_movimm(start+i*4+8,ntaddr);
4685 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4687 emit_cmovl_reg(ntaddr,addr);
4689 if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4691 //emit_movimm(ba[i],alt);
4692 //emit_movimm(start+i*4+8,addr);
4693 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4695 emit_cmovs_reg(alt,addr);
4697 if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4699 //emit_movimm(ba[i],addr);
4700 //emit_movimm(start+i*4+8,alt);
4701 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4703 emit_cmovs_reg(alt,addr);
// COP1 compare-bit branches: bit 23 (0x800000) is the FP condition flag.
4705 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4706 if(source[i]&0x10000) // BC1T
4708 //emit_movimm(ba[i],alt);
4709 //emit_movimm(start+i*4+8,addr);
4710 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4711 emit_testimm(s1l,0x800000);
4712 emit_cmovne_reg(alt,addr);
4716 //emit_movimm(ba[i],addr);
4717 //emit_movimm(start+i*4+8,alt);
4718 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4719 emit_testimm(s1l,0x800000);
4720 emit_cmovne_reg(alt,addr);
// Publish the resolved PC for cc_interrupt.
4723 emit_writeword(addr,&pcaddr);
// Register-indirect jumps: PC was computed into a register (or RTEMP
// when the delay slot clobbered the source register).
4728 int r=get_reg(branch_regs[i].regmap,rs1[i]);
4729 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4730 r=get_reg(branch_regs[i].regmap,RTEMP);
4732 emit_writeword(r,&pcaddr);
4734 else {SysPrintf("Unknown branch type in do_ccstub\n");abort();}
4736 // Update cycle count
4737 assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
// Temporarily add the stub's cycle delta around the interrupt call.
4738 if(stubs[n].a) emit_addimm(HOST_CCREG,CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
4739 emit_far_call(cc_interrupt);
4740 if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
// Reload register state for the path this stub belongs to.
4741 if(stubs[n].d==TAKEN) {
4742 if(internal_branch(ba[i]))
4743 load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4744 else if(itype[i]==RJUMP) {
4745 if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4746 emit_readword(&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4748 emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4750 }else if(stubs[n].d==NOTTAKEN) {
4751 if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4752 else load_all_regs(branch_regs[i].regmap);
4753 }else if(stubs[n].d==NULLDS) {
4754 // Delay slot instruction is nullified ("likely" branch)
4755 if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4756 else load_all_regs(regs[i].regmap);
4758 load_all_regs(branch_regs[i].regmap);
// Return into compiled code, or re-dispatch on the (possibly new) vaddr.
4760 if (stubs[n].retaddr)
4761 emit_jmp(stubs[n].retaddr);
4763 do_jump_vaddr(stubs[n].e);
// add_to_linker: queue a jump-site for later patching — code address,
// guest target, and whether the target is internal to this block (ext).
// NOTE(review): no `linkcount++` is visible in this excerpt — verify the
// counter is advanced (likely on an elided line), else every entry would
// overwrite slot 0.
4766 static void add_to_linker(void *addr, u_int target, int ext)
4768 assert(linkcount < ARRAY_SIZE(link_addr));
4769 link_addr[linkcount].addr = addr;
4770 link_addr[linkcount].target = target;
4771 link_addr[linkcount].ext = ext;
// ujump_assemble_write_ra: for JAL at index i, write the return address
// (PC of the instruction after the delay slot) into the host register
// holding guest r31. For internal returns a mini-hashtable insert may be
// used instead of a plain immediate; otherwise the hash-table bucket is
// prefetched. Interior lines (temp selection) are elided.
4775 static void ujump_assemble_write_ra(int i)
4778 unsigned int return_address;
4779 rt=get_reg(branch_regs[i].regmap,31);
4780 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
// Return address = branch PC + 8 (skips the delay slot).
4782 return_address=start+i*4+8;
// Internal return target whose delay slot doesn't write r31: use miniht.
4785 if(internal_branch(return_address)&&rt1[i+1]!=31) {
4786 int temp=-1; // note: must be ds-safe
4790 if(temp>=0) do_miniht_insert(return_address,rt,temp);
4791 else emit_movimm(return_address,rt);
4799 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4802 emit_movimm(return_address,rt); // PC into link register
4804 emit_prefetch(hash_table_get(return_address));
// ujump_assemble: assemble J/JAL at index i. Assembles the delay slot
// first, writes r31 for JAL (early if the slot reads it), writes back /
// reloads registers for the target, charges cycles via do_cc, and either
// inlines a delay-slot entry (ds_assemble_entry) or emits a linkable jump.
// NOTE(review): interior lines (ra_done handling, the emit_jmp, braces)
// are elided in this excerpt.
4810 static void ujump_assemble(int i,struct regstat *i_regs)
4813 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4814 address_generation(i+1,i_regs,regs[i].regmap_entry);
// PTEMP may hold the hash-table bucket pointer for the return address.
4816 int temp=get_reg(branch_regs[i].regmap,PTEMP);
4817 if(rt1[i]==31&&temp>=0)
4819 signed char *i_regmap=i_regs->regmap;
4820 int return_address=start+i*4+8;
4821 if(get_reg(branch_regs[i].regmap,31)>0)
4822 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
// Delay slot reads r31: write the return address before the slot runs.
4825 if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4826 ujump_assemble_write_ra(i); // writeback ra for DS
4829 ds_assemble(i+1,i_regs);
// After the jump, r0 and the link register are dead to this block.
4830 uint64_t bc_unneeded=branch_regs[i].u;
4831 bc_unneeded|=1|(1LL<<rt1[i]);
4832 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
4833 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
4834 if(!ra_done&&rt1[i]==31)
4835 ujump_assemble_write_ra(i);
4837 cc=get_reg(branch_regs[i].regmap,CCREG);
4838 assert(cc==HOST_CCREG);
4839 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4841 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4843 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
// do_cc may not have charged everything; add the remainder (+2 for
// the branch and its delay slot).
4844 if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4845 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4846 if(internal_branch(ba[i]))
4847 assem_debug("branch: internal\n");
4849 assem_debug("branch: external\n");
// Target is another branch's delay slot: assemble a dedicated entry.
4850 if(internal_branch(ba[i])&&is_ds[(ba[i]-start)>>2]) {
4851 ds_assemble_entry(i);
4854 add_to_linker(out,ba[i],internal_branch(ba[i]));
// rjump_assemble_write_ra: for JALR at index i, write the return address
// (branch PC + 8) into the host register holding the link destination
// rt1[i], and prefetch its hash-table bucket. The delay slot must not
// write the link register (asserted). Interior lines are elided.
4859 static void rjump_assemble_write_ra(int i)
4861 int rt,return_address;
4862 assert(rt1[i+1]!=rt1[i]);
4863 assert(rt2[i+1]!=rt1[i]);
4864 rt=get_reg(branch_regs[i].regmap,rt1[i]);
4865 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4867 return_address=start+i*4+8;
// PTEMP holds the hash-table bucket pointer unless already loaded.
4871 if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4874 emit_movimm(return_address,rt); // PC into link register
4876 emit_prefetch(hash_table_get(return_address));
// rjump_assemble: assemble JR/JALR at index i. The jump target lives in a
// register, so: copy it to RTEMP if the delay slot clobbers rs1 ("delay
// slot abuse"), assemble the slot, write the link register for JALR,
// write back all dirty registers (target unknown → external path), charge
// cycles, and dispatch via the mini-hashtable (do_miniht_jump).
// NOTE(review): heavily elided excerpt — braces and several statements
// (incl. the RHASH/RHTBL #ifdef arms) are missing.
4880 static void rjump_assemble(int i,struct regstat *i_regs)
4885 rs=get_reg(branch_regs[i].regmap,rs1[i]);
4887 if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4888 // Delay slot abuse, make a copy of the branch address register
4889 temp=get_reg(branch_regs[i].regmap,RTEMP);
4891 assert(regs[i].regmap[temp]==RTEMP);
4895 address_generation(i+1,i_regs,regs[i].regmap_entry);
// Preload the hash-table bucket for the JALR return address.
4899 if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
4900 signed char *i_regmap=i_regs->regmap;
4901 int return_address=start+i*4+8;
4902 if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4908 int rh=get_reg(regs[i].regmap,RHASH);
4909 if(rh>=0) do_preload_rhash(rh);
// Delay slot reads the link register: write it before the slot runs.
4912 if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4913 rjump_assemble_write_ra(i);
4916 ds_assemble(i+1,i_regs);
// After the jump: r0 and the link register are dead, but rs1 is needed.
4917 uint64_t bc_unneeded=branch_regs[i].u;
4918 bc_unneeded|=1|(1LL<<rt1[i]);
4919 bc_unneeded&=~(1LL<<rs1[i]);
4920 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
4921 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],CCREG);
4922 if(!ra_done&&rt1[i]!=0)
4923 rjump_assemble_write_ra(i);
4924 cc=get_reg(branch_regs[i].regmap,CCREG);
4925 assert(cc==HOST_CCREG);
4928 int rh=get_reg(branch_regs[i].regmap,RHASH);
4929 int ht=get_reg(branch_regs[i].regmap,RHTBL);
4931 if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
4932 do_preload_rhtbl(ht);
// Target unknown at compile time → write back everything (-1 = external).
4936 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
4937 #ifdef DESTRUCTIVE_WRITEBACK
// Writeback destroyed the target register: reload it if still intact.
4938 if((branch_regs[i].dirty>>rs)&1) {
4939 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
4940 emit_loadreg(rs1[i],rs);
4945 if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4949 do_miniht_load(ht,rh);
4952 //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
4953 //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
4955 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4956 add_stub(CC_STUB,out,NULL,0,i,-1,TAKEN,rs);
// RFE in the delay slot (COP0 funct 0x10) gets special handling (elided).
4957 if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
4958 // special case for RFE
4962 //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
// Indirect dispatch through the mini-hashtable.
4965 do_miniht_jump(rs,rh,ht);
4972 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
// Align for the A8 branch predictor with a harmless mov r13,r13.
4973 if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
// cjump_assemble: assemble the I-type conditional branches BEQ/BNE/BLEZ/
// BGTZ and their "likely" variants at index i. Two code-generation
// strategies: out-of-order (delay slot emitted before the compare — used
// when register state allows it) and in-order (compare first, then the
// slot on each path). `match` = target entry state matches, so the branch
// can jump straight in; otherwise the condition is inverted and fixup
// code runs on the taken path. Unconditional (r0==r0) and never-taken
// (likely with r0) degenerate cases are peeled off first.
// NOTE(review): heavily elided excerpt — most braces, the likely-branch
// arms and several emit_j* calls are missing; comments cover what is
// visible only.
4977 static void cjump_assemble(int i,struct regstat *i_regs)
4979 signed char *i_regmap=i_regs->regmap;
4982 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4983 assem_debug("match=%d\n",match);
4985 int unconditional=0,nop=0;
4987 int internal=internal_branch(ba[i]);
4988 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4989 if(!match) invert=1;
4990 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
// Backward branches are inverted on A8 to help its predictor.
4991 if(i>(ba[i]-start)>>2) invert=1;
4994 invert=1; // because of near cond. branches
// Source operands: from branch_regs if the slot was hoisted, else i_regs.
4998 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4999 s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5002 s1l=get_reg(i_regmap,rs1[i]);
5003 s2l=get_reg(i_regmap,rs2[i]);
// r0 vs r0: BEQ always taken; BNE(L)-style (odd opcode) never taken.
5005 if(rs1[i]==0&&rs2[i]==0)
5007 if(opcode[i]&1) nop=1;
5008 else unconditional=1;
5009 //assert(opcode[i]!=5);
5010 //assert(opcode[i]!=7);
5011 //assert(opcode[i]!=0x15);
5012 //assert(opcode[i]!=0x17);
5025 // Out of order execution (delay slot first)
5027 address_generation(i+1,i_regs,regs[i].regmap_entry);
5028 ds_assemble(i+1,i_regs);
5030 uint64_t bc_unneeded=branch_regs[i].u;
5031 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5033 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5034 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],rs2[i]);
5035 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
5036 cc=get_reg(branch_regs[i].regmap,CCREG);
5037 assert(cc==HOST_CCREG);
// Unconditional path: behave like a plain jump to ba[i].
5039 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5040 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5041 //assem_debug("cycle count (adj)\n");
5043 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
// Skip the jump entirely for a self-branch idle loop with empty slot.
5044 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5045 if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5046 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5048 assem_debug("branch: internal\n");
5050 assem_debug("branch: external\n");
5051 if(internal&&is_ds[(ba[i]-start)>>2]) {
5052 ds_assemble_entry(i);
5055 add_to_linker(out,ba[i],internal);
5058 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5059 if(((u_int)out)&7) emit_addnop(0);
// Never-taken path: just charge cycles and hook the CC stub.
5064 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5067 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
// Real conditional: emit compare + (possibly inverted) branch.
5070 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
5071 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5072 if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5074 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5076 if(opcode[i]==4) // BEQ
// s2l<0 means rs2 is r0: compare against zero via test.
5078 if(s2l>=0) emit_cmp(s1l,s2l);
5079 else emit_test(s1l,s1l);
5084 add_to_linker(out,ba[i],internal);
5088 if(opcode[i]==5) // BNE
5090 if(s2l>=0) emit_cmp(s1l,s2l);
5091 else emit_test(s1l,s1l);
5096 add_to_linker(out,ba[i],internal);
5100 if(opcode[i]==6) // BLEZ
5107 add_to_linker(out,ba[i],internal);
5111 if(opcode[i]==7) // BGTZ
5118 add_to_linker(out,ba[i],internal);
5123 if(taken) set_jump_target(taken, out);
5124 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5125 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5127 emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5128 add_to_linker(out,ba[i],internal);
// internal*2 flags a special link kind for this predictor workaround.
5131 add_to_linker(out,ba[i],internal*2);
// Inverted branch: fixup code on the taken path before jumping out.
5137 if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5138 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5139 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5141 assem_debug("branch: internal\n");
5143 assem_debug("branch: external\n");
5144 if(internal&&is_ds[(ba[i]-start)>>2]) {
5145 ds_assemble_entry(i);
5148 add_to_linker(out,ba[i],internal);
5152 set_jump_target(nottaken, out);
5155 if(nottaken1) set_jump_target(nottaken1, out);
5157 if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5159 } // (!unconditional)
5163 // In-order execution (branch first)
5164 //if(likely[i]) printf("IOL\n");
5167 void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
5168 if(!unconditional&&!nop) {
5169 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5171 if((opcode[i]&0x2f)==4) // BEQ
5173 if(s2l>=0) emit_cmp(s1l,s2l);
5174 else emit_test(s1l,s1l);
5178 if((opcode[i]&0x2f)==5) // BNE
5180 if(s2l>=0) emit_cmp(s1l,s2l);
5181 else emit_test(s1l,s1l);
5185 if((opcode[i]&0x2f)==6) // BLEZ
5191 if((opcode[i]&0x2f)==7) // BGTZ
5197 } // if(!unconditional)
// Taken path: assemble the delay slot, then jump to ba[i].
5199 uint64_t ds_unneeded=branch_regs[i].u;
5200 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5204 if(taken) set_jump_target(taken, out);
5205 assem_debug("1:\n");
5206 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5208 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
5209 address_generation(i+1,&branch_regs[i],0);
5210 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5211 ds_assemble(i+1,&branch_regs[i]);
5212 cc=get_reg(branch_regs[i].regmap,CCREG);
5214 emit_loadreg(CCREG,cc=HOST_CCREG);
5215 // CHECK: Is the following instruction (fall thru) allocated ok?
5217 assert(cc==HOST_CCREG);
5218 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5219 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5220 assem_debug("cycle count (adj)\n");
5221 if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5222 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5224 assem_debug("branch: internal\n");
5226 assem_debug("branch: external\n");
5227 if(internal&&is_ds[(ba[i]-start)>>2]) {
5228 ds_assemble_entry(i);
5231 add_to_linker(out,ba[i],internal);
// Not-taken path: assemble the slot again on the fall-through side.
5236 if(!unconditional) {
5237 if(nottaken1) set_jump_target(nottaken1, out);
5238 set_jump_target(nottaken, out);
5239 assem_debug("2:\n");
5241 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5242 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
5243 address_generation(i+1,&branch_regs[i],0);
5244 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
5245 ds_assemble(i+1,&branch_regs[i]);
5247 cc=get_reg(branch_regs[i].regmap,CCREG);
5248 if(cc==-1&&!likely[i]) {
5249 // Cycle count isn't in a register, temporarily load it then write it out
5250 emit_loadreg(CCREG,HOST_CCREG);
5251 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5254 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5255 emit_storereg(CCREG,HOST_CCREG);
5258 cc=get_reg(i_regmap,CCREG);
5259 assert(cc==HOST_CCREG);
5260 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
// Likely branches nullify the slot on fall-through (NULLDS stub kind).
5263 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
// sjump_assemble: assemble the REGIMM conditional branches BLTZ/BGEZ and
// their AL (and-link) / L (likely) variants at index i. Mirrors
// cjump_assemble's structure (out-of-order vs in-order strategies,
// match/invert logic) with two differences: only one source operand
// (compare against zero via the sign bit), and the AL forms write the
// return address into r31 even when the branch is not taken.
// NOTE(review): heavily elided excerpt — braces, the emit_j* calls and the
// likely-branch arms are missing; comments cover visible lines only.
5269 static void sjump_assemble(int i,struct regstat *i_regs)
5271 signed char *i_regmap=i_regs->regmap;
5274 match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5275 assem_debug("smatch=%d\n",match);
5277 int unconditional=0,nevertaken=0;
5279 int internal=internal_branch(ba[i]);
5280 if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5281 if(!match) invert=1;
5282 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5283 if(i>(ba[i]-start)>>2) invert=1;
5286 invert=1; // because of near cond. branches
5289 //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5290 //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5293 s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5296 s1l=get_reg(i_regmap,rs1[i]);
// rs1==r0: BGEZ-style (odd opcode2) always taken; BLTZ-style never.
5300 if(opcode2[i]&1) unconditional=1;
5302 // These are never taken (r0 is never less than zero)
5303 //assert(opcode2[i]!=0);
5304 //assert(opcode2[i]!=2);
5305 //assert(opcode2[i]!=0x10);
5306 //assert(opcode2[i]!=0x12);
5310 // Out of order execution (delay slot first)
5312 address_generation(i+1,i_regs,regs[i].regmap_entry);
5313 ds_assemble(i+1,i_regs);
5315 uint64_t bc_unneeded=branch_regs[i].u;
5316 bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5318 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5319 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],rs1[i]);
5320 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
// BxxZAL: link into r31 regardless of branch outcome.
5322 int rt,return_address;
5323 rt=get_reg(branch_regs[i].regmap,31);
5324 assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5326 // Save the PC even if the branch is not taken
5327 return_address=start+i*4+8;
5328 emit_movimm(return_address,rt); // PC into link register
5330 if(!nevertaken) emit_prefetch(hash_table_get(return_address));
5334 cc=get_reg(branch_regs[i].regmap,CCREG);
5335 assert(cc==HOST_CCREG);
// Unconditional: behaves like a plain jump to ba[i].
5337 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5338 //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5339 assem_debug("cycle count (adj)\n");
5341 do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5342 if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5343 if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5344 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5346 assem_debug("branch: internal\n");
5348 assem_debug("branch: external\n");
5349 if(internal&&is_ds[(ba[i]-start)>>2]) {
5350 ds_assemble_entry(i);
5353 add_to_linker(out,ba[i],internal);
5356 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5357 if(((u_int)out)&7) emit_addnop(0);
// Never taken: only the cycle charge and a NOTTAKEN stub remain.
5361 else if(nevertaken) {
5362 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5365 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
// Real conditional: test the sign of rs1.
5368 void *nottaken = NULL;
5369 do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5370 if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5373 if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5380 add_to_linker(out,ba[i],internal);
5384 if((opcode2[i]&0xf)==1) // BGEZ/BLTZAL
5391 add_to_linker(out,ba[i],internal);
5398 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5399 if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5401 emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5402 add_to_linker(out,ba[i],internal);
5405 add_to_linker(out,ba[i],internal*2);
// Inverted branch: fixup on the taken path before leaving the block.
5411 if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5412 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5413 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5415 assem_debug("branch: internal\n");
5417 assem_debug("branch: external\n");
5418 if(internal&&is_ds[(ba[i]-start)>>2]) {
5419 ds_assemble_entry(i);
5422 add_to_linker(out,ba[i],internal);
5426 set_jump_target(nottaken, out);
5430 if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5432 } // (!unconditional)
5436 // In-order execution (branch first)
5438 void *nottaken = NULL;
// BxxZAL link write (in-order variant).
5440 int rt,return_address;
5441 rt=get_reg(branch_regs[i].regmap,31);
5443 // Save the PC even if the branch is not taken
5444 return_address=start+i*4+8;
5445 emit_movimm(return_address,rt); // PC into link register
5447 emit_prefetch(hash_table_get(return_address));
5451 if(!unconditional) {
5452 //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5454 if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5460 if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5466 } // if(!unconditional)
// Taken path: assemble the delay slot, then jump to the target.
5468 uint64_t ds_unneeded=branch_regs[i].u;
5469 ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5473 //assem_debug("1:\n");
5474 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5476 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
5477 address_generation(i+1,&branch_regs[i],0);
5478 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5479 ds_assemble(i+1,&branch_regs[i]);
5480 cc=get_reg(branch_regs[i].regmap,CCREG);
5482 emit_loadreg(CCREG,cc=HOST_CCREG);
5483 // CHECK: Is the following instruction (fall thru) allocated ok?
5485 assert(cc==HOST_CCREG);
5486 store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5487 do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5488 assem_debug("cycle count (adj)\n");
5489 if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5490 load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5492 assem_debug("branch: internal\n");
5494 assem_debug("branch: external\n");
5495 if(internal&&is_ds[(ba[i]-start)>>2]) {
5496 ds_assemble_entry(i);
5499 add_to_linker(out,ba[i],internal);
// Not-taken path: assemble the slot on the fall-through side.
5504 if(!unconditional) {
5505 set_jump_target(nottaken, out);
5506 assem_debug("1:\n");
5508 wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5509 load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
5510 address_generation(i+1,&branch_regs[i],0);
5511 load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
5512 ds_assemble(i+1,&branch_regs[i]);
5514 cc=get_reg(branch_regs[i].regmap,CCREG);
5515 if(cc==-1&&!likely[i]) {
5516 // Cycle count isn't in a register, temporarily load it then write it out
5517 emit_loadreg(CCREG,HOST_CCREG);
5518 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5521 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5522 emit_storereg(CCREG,HOST_CCREG);
5525 cc=get_reg(i_regmap,CCREG);
5526 assert(cc==HOST_CCREG);
5527 emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
// Likely branches nullify the slot on fall-through (NULLDS stub kind).
5530 add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
// pagespan_assemble: assemble a branch whose delay slot falls on the next
// page (the branch is the last word of the current page). The branch
// condition is resolved into HOST_BTREG (branch-target register) using
// compare + conditional-move sequences, all dirty registers are written
// back, and external jumps are emitted for both the delay-slot path and
// the fall-through path so the linker can patch them independently.
// NOTE(review): heavily elided excerpt — braces, several #ifdef arms and
// the jump/likely handling are missing; comments cover visible lines only.
5536 static void pagespan_assemble(int i,struct regstat *i_regs)
5538 int s1l=get_reg(i_regs->regmap,rs1[i]);
5539 int s2l=get_reg(i_regs->regmap,rs2[i]);
5541 void *nottaken = NULL;
5542 int unconditional=0;
// Prefer HOST_BTREG itself as the target scratch when it is free.
5553 int addr=-1,alt=-1,ntaddr=-1;
5554 if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
// Otherwise pick scratch registers not holding the branch operands.
5558 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5559 (i_regs->regmap[hr]&63)!=rs1[i] &&
5560 (i_regs->regmap[hr]&63)!=rs2[i] )
5569 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5570 (i_regs->regmap[hr]&63)!=rs1[i] &&
5571 (i_regs->regmap[hr]&63)!=rs2[i] )
5577 if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5581 if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5582 (i_regs->regmap[hr]&63)!=rs1[i] &&
5583 (i_regs->regmap[hr]&63)!=rs2[i] )
5590 assert(hr<HOST_REGS);
5591 if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
5592 load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
// Charge the branch + delay-slot cycles up front.
5594 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5595 if(opcode[i]==2) // J
5599 if(opcode[i]==3) // JAL
5602 int rt=get_reg(i_regs->regmap,31);
5603 emit_movimm(start+i*4+8,rt);
5606 if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
5609 if(opcode2[i]==9) // JALR
5611 int rt=get_reg(i_regs->regmap,rt1[i]);
5612 emit_movimm(start+i*4+8,rt);
// Resolve each conditional form into `addr` (taken vs fall-through PC).
5615 if((opcode[i]&0x3f)==4) // BEQ
5622 #ifdef HAVE_CMOV_IMM
5624 if(s2l>=0) emit_cmp(s1l,s2l);
5625 else emit_test(s1l,s1l);
5626 emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5632 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5633 if(s2l>=0) emit_cmp(s1l,s2l);
5634 else emit_test(s1l,s1l);
5635 emit_cmovne_reg(alt,addr);
5638 if((opcode[i]&0x3f)==5) // BNE
5640 #ifdef HAVE_CMOV_IMM
5641 if(s2l>=0) emit_cmp(s1l,s2l);
5642 else emit_test(s1l,s1l);
5643 emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5646 emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5647 if(s2l>=0) emit_cmp(s1l,s2l);
5648 else emit_test(s1l,s1l);
5649 emit_cmovne_reg(alt,addr);
// Likely variants use explicit nottaken/taken jump targets instead.
5652 if((opcode[i]&0x3f)==0x14) // BEQL
5654 if(s2l>=0) emit_cmp(s1l,s2l);
5655 else emit_test(s1l,s1l);
5656 if(nottaken) set_jump_target(nottaken, out);
5660 if((opcode[i]&0x3f)==0x15) // BNEL
5662 if(s2l>=0) emit_cmp(s1l,s2l);
5663 else emit_test(s1l,s1l);
5666 if(taken) set_jump_target(taken, out);
5668 if((opcode[i]&0x3f)==6) // BLEZ
5670 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5672 emit_cmovl_reg(alt,addr);
5674 if((opcode[i]&0x3f)==7) // BGTZ
5676 emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5678 emit_cmovl_reg(ntaddr,addr);
5680 if((opcode[i]&0x3f)==0x16) // BLEZL
5682 assert((opcode[i]&0x3f)!=0x16);
5684 if((opcode[i]&0x3f)==0x17) // BGTZL
5686 assert((opcode[i]&0x3f)!=0x17);
5688 assert(opcode[i]!=1); // BLTZ/BGEZ
5690 //FIXME: Check CSREG
// COP1 branches test FP condition bit 23 (0x800000) of the status word.
5691 if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5692 if((source[i]&0x30000)==0) // BC1F
5694 emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5695 emit_testimm(s1l,0x800000);
5696 emit_cmovne_reg(alt,addr);
5698 if((source[i]&0x30000)==0x10000) // BC1T
5700 emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5701 emit_testimm(s1l,0x800000);
5702 emit_cmovne_reg(alt,addr);
5704 if((source[i]&0x30000)==0x20000) // BC1FL
5706 emit_testimm(s1l,0x800000);
5710 if((source[i]&0x30000)==0x30000) // BC1TL
5712 emit_testimm(s1l,0x800000);
// Flush everything and place the resolved target in HOST_BTREG.
5718 assert(i_regs->regmap[HOST_CCREG]==CCREG);
5719 wb_dirtys(regs[i].regmap,regs[i].dirty);
5720 if(likely[i]||unconditional)
5722 emit_movimm(ba[i],HOST_BTREG);
5724 else if(addr!=HOST_BTREG)
5726 emit_mov(addr,HOST_BTREG);
// Taken/slot path: external jump to the delay slot on the next page.
// NOTE(review): the +5 (= +4|1) appears to mark a delay-slot entry address,
// matching pagespan_ds's use of start+1 — confirm against the linker code.
5728 void *branch_addr=out;
5730 int target_addr=start+i*4+5;
5732 void *compiled_target_addr=check_addr(target_addr);
5733 emit_extjump_ds(branch_addr, target_addr);
5734 if(compiled_target_addr) {
5735 set_jump_target(branch_addr, compiled_target_addr);
5736 add_link(target_addr,stub);
5738 else set_jump_target(branch_addr, stub);
// Fall-through path for likely branches: skip the nullified slot.
5741 set_jump_target(nottaken, out);
5742 wb_dirtys(regs[i].regmap,regs[i].dirty);
5743 void *branch_addr=out;
5745 int target_addr=start+i*4+8;
5747 void *compiled_target_addr=check_addr(target_addr);
5748 emit_extjump_ds(branch_addr, target_addr);
5749 if(compiled_target_addr) {
5750 set_jump_target(branch_addr, compiled_target_addr);
5751 add_link(target_addr,stub);
5753 else set_jump_target(branch_addr, stub);
5757 // Assemble the delay slot for the above
5758 static void pagespan_ds()
5760 assem_debug("initial delay slot:\n");
5761 u_int vaddr=start+1;
5762 u_int page=get_page(vaddr);
5763 u_int vpage=get_vpage(vaddr);
5764 ll_add(jump_dirty+vpage,vaddr,(void *)out);
5766 ll_add(jump_in+page,vaddr,(void *)out);
5767 assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
5768 if(regs[0].regmap[HOST_CCREG]!=CCREG)
5769 wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty);
5770 if(regs[0].regmap[HOST_BTREG]!=BTREG)
5771 emit_writeword(HOST_BTREG,&branch_target);
5772 load_regs(regs[0].regmap_entry,regs[0].regmap,rs1[0],rs2[0]);
5773 address_generation(0,®s[0],regs[0].regmap_entry);
5774 if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
5775 load_regs(regs[0].regmap_entry,regs[0].regmap,INVCP,INVCP);
5779 alu_assemble(0,®s[0]);break;
5781 imm16_assemble(0,®s[0]);break;
5783 shift_assemble(0,®s[0]);break;
5785 shiftimm_assemble(0,®s[0]);break;
5787 load_assemble(0,®s[0]);break;
5789 loadlr_assemble(0,®s[0]);break;
5791 store_assemble(0,®s[0]);break;
5793 storelr_assemble(0,®s[0]);break;
5795 cop0_assemble(0,®s[0]);break;
5797 cop1_assemble(0,®s[0]);break;
5799 c1ls_assemble(0,®s[0]);break;
5801 cop2_assemble(0,®s[0]);break;
5803 c2ls_assemble(0,®s[0]);break;
5805 c2op_assemble(0,®s[0]);break;
5807 multdiv_assemble(0,®s[0]);break;
5809 mov_assemble(0,®s[0]);break;
5818 SysPrintf("Jump in the delay slot. This is probably a bug.\n");
5820 int btaddr=get_reg(regs[0].regmap,BTREG);
5822 btaddr=get_reg(regs[0].regmap,-1);
5823 emit_readword(&branch_target,btaddr);
5825 assert(btaddr!=HOST_CCREG);
5826 if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
5828 host_tempreg_acquire();
5829 emit_movimm(start+4,HOST_TEMPREG);
5830 emit_cmp(btaddr,HOST_TEMPREG);
5831 host_tempreg_release();
5833 emit_cmpimm(btaddr,start+4);
5837 store_regs_bt(regs[0].regmap,regs[0].dirty,-1);
5838 do_jump_vaddr(btaddr);
5839 set_jump_target(branch, out);
5840 store_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
5841 load_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
5844 // Basic liveness analysis for MIPS registers
5845 void unneeded_registers(int istart,int iend,int r)
5848 uint64_t u,gte_u,b,gte_b;
5849 uint64_t temp_u,temp_gte_u=0;
5850 uint64_t gte_u_unknown=0;
5851 if (HACK_ENABLED(NDHACK_GTE_UNNEEDED))
5855 gte_u=gte_u_unknown;
5857 //u=unneeded_reg[iend+1];
5859 gte_u=gte_unneeded[iend+1];
5862 for (i=iend;i>=istart;i--)
5864 //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
5865 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
5867 // If subroutine call, flag return address as a possible branch target
5868 if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
5870 if(ba[i]<start || ba[i]>=(start+slen*4))
5872 // Branch out of this block, flush all regs
5874 gte_u=gte_u_unknown;
5875 branch_unneeded_reg[i]=u;
5876 // Merge in delay slot
5877 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5878 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5881 gte_u&=~gte_rs[i+1];
5882 // If branch is "likely" (and conditional)
5883 // then we skip the delay slot on the fall-thru path
5886 u&=unneeded_reg[i+2];
5887 gte_u&=gte_unneeded[i+2];
5892 gte_u=gte_u_unknown;
5898 // Internal branch, flag target
5899 bt[(ba[i]-start)>>2]=1;
5900 if(ba[i]<=start+i*4) {
5904 // Unconditional branch
5908 // Conditional branch (not taken case)
5909 temp_u=unneeded_reg[i+2];
5910 temp_gte_u&=gte_unneeded[i+2];
5912 // Merge in delay slot
5913 temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5914 temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5916 temp_gte_u|=gte_rt[i+1];
5917 temp_gte_u&=~gte_rs[i+1];
5918 // If branch is "likely" (and conditional)
5919 // then we skip the delay slot on the fall-thru path
5922 temp_u&=unneeded_reg[i+2];
5923 temp_gte_u&=gte_unneeded[i+2];
5928 temp_gte_u=gte_u_unknown;
5931 temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
5932 temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5934 temp_gte_u|=gte_rt[i];
5935 temp_gte_u&=~gte_rs[i];
5936 unneeded_reg[i]=temp_u;
5937 gte_unneeded[i]=temp_gte_u;
5938 // Only go three levels deep. This recursion can take an
5939 // excessive amount of time if there are a lot of nested loops.
5941 unneeded_registers((ba[i]-start)>>2,i-1,r+1);
5943 unneeded_reg[(ba[i]-start)>>2]=1;
5944 gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
5949 // Unconditional branch
5950 u=unneeded_reg[(ba[i]-start)>>2];
5951 gte_u=gte_unneeded[(ba[i]-start)>>2];
5952 branch_unneeded_reg[i]=u;
5953 // Merge in delay slot
5954 u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5955 u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5958 gte_u&=~gte_rs[i+1];
5960 // Conditional branch
5961 b=unneeded_reg[(ba[i]-start)>>2];
5962 gte_b=gte_unneeded[(ba[i]-start)>>2];
5963 branch_unneeded_reg[i]=b;
5964 // Branch delay slot
5965 b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5966 b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5969 gte_b&=~gte_rs[i+1];
5970 // If branch is "likely" then we skip the
5971 // delay slot on the fall-thru path
5976 u&=unneeded_reg[i+2];
5977 gte_u&=gte_unneeded[i+2];
5984 branch_unneeded_reg[i]&=unneeded_reg[i+2];
5986 branch_unneeded_reg[i]=1;
5992 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
5994 // SYSCALL instruction (software interrupt)
5997 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
5999 // ERET instruction (return from interrupt)
6003 // Written registers are unneeded
6007 // Accessed registers are needed
6011 if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
6012 gte_u|=gte_rs[i]>e_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
6013 // Source-target dependencies
6014 // R0 is always unneeded
6018 gte_unneeded[i]=gte_u;
6020 printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6023 for(r=1;r<=CCREG;r++) {
6024 if((unneeded_reg[i]>>r)&1) {
6025 if(r==HIREG) printf(" HI");
6026 else if(r==LOREG) printf(" LO");
6027 else printf(" r%d",r);
6035 // Write back dirty registers as soon as we will no longer modify them,
6036 // so that we don't end up with lots of writes at the branches.
// Backward scan over instructions [istart..iend] computing, per host
// register, whether it WILL be dirtied (must be written back) or WON'T
// be dirtied (writeback can be skipped) on every path, so dirty values
// are flushed as early as possible instead of piling up at branches.
// `wr` selects the writeback-applying pass; assumption based on the
// visible uses — TODO confirm against the full file.
// The "&63" masking and the ">33"/"<34" limits filter the register-number
// encoding used by regmap entries (values above 33 appear to be
// non-writeback-able pseudo-registers — NOTE(review): confirm).
void clean_registers(int istart,int iend,int wr)
{
  u_int will_dirty_i,will_dirty_next,temp_will_dirty;
  u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
  // Seed from the instruction after the range when available
  will_dirty_i=will_dirty_next=0;
  wont_dirty_i=wont_dirty_next=0;
  will_dirty_i=will_dirty_next=will_dirty[iend+1];
  wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
  for (i=iend;i>=istart;i--)
  {
    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
    {
      if(ba[i]<start || ba[i]>=(start+slen*4))
      {
        // Branch out of this block, flush all regs
        // Unconditional branch
        // Merge in delay slot (will dirty)
        for(r=0;r<HOST_REGS;r++) {
          if(r!=EXCLUDE_REG) {
            if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
            if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
            if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
            if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
            if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
        // Conditional branch
        wont_dirty_i=wont_dirty_next;
        // Merge in delay slot (will dirty)
        for(r=0;r<HOST_REGS;r++) {
          if(r!=EXCLUDE_REG) {
            // Might not dirty if likely branch is not taken
            if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
            if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
            if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
            //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
            //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
            if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
            if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
        // Merge in delay slot (wont dirty)
        for(r=0;r<HOST_REGS;r++) {
          if(r!=EXCLUDE_REG) {
            if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
            if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
            if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
            if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
            if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
        #ifndef DESTRUCTIVE_WRITEBACK
        branch_regs[i].dirty&=wont_dirty_i;
        branch_regs[i].dirty|=will_dirty_i;
        // Internal branch within the block
        if(ba[i]<=start+i*4) {
          // Backward branch
          // Unconditional branch
          // Merge in delay slot (will dirty)
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
              if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
              if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
              if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
              if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
          // Conditional branch (not taken case)
          temp_will_dirty=will_dirty_next;
          temp_wont_dirty=wont_dirty_next;
          // Merge in delay slot (will dirty)
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              // Will not dirty if likely branch is not taken
              if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
              if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
              if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
              //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
              //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
              if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
              if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
              if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
          // Merge in delay slot (wont dirty)
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
              if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
              if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
          // Deal with changed mappings
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              if(regs[i].regmap[r]!=regmap_pre[i][r]) {
                temp_will_dirty&=~(1<<r);
                temp_wont_dirty&=~(1<<r);
                if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
                  temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
                  temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
                  temp_will_dirty|=1<<r;
                  temp_wont_dirty|=1<<r;
          will_dirty[i]=temp_will_dirty;
          wont_dirty[i]=temp_wont_dirty;
          // Re-analyze the loop body with the merged sets
          clean_registers((ba[i]-start)>>2,i-1,0);
          // Limit recursion.  It can take an excessive amount
          // of time if there are a lot of nested loops.
          will_dirty[(ba[i]-start)>>2]=0;
          wont_dirty[(ba[i]-start)>>2]=-1;
          // Forward branch
          // Unconditional branch
          //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
                will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
                wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
                if(branch_regs[i].regmap[r]>=0) {
                  will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
                  wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
          // Merge in delay slot
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
              if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
              if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
              if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
              if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
          // Conditional branch
          will_dirty_i=will_dirty_next;
          wont_dirty_i=wont_dirty_next;
          //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              signed char target_reg=branch_regs[i].regmap[r];
              if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
                will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
                wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
              else if(target_reg>=0) {
                will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
                wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
              // Treat delay slot as part of branch too
              /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
                will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
                wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
                will_dirty[i+1]&=~(1<<r);
          // Merge in delay slot
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              // Might not dirty if likely branch is not taken
              if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
              if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
              if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
              //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
              //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
              if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
              if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
          // Merge in delay slot (won't dirty)
          for(r=0;r<HOST_REGS;r++) {
            if(r!=EXCLUDE_REG) {
              if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
              if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
              if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
              if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
              if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
          #ifndef DESTRUCTIVE_WRITEBACK
          branch_regs[i].dirty&=wont_dirty_i;
          branch_regs[i].dirty|=will_dirty_i;
    else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
    {
      // SYSCALL instruction (software interrupt)
    else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
    {
      // ERET instruction (return from interrupt)
    // Ordinary instruction: propagate, then account for this insn's writes
    will_dirty_next=will_dirty_i;
    wont_dirty_next=wont_dirty_i;
    for(r=0;r<HOST_REGS;r++) {
      if(r!=EXCLUDE_REG) {
        if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
        if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
        if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
        if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
        if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
        if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
        if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
        if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
        if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP)
        {
          // Don't store a register immediately after writing it,
          // may prevent dual-issue.
          if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
          if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
    will_dirty[i]=will_dirty_i;
    wont_dirty[i]=wont_dirty_i;
    // Mark registers that won't be dirtied as not dirty
    /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
    for(r=0;r<HOST_REGS;r++) {
      if((will_dirty_i>>r)&1) {
    //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP)) {
    regs[i].dirty|=will_dirty_i;
    #ifndef DESTRUCTIVE_WRITEBACK
    regs[i].dirty&=wont_dirty_i;
    if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
    {
      // Propagate the sets past the delay slot of non-unconditional jumps
      if (i < iend-1 && !is_ujump(i)) {
        for(r=0;r<HOST_REGS;r++) {
          if(r!=EXCLUDE_REG) {
            if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
              regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
            }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
        for(r=0;r<HOST_REGS;r++) {
          if(r!=EXCLUDE_REG) {
            if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
              regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
            }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
    // Deal with changed mappings
    temp_will_dirty=will_dirty_i;
    temp_wont_dirty=wont_dirty_i;
    for(r=0;r<HOST_REGS;r++) {
      if(r!=EXCLUDE_REG) {
        if(regs[i].regmap[r]==regmap_pre[i][r]) {
          // Same mapping before and after this instruction
          #ifndef DESTRUCTIVE_WRITEBACK
          regs[i].wasdirty&=wont_dirty_i|~(1<<r);
          regs[i].wasdirty|=will_dirty_i&(1<<r);
        else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
          // Register moved to a different register
          will_dirty_i&=~(1<<r);
          wont_dirty_i&=~(1<<r);
          will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
          wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
          #ifndef DESTRUCTIVE_WRITEBACK
          regs[i].wasdirty&=wont_dirty_i|~(1<<r);
          regs[i].wasdirty|=will_dirty_i&(1<<r);
          will_dirty_i&=~(1<<r);
          wont_dirty_i&=~(1<<r);
          if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
            will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
            wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
          /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
// Print a one-line human-readable disassembly of decoded instruction
// slot `i` (uses the pre-decoded insn[], rs1/rs2/rt1/imm/ba tables).
// A leading '*' marks instructions flagged as branch targets.
void disassemble_inst(int i)
{
  if (bt[i]) printf("*"); else printf(" ");
  // Dispatch on itype: each case formats the operands that type uses
  printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
  printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
  printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
  // Register jumps: JALR (opcode2 0x9) with a non-$ra link shows both regs
  if (opcode[i]==0x9&&rt1[i]!=31)
    printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
  printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
  printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
  if(opcode[i]==0xf) //LUI
    printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
  printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
  printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
  printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
  printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
  printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
  printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
  // HI/LO moves: MFHI/MFLO print the destination, MTHI/MTLO the source
  if((opcode2[i]&0x1d)==0x10)
    printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
  else if((opcode2[i]&0x1d)==0x11)
    printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
  printf (" %x: %s\n",start+i*4,insn[i]);
  printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
  else if(opcode2[i]==4)
    printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
  else printf (" %x: %s\n",start+i*4,insn[i]);
  printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
  else if(opcode2[i]>3)
    printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
  else printf (" %x: %s\n",start+i*4,insn[i]);
  printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
  else if(opcode2[i]>3)
    printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
  else printf (" %x: %s\n",start+i*4,insn[i]);
  printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
  printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
  printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
  //printf (" %s %8x\n",insn[i],source[i]);
  printf (" %x: %s\n",start+i*4,insn[i]);
// No-op stub used when the disassembler is compiled out
static void disassemble_inst(int i) {}
6560 #define DRC_TEST_VAL 0x74657374
// Startup self-test: emits two tiny code blocks into the translation
// cache (each just returns DRC_TEST_VAL + i) and executes them, to
// verify that recompiled code can actually run on this system.
// Also sanity-checks that the asm-side structure layout matches the
// C side (rcnts must sit immediately after psxRegs).
static void new_dynarec_test(void)
{
  int (*testfunc)(void);
  // check structure linkage
  if ((u_char *)rcnts - (u_char *)&psxRegs != sizeof(psxRegs))
  {
    SysPrintf("linkage_arm* miscompilation/breakage detected.\n");
  SysPrintf("testing if we can run recompiled code...\n");
  ((volatile u_int *)out)[0]++; // make cache dirty
  for (i = 0; i < ARRAY_SIZE(ret); i++) {
    out = ndrc->translation_cache;
    beginning = start_block();
    emit_movimm(DRC_TEST_VAL + i, 0); // test
    end_block(beginning);
    testfunc = beginning;
    ret[i] = testfunc();
  // Both blocks must have returned their distinct values
  if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
    SysPrintf("test passed.\n");
  SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
  out = ndrc->translation_cache;
6596 // clear the state completely, instead of just marking
6597 // things invalid like invalidate_all_pages() does
// Reset the recompiler to a pristine state: empty translation cache,
// cleared hash tables and jump lists, all guest pages marked invalid.
// Heavier than invalidate_all_pages(), which only marks blocks stale.
void new_dynarec_clear_full(void)
{
  out = ndrc->translation_cache;    // restart emission at cache base
  memset(invalid_code,1,sizeof(invalid_code));
  memset(hash_table,0xff,sizeof(hash_table));
  memset(mini_ht,-1,sizeof(mini_ht));
  memset(restore_candidate,0,sizeof(restore_candidate));
  memset(shadow,0,sizeof(shadow));
  expirep=16384; // Expiry pointer, +2 blocks
  pending_exception=0;
  inv_code_start=inv_code_end=~0;
  // Drop every linked block list (4096 pages per list)
  for(n=0;n<4096;n++) ll_clear(jump_in+n);
  for(n=0;n<4096;n++) ll_clear(jump_out+n);
  for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
// One-time recompiler initialization: obtains executable memory for the
// translation cache (platform-specific: Vita kernel block, mmap, or a
// static buffer made executable via mprotect), then clears all state.
void new_dynarec_init(void)
{
  SysPrintf("Init new dynarec\n");

#ifdef BASE_ADDR_DYNAMIC
  // PS Vita path: kernel-allocated VM block
  sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
    SysPrintf("sceKernelAllocMemBlockForVM failed\n");
  int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&ndrc);
    SysPrintf("sceKernelGetMemBlockBase failed\n");
  // Generic path: mmap RWX memory, preferably just past the data segment
  uintptr_t desired_addr = 0;
  desired_addr = ((uintptr_t)&_end + 0xffffff) & ~0xffffffl;
  ndrc = mmap((void *)desired_addr, sizeof(*ndrc),
          PROT_READ | PROT_WRITE | PROT_EXEC,
          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (ndrc == MAP_FAILED) {
    SysPrintf("mmap() failed: %s\n", strerror(errno));
#ifndef NO_WRITE_EXEC
  // not all systems allow execute in data segment by default
  if (mprotect(ndrc, sizeof(ndrc->translation_cache) + sizeof(ndrc->tramp.ops),
               PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    SysPrintf("mprotect() failed: %s\n", strerror(errno));
  out = ndrc->translation_cache;
  cycle_multiplier=200;   // default cycle multiplier (percent, presumably)
  new_dynarec_clear_full();
  // Copy this into local area so we don't have to put it in every literal pool
  invc_ptr=invalid_code;
  // Offset so guest KSEG addresses map directly onto host rdram
  ram_offset=(uintptr_t)rdram-0x80000000;
  SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
// Tear down the recompiler: release the executable translation-cache
// memory (platform-specific) and free all linked block lists.
void new_dynarec_cleanup(void)
{
#ifdef BASE_ADDR_DYNAMIC
  sceKernelFreeMemBlock(sceBlock);   // Vita path
  if (munmap(ndrc, sizeof(*ndrc)) < 0)
    SysPrintf("munmap() failed\n");
  for(n=0;n<4096;n++) ll_clear(jump_in+n);
  for(n=0;n<4096;n++) ll_clear(jump_out+n);
  for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
  if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
// Translate a guest PSX address into a host pointer to the code to
// recompile. On success *limit receives the first guest address past
// the containing region (RAM mirror or BIOS). Also sets the BIOS
// cycle-multiplier override since BIOS runs from slow uncached ROM.
static u_int *get_source_start(u_int addr, u_int *limit)
{
  if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
    cycle_multiplier_override = 0;

  // KUSEG / KSEG2-uncached RAM mirrors
  if (addr < 0x00200000 ||
    (0xa0000000 <= addr && addr < 0xa0200000))
  {
    // used for BIOS calls mostly?
    *limit = (addr&0xa0000000)|0x00200000;
    return (u_int *)(rdram + (addr&0x1fffff));
  else if (!Config.HLE && (
    /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
    (0xbfc00000 <= addr && addr < 0xbfc80000)))
  {
    // BIOS. The multiplier should be much higher as it's uncached 8bit mem,
    // but timings in PCSX are too tied to the interpreter's BIAS
    if (!HACK_ENABLED(NDHACK_OVERRIDE_CYCLE_M))
      cycle_multiplier_override = 200;

    *limit = (addr & 0xfff00000) | 0x80000;
    return (u_int *)((u_char *)psxR + (addr&0x7ffff));
  // Cached KSEG0 RAM
  else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
    *limit = (addr & 0x80600000) + 0x00200000;
    return (u_int *)(rdram + (addr&0x1fffff));
// Scan forward from `addr` (at most 0x1000 bytes, clamped to the memory
// region limit) looking for a `jr $ra` (0x03e00008) — a heuristic for
// the end of the function starting at addr.
static u_int scan_for_ret(u_int addr)
{
  mem = get_source_start(addr, &limit);
  if (limit > addr + 0x1000)
    limit = addr + 0x1000;
  for (; addr < limit; addr += 4, mem++) {
    if (*mem == 0x03e00008) // jr $ra
6738 struct savestate_block {
6743 static int addr_cmp(const void *p1_, const void *p2_)
6745 const struct savestate_block *p1 = p1_, *p2 = p2_;
6746 return p1->addr - p2->addr;
// Serialize the set of compiled block entry points (address + register
// speculation flags) into `save` for inclusion in a savestate, so they
// can be precompiled on load. Blocks are sorted by address and entries
// falling inside an already-recorded function (per scan_for_ret) are
// deduplicated. Returns the number of bytes written (capped at `size`).
int new_dynarec_save_blocks(void *save, int size)
{
  struct savestate_block *blocks = save;
  int maxcount = size / sizeof(blocks[0]);
  struct savestate_block tmp_blocks[1024];
  struct ll_entry *head;
  int p, s, d, o, bcnt;

  // Walk every jump_in page list, batching entries into tmp_blocks
  for (p = 0; p < ARRAY_SIZE(jump_in); p++) {
    for (head = jump_in[p]; head != NULL; head = head->next) {
      tmp_blocks[bcnt].addr = head->vaddr;
      tmp_blocks[bcnt].regflags = head->reg_sv_flags;
    qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
    // Drop entries inside the span of the previous function and duplicates
    addr = tmp_blocks[0].addr;
    for (s = d = 0; s < bcnt; s++) {
      if (tmp_blocks[s].addr < addr)
      if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
        tmp_blocks[d++] = tmp_blocks[s];
      addr = scan_for_ret(tmp_blocks[s].addr);
    if (o + d > maxcount)
    memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
  return o * sizeof(blocks[0]);
// After loading a savestate, precompile the blocks recorded by
// new_dynarec_save_blocks. GPRs are temporarily overwritten with
// synthetic values (0x1f800000 for registers flagged as scratchpad
// pointers, 0x80000000 otherwise) so address speculation during
// compilation roughly matches the saved state; originals are restored.
void new_dynarec_load_blocks(const void *save, int size)
{
  const struct savestate_block *blocks = save;
  int count = size / sizeof(blocks[0]);
  u_int regs_save[32];

  // Compile the block at the current pc first
  get_addr(psxRegs.pc);

  // change GPRs for speculation to at least partially work..
  memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
  for (i = 1; i < 32; i++)
    psxRegs.GPR.r[i] = 0x80000000;

  for (b = 0; b < count; b++) {
    // Set flagged registers to a scratchpad-looking value, compile,
    // then reset them for the next block
    for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
        psxRegs.GPR.r[i] = 0x1f800000;
    get_addr(blocks[b].addr);
    for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
        psxRegs.GPR.r[i] = 0x80000000;
  memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
6820 int new_recompile_block(u_int addr)
6822 u_int pagelimit = 0;
6823 u_int state_rflags = 0;
6826 assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
6827 //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
6829 //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
6831 // this is just for speculation
6832 for (i = 1; i < 32; i++) {
6833 if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
6834 state_rflags |= 1 << i;
6837 start = (u_int)addr&~3;
6838 //assert(((u_int)addr&1)==0); // start-in-delay-slot flag
6839 new_dynarec_did_compile=1;
6840 if (Config.HLE && start == 0x80001000) // hlecall
6842 // XXX: is this enough? Maybe check hleSoftCall?
6843 void *beginning=start_block();
6844 u_int page=get_page(start);
6846 invalid_code[start>>12]=0;
6847 emit_movimm(start,0);
6848 emit_writeword(0,&pcaddr);
6849 emit_far_jump(new_dyna_leave);
6851 end_block(beginning);
6852 ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
6856 source = get_source_start(start, &pagelimit);
6857 if (source == NULL) {
6858 SysPrintf("Compile at bogus memory address: %08x\n", addr);
6862 /* Pass 1: disassemble */
6863 /* Pass 2: register dependencies, branch targets */
6864 /* Pass 3: register allocation */
6865 /* Pass 4: branch dependencies */
6866 /* Pass 5: pre-alloc */
6867 /* Pass 6: optimize clean/dirty state */
6868 /* Pass 7: flag 32-bit registers */
6869 /* Pass 8: assembly */
6870 /* Pass 9: linker */
6871 /* Pass 10: garbage collection / free memory */
6875 unsigned int type,op,op2;
6877 //printf("addr = %x source = %x %x\n", addr,source,source[0]);
6879 /* Pass 1 disassembly */
6881 for(i=0;!done;i++) {
6882 bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
6883 minimum_free_regs[i]=0;
6884 opcode[i]=op=source[i]>>26;
6887 case 0x00: strcpy(insn[i],"special"); type=NI;
6891 case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
6892 case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
6893 case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
6894 case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
6895 case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
6896 case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
6897 case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
6898 case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
6899 case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
6900 case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
6901 case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
6902 case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
6903 case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
6904 case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
6905 case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
6906 case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
6907 case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
6908 case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
6909 case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
6910 case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
6911 case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
6912 case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
6913 case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
6914 case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
6915 case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
6916 case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
6917 case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
6918 case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
6919 case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
6920 case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
6921 case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
6922 case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
6923 case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
6924 case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
6925 case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
6927 case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
6928 case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
6929 case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
6930 case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
6931 case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
6932 case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
6933 case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
6934 case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
6935 case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
6936 case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
6937 case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
6938 case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
6939 case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
6940 case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
6941 case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
6942 case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
6943 case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
6947 case 0x01: strcpy(insn[i],"regimm"); type=NI;
6948 op2=(source[i]>>16)&0x1f;
6951 case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
6952 case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
6953 case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
6954 case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
6955 case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
6956 case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
6957 case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
6958 case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
6959 case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
6960 case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
6961 case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
6962 case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
6963 case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
6964 case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
6967 case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
6968 case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
6969 case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
6970 case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
6971 case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
6972 case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
6973 case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
6974 case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
6975 case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
6976 case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
6977 case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
6978 case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
6979 case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
6980 case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
6981 case 0x10: strcpy(insn[i],"cop0"); type=NI;
6982 op2=(source[i]>>21)&0x1f;
6985 case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
6986 case 0x02: strcpy(insn[i],"CFC0"); type=COP0; break;
6987 case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
6988 case 0x06: strcpy(insn[i],"CTC0"); type=COP0; break;
6989 case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
6992 case 0x11: strcpy(insn[i],"cop1"); type=COP1;
6993 op2=(source[i]>>21)&0x1f;
6996 case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
6997 case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
6998 case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
6999 case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
7000 case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
7001 case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
7002 case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
7003 case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
7005 case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
7006 case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
7007 case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
7008 case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
7009 case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
7010 case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
7011 case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
7013 case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
7015 case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
7016 case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
7017 case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
7018 case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
7020 case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
7021 case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
7023 case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
7024 case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
7025 case 0x30: strcpy(insn[i],"LL"); type=NI; break;
7026 case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
7028 case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
7029 case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
7030 case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
7032 case 0x38: strcpy(insn[i],"SC"); type=NI; break;
7033 case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
7035 case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
7036 case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
7037 case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
7039 case 0x12: strcpy(insn[i],"COP2"); type=NI;
7040 op2=(source[i]>>21)&0x1f;
7042 if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
7043 if (gte_handlers[source[i]&0x3f]!=NULL) {
7044 if (gte_regnames[source[i]&0x3f]!=NULL)
7045 strcpy(insn[i],gte_regnames[source[i]&0x3f]);
7047 snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
7053 case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
7054 case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
7055 case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
7056 case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
7059 case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
7060 case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
7061 case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
7062 default: strcpy(insn[i],"???"); type=NI;
7063 SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
7068 /* Get registers/immediates */
7072 gte_rs[i]=gte_rt[i]=0;
7075 rs1[i]=(source[i]>>21)&0x1f;
7077 rt1[i]=(source[i]>>16)&0x1f;
7079 imm[i]=(short)source[i];
7083 rs1[i]=(source[i]>>21)&0x1f;
7084 rs2[i]=(source[i]>>16)&0x1f;
7087 imm[i]=(short)source[i];
7090 // LWL/LWR only load part of the register,
7091 // therefore the target register must be treated as a source too
7092 rs1[i]=(source[i]>>21)&0x1f;
7093 rs2[i]=(source[i]>>16)&0x1f;
7094 rt1[i]=(source[i]>>16)&0x1f;
7096 imm[i]=(short)source[i];
7097 if(op==0x26) dep1[i]=rt1[i]; // LWR
7100 if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
7101 else rs1[i]=(source[i]>>21)&0x1f;
7103 rt1[i]=(source[i]>>16)&0x1f;
7105 if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
7106 imm[i]=(unsigned short)source[i];
7108 imm[i]=(short)source[i];
7110 if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
7117 // The JAL instruction writes to r31.
7124 rs1[i]=(source[i]>>21)&0x1f;
7128 // The JALR instruction writes to rd.
7130 rt1[i]=(source[i]>>11)&0x1f;
7135 rs1[i]=(source[i]>>21)&0x1f;
7136 rs2[i]=(source[i]>>16)&0x1f;
7139 if(op&2) { // BGTZ/BLEZ
7145 rs1[i]=(source[i]>>21)&0x1f;
7149 if(op2&0x10) { // BxxAL
7151 // NOTE: If the branch is not taken, r31 is still overwritten
7153 likely[i]=(op2&2)>>1;
7156 rs1[i]=(source[i]>>21)&0x1f; // source
7157 rs2[i]=(source[i]>>16)&0x1f; // subtract amount
7158 rt1[i]=(source[i]>>11)&0x1f; // destination
7160 if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7161 dep1[i]=rs1[i];dep2[i]=rs2[i];
7163 else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
7164 dep1[i]=rs1[i];dep2[i]=rs2[i];
7168 rs1[i]=(source[i]>>21)&0x1f; // source
7169 rs2[i]=(source[i]>>16)&0x1f; // divisor
7178 if(op2==0x10) rs1[i]=HIREG; // MFHI
7179 if(op2==0x11) rt1[i]=HIREG; // MTHI
7180 if(op2==0x12) rs1[i]=LOREG; // MFLO
7181 if(op2==0x13) rt1[i]=LOREG; // MTLO
7182 if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
7183 if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
7187 rs1[i]=(source[i]>>16)&0x1f; // target of shift
7188 rs2[i]=(source[i]>>21)&0x1f; // shift amount
7189 rt1[i]=(source[i]>>11)&0x1f; // destination
7193 rs1[i]=(source[i]>>16)&0x1f;
7195 rt1[i]=(source[i]>>11)&0x1f;
7197 imm[i]=(source[i]>>6)&0x1f;
7198 // DSxx32 instructions
7199 if(op2>=0x3c) imm[i]|=0x20;
7206 if(op2==0||op2==2) rt1[i]=(source[i]>>16)&0x1F; // MFC0/CFC0
7207 if(op2==4||op2==6) rs1[i]=(source[i]>>16)&0x1F; // MTC0/CTC0
7208 if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
7209 if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
7216 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
7217 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
7225 if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
7226 if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
7228 int gr=(source[i]>>11)&0x1F;
7231 case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
7232 case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
7233 case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
7234 case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
7238 rs1[i]=(source[i]>>21)&0x1F;
7242 imm[i]=(short)source[i];
7245 rs1[i]=(source[i]>>21)&0x1F;
7249 imm[i]=(short)source[i];
7250 if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
7251 else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
7258 gte_rs[i]=gte_reg_reads[source[i]&0x3f];
7259 gte_rt[i]=gte_reg_writes[source[i]&0x3f];
7260 gte_rt[i]|=1ll<<63; // every op changes flags
7261 if((source[i]&0x3f)==GTE_MVMVA) {
7262 int v = (source[i] >> 15) & 3;
7263 gte_rs[i]&=~0xe3fll;
7264 if(v==3) gte_rs[i]|=0xe00ll;
7265 else gte_rs[i]|=3ll<<(v*2);
7282 /* Calculate branch target addresses */
7284 ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
7285 else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
7286 ba[i]=start+i*4+8; // Ignore never taken branch
7287 else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
7288 ba[i]=start+i*4+8; // Ignore never taken branch
7289 else if(type==CJUMP||type==SJUMP)
7290 ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
7292 if (i > 0 && is_jump(i-1)) {
7294 // branch in delay slot?
7295 if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP) {
7296 // don't handle first branch and call interpreter if it's hit
7297 SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
7300 // basic load delay detection
7301 else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
7302 int t=(ba[i-1]-start)/4;
7303 if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
7304 // jump target wants DS result - potential load delay effect
7305 SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
7307 bt[t+1]=1; // expected return from interpreter
7309 else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
7310 !(i>=3&&is_jump(i-3))) {
7311 // v0 overwrite like this is a sign of trouble, bail out
7312 SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
7318 rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
7322 i--; // don't compile the DS
7325 /* Is this the end of the block? */
7326 if (i > 0 && is_ujump(i-1)) {
7327 if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
7331 if(stop_after_jal) done=1;
7333 if((source[i+1]&0xfc00003f)==0x0d) done=1;
7335 // Don't recompile stuff that's already compiled
7336 if(check_addr(start+i*4+4)) done=1;
7337 // Don't get too close to the limit
7338 if(i>MAXBLOCK/2) done=1;
7340 if(itype[i]==SYSCALL&&stop_after_jal) done=1;
7341 if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
7343 // Does the block continue due to a branch?
7346 if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
7347 if(ba[j]==start+i*4+4) done=j=0;
7348 if(ba[j]==start+i*4+8) done=j=0;
7351 //assert(i<MAXBLOCK-1);
7352 if(start+i*4==pagelimit-4) done=1;
7353 assert(start+i*4<pagelimit);
7354 if (i==MAXBLOCK-1) done=1;
7355 // Stop if we're compiling junk
7356 if(itype[i]==NI&&opcode[i]==0x11) {
7357 done=stop_after_jal=1;
7358 SysPrintf("Disabled speculative precompilation\n");
7362 if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP) {
7363 if(start+i*4==pagelimit) {
7369 /* Pass 2 - Register dependencies and branch targets */
7371 unneeded_registers(0,slen-1,0);
7373 /* Pass 3 - Register allocation */
7375 struct regstat current; // Current register allocations/status
7377 current.u=unneeded_reg[0];
7378 clear_all_regs(current.regmap);
7379 alloc_reg(¤t,0,CCREG);
7380 dirty_reg(¤t,CCREG);
7383 current.waswritten=0;
7389 // First instruction is delay slot
7394 current.regmap[HOST_BTREG]=BTREG;
7402 for(hr=0;hr<HOST_REGS;hr++)
7404 // Is this really necessary?
7405 if(current.regmap[hr]==0) current.regmap[hr]=-1;
7408 current.waswritten=0;
7411 memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
7412 regs[i].wasconst=current.isconst;
7413 regs[i].wasdirty=current.dirty;
7414 regs[i].loadedconst=0;
7415 if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP) {
7417 current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7424 current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
7425 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
7427 } else { SysPrintf("oops, branch at end of block with no delay slot\n");abort(); }
7431 ds=0; // Skip delay slot, already allocated as part of branch
7432 // ...but we need to alloc it in case something jumps here
7434 current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
7436 current.u=branch_unneeded_reg[i-1];
7438 current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
7440 struct regstat temp;
7441 memcpy(&temp,¤t,sizeof(current));
7442 temp.wasdirty=temp.dirty;
7443 // TODO: Take into account unconditional branches, as below
7444 delayslot_alloc(&temp,i);
7445 memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
7446 regs[i].wasdirty=temp.wasdirty;
7447 regs[i].dirty=temp.dirty;
7451 // Create entry (branch target) regmap
7452 for(hr=0;hr<HOST_REGS;hr++)
7454 int r=temp.regmap[hr];
7456 if(r!=regmap_pre[i][hr]) {
7457 regs[i].regmap_entry[hr]=-1;
7462 if((current.u>>r)&1) {
7463 regs[i].regmap_entry[hr]=-1;
7464 regs[i].regmap[hr]=-1;
7465 //Don't clear regs in the delay slot as the branch might need them
7466 //current.regmap[hr]=-1;
7468 regs[i].regmap_entry[hr]=r;
7471 // First instruction expects CCREG to be allocated
7472 if(i==0&&hr==HOST_CCREG)
7473 regs[i].regmap_entry[hr]=CCREG;
7475 regs[i].regmap_entry[hr]=-1;
7479 else { // Not delay slot
7482 //current.isconst=0; // DEBUG
7483 //current.wasconst=0; // DEBUG
7484 //regs[i].wasconst=0; // DEBUG
7485 clear_const(¤t,rt1[i]);
7486 alloc_cc(¤t,i);
7487 dirty_reg(¤t,CCREG);
7489 alloc_reg(¤t,i,31);
7490 dirty_reg(¤t,31);
7491 //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
7492 //assert(rt1[i+1]!=rt1[i]);
7494 alloc_reg(¤t,i,PTEMP);
7498 delayslot_alloc(¤t,i+1);
7499 //current.isconst=0; // DEBUG
7501 //printf("i=%d, isconst=%x\n",i,current.isconst);
7504 //current.isconst=0;
7505 //current.wasconst=0;
7506 //regs[i].wasconst=0;
7507 clear_const(¤t,rs1[i]);
7508 clear_const(¤t,rt1[i]);
7509 alloc_cc(¤t,i);
7510 dirty_reg(¤t,CCREG);
7511 if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
7512 alloc_reg(¤t,i,rs1[i]);
7514 alloc_reg(¤t,i,rt1[i]);
7515 dirty_reg(¤t,rt1[i]);
7516 assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
7517 assert(rt1[i+1]!=rt1[i]);
7519 alloc_reg(¤t,i,PTEMP);
7523 if(rs1[i]==31) { // JALR
7524 alloc_reg(¤t,i,RHASH);
7525 alloc_reg(¤t,i,RHTBL);
7528 delayslot_alloc(¤t,i+1);
7530 // The delay slot overwrites our source register,
7531 // allocate a temporary register to hold the old value.
7535 delayslot_alloc(¤t,i+1);
7537 alloc_reg(¤t,i,RTEMP);
7539 //current.isconst=0; // DEBUG
7544 //current.isconst=0;
7545 //current.wasconst=0;
7546 //regs[i].wasconst=0;
7547 clear_const(¤t,rs1[i]);
7548 clear_const(¤t,rs2[i]);
7549 if((opcode[i]&0x3E)==4) // BEQ/BNE
7551 alloc_cc(¤t,i);
7552 dirty_reg(¤t,CCREG);
7553 if(rs1[i]) alloc_reg(¤t,i,rs1[i]);
7554 if(rs2[i]) alloc_reg(¤t,i,rs2[i]);
7555 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
7556 (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
7557 // The delay slot overwrites one of our conditions.
7558 // Allocate the branch condition registers instead.
7562 if(rs1[i]) alloc_reg(¤t,i,rs1[i]);
7563 if(rs2[i]) alloc_reg(¤t,i,rs2[i]);
7568 delayslot_alloc(¤t,i+1);
7572 if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
7574 alloc_cc(¤t,i);
7575 dirty_reg(¤t,CCREG);
7576 alloc_reg(¤t,i,rs1[i]);
7577 if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
7578 // The delay slot overwrites one of our conditions.
7579 // Allocate the branch condition registers instead.
7583 if(rs1[i]) alloc_reg(¤t,i,rs1[i]);
7588 delayslot_alloc(¤t,i+1);
7592 // Don't alloc the delay slot yet because we might not execute it
7593 if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
7598 alloc_cc(¤t,i);
7599 dirty_reg(¤t,CCREG);
7600 alloc_reg(¤t,i,rs1[i]);
7601 alloc_reg(¤t,i,rs2[i]);
7604 if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
7609 alloc_cc(¤t,i);
7610 dirty_reg(¤t,CCREG);
7611 alloc_reg(¤t,i,rs1[i]);
7614 //current.isconst=0;
7617 //current.isconst=0;
7618 //current.wasconst=0;
7619 //regs[i].wasconst=0;
7620 clear_const(¤t,rs1[i]);
7621 clear_const(¤t,rt1[i]);
7622 //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
7623 if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
7625 alloc_cc(¤t,i);
7626 dirty_reg(¤t,CCREG);
7627 alloc_reg(¤t,i,rs1[i]);
7628 if (rt1[i]==31) { // BLTZAL/BGEZAL
7629 alloc_reg(¤t,i,31);
7630 dirty_reg(¤t,31);
7631 //#ifdef REG_PREFETCH
7632 //alloc_reg(¤t,i,PTEMP);
7635 if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
7636 ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
7637 // Allocate the branch condition registers instead.
7641 if(rs1[i]) alloc_reg(¤t,i,rs1[i]);
7646 delayslot_alloc(¤t,i+1);
7650 // Don't alloc the delay slot yet because we might not execute it
7651 if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
7656 alloc_cc(¤t,i);
7657 dirty_reg(¤t,CCREG);
7658 alloc_reg(¤t,i,rs1[i]);
7661 //current.isconst=0;
7664 imm16_alloc(¤t,i);
7668 load_alloc(¤t,i);
7672 store_alloc(¤t,i);
7675 alu_alloc(¤t,i);
7678 shift_alloc(¤t,i);
7681 multdiv_alloc(¤t,i);
7684 shiftimm_alloc(¤t,i);
7687 mov_alloc(¤t,i);
7690 cop0_alloc(¤t,i);
7695 cop2_alloc(¤t,i);
7698 c1ls_alloc(¤t,i);
7701 c2ls_alloc(¤t,i);
7704 c2op_alloc(¤t,i);
7709 syscall_alloc(¤t,i);
7712 pagespan_alloc(¤t,i);
7716 // Create entry (branch target) regmap
7717 for(hr=0;hr<HOST_REGS;hr++)
7720 r=current.regmap[hr];
7722 if(r!=regmap_pre[i][hr]) {
7723 // TODO: delay slot (?)
7724 or=get_reg(regmap_pre[i],r); // Get old mapping for this register
7725 if(or<0||(r&63)>=TEMPREG){
7726 regs[i].regmap_entry[hr]=-1;
7730 // Just move it to a different register
7731 regs[i].regmap_entry[hr]=r;
7732 // If it was dirty before, it's still dirty
7733 if((regs[i].wasdirty>>or)&1) dirty_reg(¤t,r&63);
7740 regs[i].regmap_entry[hr]=0;
7745 if((current.u>>r)&1) {
7746 regs[i].regmap_entry[hr]=-1;
7747 //regs[i].regmap[hr]=-1;
7748 current.regmap[hr]=-1;
7750 regs[i].regmap_entry[hr]=r;
7754 // Branches expect CCREG to be allocated at the target
7755 if(regmap_pre[i][hr]==CCREG)
7756 regs[i].regmap_entry[hr]=CCREG;
7758 regs[i].regmap_entry[hr]=-1;
7761 memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
7764 if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
7765 current.waswritten|=1<<rs1[i-1];
7766 current.waswritten&=~(1<<rt1[i]);
7767 current.waswritten&=~(1<<rt2[i]);
7768 if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
7769 current.waswritten&=~(1<<rs1[i]);
7771 /* Branch post-alloc */
7774 current.wasdirty=current.dirty;
7775 switch(itype[i-1]) {
7777 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7778 branch_regs[i-1].isconst=0;
7779 branch_regs[i-1].wasconst=0;
7780 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
7781 alloc_cc(&branch_regs[i-1],i-1);
7782 dirty_reg(&branch_regs[i-1],CCREG);
7783 if(rt1[i-1]==31) { // JAL
7784 alloc_reg(&branch_regs[i-1],i-1,31);
7785 dirty_reg(&branch_regs[i-1],31);
7787 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7788 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7791 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7792 branch_regs[i-1].isconst=0;
7793 branch_regs[i-1].wasconst=0;
7794 branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
7795 alloc_cc(&branch_regs[i-1],i-1);
7796 dirty_reg(&branch_regs[i-1],CCREG);
7797 alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
7798 if(rt1[i-1]!=0) { // JALR
7799 alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
7800 dirty_reg(&branch_regs[i-1],rt1[i-1]);
7803 if(rs1[i-1]==31) { // JALR
7804 alloc_reg(&branch_regs[i-1],i-1,RHASH);
7805 alloc_reg(&branch_regs[i-1],i-1,RHTBL);
7808 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7809 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7812 if((opcode[i-1]&0x3E)==4) // BEQ/BNE
7814 alloc_cc(¤t,i-1);
7815 dirty_reg(¤t,CCREG);
7816 if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
7817 (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
7818 // The delay slot overwrote one of our conditions
7819 // Delay slot goes after the test (in order)
7820 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7822 delayslot_alloc(¤t,i);
7827 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
7828 // Alloc the branch condition registers
7829 if(rs1[i-1]) alloc_reg(¤t,i-1,rs1[i-1]);
7830 if(rs2[i-1]) alloc_reg(¤t,i-1,rs2[i-1]);
7832 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7833 branch_regs[i-1].isconst=0;
7834 branch_regs[i-1].wasconst=0;
7835 memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap));
7836 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7839 if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
7841 alloc_cc(¤t,i-1);
7842 dirty_reg(¤t,CCREG);
7843 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
7844 // The delay slot overwrote the branch condition
7845 // Delay slot goes after the test (in order)
7846 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7848 delayslot_alloc(¤t,i);
7853 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
7854 // Alloc the branch condition register
7855 alloc_reg(¤t,i-1,rs1[i-1]);
7857 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7858 branch_regs[i-1].isconst=0;
7859 branch_regs[i-1].wasconst=0;
7860 memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap));
7861 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7864 // Alloc the delay slot in case the branch is taken
7865 if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
7867 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7868 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
7869 alloc_cc(&branch_regs[i-1],i);
7870 dirty_reg(&branch_regs[i-1],CCREG);
7871 delayslot_alloc(&branch_regs[i-1],i);
7872 branch_regs[i-1].isconst=0;
7873 alloc_reg(¤t,i,CCREG); // Not taken path
7874 dirty_reg(¤t,CCREG);
7875 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7878 if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
7880 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7881 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
7882 alloc_cc(&branch_regs[i-1],i);
7883 dirty_reg(&branch_regs[i-1],CCREG);
7884 delayslot_alloc(&branch_regs[i-1],i);
7885 branch_regs[i-1].isconst=0;
7886 alloc_reg(¤t,i,CCREG); // Not taken path
7887 dirty_reg(¤t,CCREG);
7888 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7892 //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
7893 if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
7895 alloc_cc(¤t,i-1);
7896 dirty_reg(¤t,CCREG);
7897 if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
7898 // The delay slot overwrote the branch condition
7899 // Delay slot goes after the test (in order)
7900 current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7902 delayslot_alloc(¤t,i);
7907 current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
7908 // Alloc the branch condition register
7909 alloc_reg(¤t,i-1,rs1[i-1]);
7911 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7912 branch_regs[i-1].isconst=0;
7913 branch_regs[i-1].wasconst=0;
7914 memcpy(&branch_regs[i-1].regmap_entry,¤t.regmap,sizeof(current.regmap));
7915 memcpy(constmap[i],constmap[i-1],sizeof(constmap[i]));
7918 // Alloc the delay slot in case the branch is taken
7919 if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
7921 memcpy(&branch_regs[i-1],¤t,sizeof(current));
7922 branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
7923 alloc_cc(&branch_regs[i-1],i);
7924 dirty_reg(&branch_regs[i-1],CCREG);
7925 delayslot_alloc(&branch_regs[i-1],i);
7926 branch_regs[i-1].isconst=0;
7927 alloc_reg(¤t,i,CCREG); // Not taken path
7928 dirty_reg(¤t,CCREG);
7929 memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7931 // FIXME: BLTZAL/BGEZAL
7932 if(opcode2[i-1]&0x10) { // BxxZAL
7933 alloc_reg(&branch_regs[i-1],i-1,31);
7934 dirty_reg(&branch_regs[i-1],31);
7941 if(rt1[i-1]==31) // JAL/JALR
7943 // Subroutine call will return here, don't alloc any registers
7945 clear_all_regs(current.regmap);
7946 alloc_reg(¤t,i,CCREG);
7947 dirty_reg(¤t,CCREG);
7951 // Internal branch will jump here, match registers to caller
7953 clear_all_regs(current.regmap);
7954 alloc_reg(¤t,i,CCREG);
7955 dirty_reg(¤t,CCREG);
7958 if(ba[j]==start+i*4+4) {
7959 memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
7960 current.dirty=branch_regs[j].dirty;
7965 if(ba[j]==start+i*4+4) {
7966 for(hr=0;hr<HOST_REGS;hr++) {
7967 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
7968 current.regmap[hr]=-1;
7970 current.dirty&=branch_regs[j].dirty;
7979 // Count cycles in between branches
7981 if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
7985 #if !defined(DRC_DBG)
7986 else if(itype[i]==C2OP&>e_cycletab[source[i]&0x3f]>2)
7988 // this should really be removed since the real stalls have been implemented,
7989 // but doing so causes sizeable perf regression against the older version
7990 u_int gtec = gte_cycletab[source[i] & 0x3f];
7991 cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? gtec/2 : 2;
7993 else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
7997 else if(itype[i]==C2LS)
7999 // same as with C2OP
8000 cc += HACK_ENABLED(NDHACK_GTE_NO_STALL) ? 4 : 2;
8009 regs[i].dirty=current.dirty;
8010 regs[i].isconst=current.isconst;
8011 memcpy(constmap[i],current_constmap,sizeof(constmap[i]));
8013 for(hr=0;hr<HOST_REGS;hr++) {
8014 if(hr!=EXCLUDE_REG&®s[i].regmap[hr]>=0) {
8015 if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
8016 regs[i].wasconst&=~(1<<hr);
8020 if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
8021 regs[i].waswritten=current.waswritten;
8024 /* Pass 4 - Cull unused host registers */
8028 for (i=slen-1;i>=0;i--)
8031 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
8033 if(ba[i]<start || ba[i]>=(start+slen*4))
8035 // Branch out of this block, don't need anything
8041 // Need whatever matches the target
8043 int t=(ba[i]-start)>>2;
8044 for(hr=0;hr<HOST_REGS;hr++)
8046 if(regs[i].regmap_entry[hr]>=0) {
8047 if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
8051 // Conditional branch may need registers for following instructions
8055 nr|=needed_reg[i+2];
8056 for(hr=0;hr<HOST_REGS;hr++)
8058 if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
8059 //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
8063 // Don't need stuff which is overwritten
8064 //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8065 //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8066 // Merge in delay slot
8067 for(hr=0;hr<HOST_REGS;hr++)
8070 // These are overwritten unless the branch is "likely"
8071 // and the delay slot is nullified if not taken
8072 if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8073 if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8075 if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8076 if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8077 if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8078 if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8079 if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
8080 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8081 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8085 else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
8087 // SYSCALL instruction (software interrupt)
8090 else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
8092 // ERET instruction (return from interrupt)
8098 for(hr=0;hr<HOST_REGS;hr++) {
8099 if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
8100 if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
8101 if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8102 if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8106 for(hr=0;hr<HOST_REGS;hr++)
8108 // Overwritten registers are not needed
8109 if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8110 if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8111 if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8112 // Source registers are needed
8113 if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
8114 if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
8115 if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8116 if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8117 if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
8118 if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8119 if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8121 // Don't store a register immediately after writing it,
8122 // may prevent dual-issue.
8123 // But do so if this is a branch target, otherwise we
8124 // might have to load the register before the branch.
8125 if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
8126 if((regmap_pre[i][hr]>0&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
8127 if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8128 if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8130 if((regs[i].regmap_entry[hr]>0&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
8131 if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8132 if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8136 // Cycle count is needed at branches. Assume it is needed at the target too.
8137 if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==SPAN) {
8138 if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
8139 if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
8144 // Deallocate unneeded registers
8145 for(hr=0;hr<HOST_REGS;hr++)
8148 if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
8149 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
8150 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
8151 (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
8156 regs[i].regmap[hr]=-1;
8157 regs[i].isconst&=~(1<<hr);
8159 regmap_pre[i+2][hr]=-1;
8160 regs[i+2].wasconst&=~(1<<hr);
8165 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
8168 if(itype[i+1]==STORE || itype[i+1]==STORELR ||
8169 (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
8172 if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
8173 itype[i+1]==C1LS || itype[i+1]==C2LS)
8175 if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
8176 (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
8177 (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
8178 regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
8179 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
8180 regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
8181 regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
8182 regs[i].regmap[hr]!=map )
8184 regs[i].regmap[hr]=-1;
8185 regs[i].isconst&=~(1<<hr);
8186 if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
8187 (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
8188 (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
8189 branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
8190 (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
8191 branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
8192 branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
8193 branch_regs[i].regmap[hr]!=map)
8195 branch_regs[i].regmap[hr]=-1;
8196 branch_regs[i].regmap_entry[hr]=-1;
8199 if(!likely[i]&&i<slen-2) {
8200 regmap_pre[i+2][hr]=-1;
8201 regs[i+2].wasconst&=~(1<<hr);
8213 if(itype[i]==STORE || itype[i]==STORELR ||
8214 (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
8217 if(itype[i]==LOADLR || itype[i]==STORELR ||
8218 itype[i]==C1LS || itype[i]==C2LS)
8220 if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
8221 regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
8222 (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
8223 (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
8225 if(i<slen-1&&!is_ds[i]) {
8226 assert(regs[i].regmap[hr]<64);
8227 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]>0)
8228 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
8230 SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
8231 assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
8233 regmap_pre[i+1][hr]=-1;
8234 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
8235 regs[i+1].wasconst&=~(1<<hr);
8237 regs[i].regmap[hr]=-1;
8238 regs[i].isconst&=~(1<<hr);
8246 /* Pass 5 - Pre-allocate registers */
8248 // If a register is allocated during a loop, try to allocate it for the
8249 // entire loop, if possible. This avoids loading/storing registers
8250 // inside of the loop.
8252 signed char f_regmap[HOST_REGS];
8253 clear_all_regs(f_regmap);
8254 for(i=0;i<slen-1;i++)
8256 if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
8258 if(ba[i]>=start && ba[i]<(start+i*4))
8259 if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
8260 ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
8261 ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
8262 ||itype[i+1]==SHIFT||itype[i+1]==COP1
8263 ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
8265 int t=(ba[i]-start)>>2;
8266 if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP)) // loop_preload can't handle jumps into delay slots
8267 if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
8268 for(hr=0;hr<HOST_REGS;hr++)
8270 if(regs[i].regmap[hr]>=0) {
8271 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8272 // dealloc old register
8274 for(n=0;n<HOST_REGS;n++)
8276 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8278 // and alloc new one
8279 f_regmap[hr]=regs[i].regmap[hr];
8282 if(branch_regs[i].regmap[hr]>=0) {
8283 if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
8284 // dealloc old register
8286 for(n=0;n<HOST_REGS;n++)
8288 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
8290 // and alloc new one
8291 f_regmap[hr]=branch_regs[i].regmap[hr];
8295 if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
8296 f_regmap[hr]=branch_regs[i].regmap[hr];
8298 if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
8299 f_regmap[hr]=branch_regs[i].regmap[hr];
8301 // Avoid dirty->clean transition
8302 #ifdef DESTRUCTIVE_WRITEBACK
8303 if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
8305 // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
8306 // case above, however it's always a good idea. We can't hoist the
8307 // load if the register was already allocated, so there's no point
8308 // wasting time analyzing most of these cases. It only "succeeds"
8309 // when the mapping was different and the load can be replaced with
8310 // a mov, which is of negligible benefit. So such cases are
8312 if(f_regmap[hr]>0) {
8313 if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
8317 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
8318 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
8320 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
8321 //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
8323 if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
8324 if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
8326 if(get_reg(regs[i].regmap,r&63)<0) break;
8327 if(get_reg(branch_regs[i].regmap,r&63)<0) break;
8330 while(k>1&®s[k-1].regmap[hr]==-1) {
8331 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
8332 //printf("no free regs for store %x\n",start+(k-1)*4);
8335 if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
8336 //printf("no-match due to different register\n");
8339 if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP) {
8340 //printf("no-match due to branch\n");
8343 // call/ret fast path assumes no registers allocated
8344 if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
8350 if(regs[k-1].regmap[hr]==f_regmap[hr]&®map_pre[k][hr]==f_regmap[hr]) {
8351 //printf("Extend r%d, %x ->\n",hr,start+k*4);
8353 regs[k].regmap_entry[hr]=f_regmap[hr];
8354 regs[k].regmap[hr]=f_regmap[hr];
8355 regmap_pre[k+1][hr]=f_regmap[hr];
8356 regs[k].wasdirty&=~(1<<hr);
8357 regs[k].dirty&=~(1<<hr);
8358 regs[k].wasdirty|=(1<<hr)®s[k-1].dirty;
8359 regs[k].dirty|=(1<<hr)®s[k].wasdirty;
8360 regs[k].wasconst&=~(1<<hr);
8361 regs[k].isconst&=~(1<<hr);
8366 //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
8369 assert(regs[i-1].regmap[hr]==f_regmap[hr]);
8370 if(regs[i-1].regmap[hr]==f_regmap[hr]&®map_pre[i][hr]==f_regmap[hr]) {
8371 //printf("OK fill %x (r%d)\n",start+i*4,hr);
8372 regs[i].regmap_entry[hr]=f_regmap[hr];
8373 regs[i].regmap[hr]=f_regmap[hr];
8374 regs[i].wasdirty&=~(1<<hr);
8375 regs[i].dirty&=~(1<<hr);
8376 regs[i].wasdirty|=(1<<hr)®s[i-1].dirty;
8377 regs[i].dirty|=(1<<hr)®s[i-1].dirty;
8378 regs[i].wasconst&=~(1<<hr);
8379 regs[i].isconst&=~(1<<hr);
8380 branch_regs[i].regmap_entry[hr]=f_regmap[hr];
8381 branch_regs[i].wasdirty&=~(1<<hr);
8382 branch_regs[i].wasdirty|=(1<<hr)®s[i].dirty;
8383 branch_regs[i].regmap[hr]=f_regmap[hr];
8384 branch_regs[i].dirty&=~(1<<hr);
8385 branch_regs[i].dirty|=(1<<hr)®s[i].dirty;
8386 branch_regs[i].wasconst&=~(1<<hr);
8387 branch_regs[i].isconst&=~(1<<hr);
8389 regmap_pre[i+2][hr]=f_regmap[hr];
8390 regs[i+2].wasdirty&=~(1<<hr);
8391 regs[i+2].wasdirty|=(1<<hr)®s[i].dirty;
8396 // Alloc register clean at beginning of loop,
8397 // but may dirty it in pass 6
8398 regs[k].regmap_entry[hr]=f_regmap[hr];
8399 regs[k].regmap[hr]=f_regmap[hr];
8400 regs[k].dirty&=~(1<<hr);
8401 regs[k].wasconst&=~(1<<hr);
8402 regs[k].isconst&=~(1<<hr);
8403 if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP) {
8404 branch_regs[k].regmap_entry[hr]=f_regmap[hr];
8405 branch_regs[k].regmap[hr]=f_regmap[hr];
8406 branch_regs[k].dirty&=~(1<<hr);
8407 branch_regs[k].wasconst&=~(1<<hr);
8408 branch_regs[k].isconst&=~(1<<hr);
8410 regmap_pre[k+2][hr]=f_regmap[hr];
8411 regs[k+2].wasdirty&=~(1<<hr);
8416 regmap_pre[k+1][hr]=f_regmap[hr];
8417 regs[k+1].wasdirty&=~(1<<hr);
8420 if(regs[j].regmap[hr]==f_regmap[hr])
8421 regs[j].regmap_entry[hr]=f_regmap[hr];
8425 if(regs[j].regmap[hr]>=0)
8427 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
8428 //printf("no-match due to different register\n");
8433 // Stop on unconditional branch
8436 if(itype[j]==CJUMP||itype[j]==SJUMP)
8439 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
8442 if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
8445 if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
8446 //printf("no-match due to different register (branch)\n");
8450 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
8451 //printf("No free regs for store %x\n",start+j*4);
8454 assert(f_regmap[hr]<64);
8461 // Non branch or undetermined branch target
8462 for(hr=0;hr<HOST_REGS;hr++)
8464 if(hr!=EXCLUDE_REG) {
8465 if(regs[i].regmap[hr]>=0) {
8466 if(f_regmap[hr]!=regs[i].regmap[hr]) {
8467 // dealloc old register
8469 for(n=0;n<HOST_REGS;n++)
8471 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8473 // and alloc new one
8474 f_regmap[hr]=regs[i].regmap[hr];
8479 // Try to restore cycle count at branch targets
8481 for(j=i;j<slen-1;j++) {
8482 if(regs[j].regmap[HOST_CCREG]!=-1) break;
8483 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
8484 //printf("no free regs for store %x\n",start+j*4);
8488 if(regs[j].regmap[HOST_CCREG]==CCREG) {
8490 //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
8492 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8493 regs[k].regmap[HOST_CCREG]=CCREG;
8494 regmap_pre[k+1][HOST_CCREG]=CCREG;
8495 regs[k+1].wasdirty|=1<<HOST_CCREG;
8496 regs[k].dirty|=1<<HOST_CCREG;
8497 regs[k].wasconst&=~(1<<HOST_CCREG);
8498 regs[k].isconst&=~(1<<HOST_CCREG);
8501 regs[j].regmap_entry[HOST_CCREG]=CCREG;
8503 // Work backwards from the branch target
8504 if(j>i&&f_regmap[HOST_CCREG]==CCREG)
8506 //printf("Extend backwards\n");
8509 while(regs[k-1].regmap[HOST_CCREG]==-1) {
8510 if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
8511 //printf("no free regs for store %x\n",start+(k-1)*4);
8516 if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
8517 //printf("Extend CC, %x ->\n",start+k*4);
8519 regs[k].regmap_entry[HOST_CCREG]=CCREG;
8520 regs[k].regmap[HOST_CCREG]=CCREG;
8521 regmap_pre[k+1][HOST_CCREG]=CCREG;
8522 regs[k+1].wasdirty|=1<<HOST_CCREG;
8523 regs[k].dirty|=1<<HOST_CCREG;
8524 regs[k].wasconst&=~(1<<HOST_CCREG);
8525 regs[k].isconst&=~(1<<HOST_CCREG);
8530 //printf("Fail Extend CC, %x ->\n",start+k*4);
8534 if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
8535 itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
8536 itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1)
8538 memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
8543 // This allocates registers (if possible) one instruction prior
8544 // to use, which can avoid a load-use penalty on certain CPUs.
8545 for(i=0;i<slen-1;i++)
8547 if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP))
8551 if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
8552 ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
8555 if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
8557 if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
8559 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8560 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8561 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8562 regs[i].isconst&=~(1<<hr);
8563 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8564 constmap[i][hr]=constmap[i+1][hr];
8565 regs[i+1].wasdirty&=~(1<<hr);
8566 regs[i].dirty&=~(1<<hr);
8571 if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
8573 if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
8575 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8576 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8577 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8578 regs[i].isconst&=~(1<<hr);
8579 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8580 constmap[i][hr]=constmap[i+1][hr];
8581 regs[i+1].wasdirty&=~(1<<hr);
8582 regs[i].dirty&=~(1<<hr);
8586 // Preload target address for load instruction (non-constant)
8587 if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8588 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
8590 if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
8592 regs[i].regmap[hr]=rs1[i+1];
8593 regmap_pre[i+1][hr]=rs1[i+1];
8594 regs[i+1].regmap_entry[hr]=rs1[i+1];
8595 regs[i].isconst&=~(1<<hr);
8596 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8597 constmap[i][hr]=constmap[i+1][hr];
8598 regs[i+1].wasdirty&=~(1<<hr);
8599 regs[i].dirty&=~(1<<hr);
8603 // Load source into target register
8604 if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8605 if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
8607 if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
8609 regs[i].regmap[hr]=rs1[i+1];
8610 regmap_pre[i+1][hr]=rs1[i+1];
8611 regs[i+1].regmap_entry[hr]=rs1[i+1];
8612 regs[i].isconst&=~(1<<hr);
8613 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8614 constmap[i][hr]=constmap[i+1][hr];
8615 regs[i+1].wasdirty&=~(1<<hr);
8616 regs[i].dirty&=~(1<<hr);
8620 // Address for store instruction (non-constant)
8621 if(itype[i+1]==STORE||itype[i+1]==STORELR
8622 ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
8623 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8624 hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
8625 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
8626 else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
8628 if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
8630 regs[i].regmap[hr]=rs1[i+1];
8631 regmap_pre[i+1][hr]=rs1[i+1];
8632 regs[i+1].regmap_entry[hr]=rs1[i+1];
8633 regs[i].isconst&=~(1<<hr);
8634 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8635 constmap[i][hr]=constmap[i+1][hr];
8636 regs[i+1].wasdirty&=~(1<<hr);
8637 regs[i].dirty&=~(1<<hr);
8641 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
8642 if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8644 hr=get_reg(regs[i+1].regmap,FTEMP);
8646 if(regs[i].regmap[hr]<0&®s[i+1].regmap_entry[hr]<0)
8648 regs[i].regmap[hr]=rs1[i+1];
8649 regmap_pre[i+1][hr]=rs1[i+1];
8650 regs[i+1].regmap_entry[hr]=rs1[i+1];
8651 regs[i].isconst&=~(1<<hr);
8652 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8653 constmap[i][hr]=constmap[i+1][hr];
8654 regs[i+1].wasdirty&=~(1<<hr);
8655 regs[i].dirty&=~(1<<hr);
8657 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
8659 // move it to another register
8660 regs[i+1].regmap[hr]=-1;
8661 regmap_pre[i+2][hr]=-1;
8662 regs[i+1].regmap[nr]=FTEMP;
8663 regmap_pre[i+2][nr]=FTEMP;
8664 regs[i].regmap[nr]=rs1[i+1];
8665 regmap_pre[i+1][nr]=rs1[i+1];
8666 regs[i+1].regmap_entry[nr]=rs1[i+1];
8667 regs[i].isconst&=~(1<<nr);
8668 regs[i+1].isconst&=~(1<<nr);
8669 regs[i].dirty&=~(1<<nr);
8670 regs[i+1].wasdirty&=~(1<<nr);
8671 regs[i+1].dirty&=~(1<<nr);
8672 regs[i+2].wasdirty&=~(1<<nr);
8676 if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||||itype[i+1]==C2LS*/) {
8677 if(itype[i+1]==LOAD)
8678 hr=get_reg(regs[i+1].regmap,rt1[i+1]);
8679 if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
8680 hr=get_reg(regs[i+1].regmap,FTEMP);
8681 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
8682 hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
8683 if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
8685 if(hr>=0&®s[i].regmap[hr]<0) {
8686 int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
8687 if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
8688 regs[i].regmap[hr]=AGEN1+((i+1)&1);
8689 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
8690 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
8691 regs[i].isconst&=~(1<<hr);
8692 regs[i+1].wasdirty&=~(1<<hr);
8693 regs[i].dirty&=~(1<<hr);
8702 /* Pass 6 - Optimize clean/dirty state */
8703 clean_registers(0,slen-1,1);
8705 /* Pass 7 - Identify 32-bit registers */
8706 for (i=slen-1;i>=0;i--)
8708 if(itype[i]==CJUMP||itype[i]==SJUMP)
8710 // Conditional branch
8711 if((source[i]>>16)!=0x1000&&i<slen-2) {
8712 // Mark this address as a branch target since it may be called
8713 // upon return from interrupt
8719 if(itype[slen-1]==SPAN) {
8720 bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
8724 /* Debug/disassembly */
8729 for(r=1;r<=CCREG;r++) {
8730 if((unneeded_reg[i]>>r)&1) {
8731 if(r==HIREG) printf(" HI");
8732 else if(r==LOREG) printf(" LO");
8733 else printf(" r%d",r);
8737 #if defined(__i386__) || defined(__x86_64__)
8738 printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
8741 printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
8743 #if defined(__i386__) || defined(__x86_64__)
8745 if(needed_reg[i]&1) printf("eax ");
8746 if((needed_reg[i]>>1)&1) printf("ecx ");
8747 if((needed_reg[i]>>2)&1) printf("edx ");
8748 if((needed_reg[i]>>3)&1) printf("ebx ");
8749 if((needed_reg[i]>>5)&1) printf("ebp ");
8750 if((needed_reg[i]>>6)&1) printf("esi ");
8751 if((needed_reg[i]>>7)&1) printf("edi ");
8753 printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
8755 if(regs[i].wasdirty&1) printf("eax ");
8756 if((regs[i].wasdirty>>1)&1) printf("ecx ");
8757 if((regs[i].wasdirty>>2)&1) printf("edx ");
8758 if((regs[i].wasdirty>>3)&1) printf("ebx ");
8759 if((regs[i].wasdirty>>5)&1) printf("ebp ");
8760 if((regs[i].wasdirty>>6)&1) printf("esi ");
8761 if((regs[i].wasdirty>>7)&1) printf("edi ");
8764 printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
8766 if(regs[i].wasdirty&1) printf("r0 ");
8767 if((regs[i].wasdirty>>1)&1) printf("r1 ");
8768 if((regs[i].wasdirty>>2)&1) printf("r2 ");
8769 if((regs[i].wasdirty>>3)&1) printf("r3 ");
8770 if((regs[i].wasdirty>>4)&1) printf("r4 ");
8771 if((regs[i].wasdirty>>5)&1) printf("r5 ");
8772 if((regs[i].wasdirty>>6)&1) printf("r6 ");
8773 if((regs[i].wasdirty>>7)&1) printf("r7 ");
8774 if((regs[i].wasdirty>>8)&1) printf("r8 ");
8775 if((regs[i].wasdirty>>9)&1) printf("r9 ");
8776 if((regs[i].wasdirty>>10)&1) printf("r10 ");
8777 if((regs[i].wasdirty>>12)&1) printf("r12 ");
8780 disassemble_inst(i);
8781 //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
8782 #if defined(__i386__) || defined(__x86_64__)
8783 printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
8784 if(regs[i].dirty&1) printf("eax ");
8785 if((regs[i].dirty>>1)&1) printf("ecx ");
8786 if((regs[i].dirty>>2)&1) printf("edx ");
8787 if((regs[i].dirty>>3)&1) printf("ebx ");
8788 if((regs[i].dirty>>5)&1) printf("ebp ");
8789 if((regs[i].dirty>>6)&1) printf("esi ");
8790 if((regs[i].dirty>>7)&1) printf("edi ");
8793 printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
8794 if(regs[i].dirty&1) printf("r0 ");
8795 if((regs[i].dirty>>1)&1) printf("r1 ");
8796 if((regs[i].dirty>>2)&1) printf("r2 ");
8797 if((regs[i].dirty>>3)&1) printf("r3 ");
8798 if((regs[i].dirty>>4)&1) printf("r4 ");
8799 if((regs[i].dirty>>5)&1) printf("r5 ");
8800 if((regs[i].dirty>>6)&1) printf("r6 ");
8801 if((regs[i].dirty>>7)&1) printf("r7 ");
8802 if((regs[i].dirty>>8)&1) printf("r8 ");
8803 if((regs[i].dirty>>9)&1) printf("r9 ");
8804 if((regs[i].dirty>>10)&1) printf("r10 ");
8805 if((regs[i].dirty>>12)&1) printf("r12 ");
8808 if(regs[i].isconst) {
8809 printf("constants: ");
8810 #if defined(__i386__) || defined(__x86_64__)
8811 if(regs[i].isconst&1) printf("eax=%x ",(u_int)constmap[i][0]);
8812 if((regs[i].isconst>>1)&1) printf("ecx=%x ",(u_int)constmap[i][1]);
8813 if((regs[i].isconst>>2)&1) printf("edx=%x ",(u_int)constmap[i][2]);
8814 if((regs[i].isconst>>3)&1) printf("ebx=%x ",(u_int)constmap[i][3]);
8815 if((regs[i].isconst>>5)&1) printf("ebp=%x ",(u_int)constmap[i][5]);
8816 if((regs[i].isconst>>6)&1) printf("esi=%x ",(u_int)constmap[i][6]);
8817 if((regs[i].isconst>>7)&1) printf("edi=%x ",(u_int)constmap[i][7]);
8819 #if defined(__arm__) || defined(__aarch64__)
8821 for (r = 0; r < ARRAY_SIZE(constmap[i]); r++)
8822 if ((regs[i].isconst >> r) & 1)
8823 printf(" r%d=%x", r, (u_int)constmap[i][r]);
8827 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
8828 #if defined(__i386__) || defined(__x86_64__)
8829 printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
8830 if(branch_regs[i].dirty&1) printf("eax ");
8831 if((branch_regs[i].dirty>>1)&1) printf("ecx ");
8832 if((branch_regs[i].dirty>>2)&1) printf("edx ");
8833 if((branch_regs[i].dirty>>3)&1) printf("ebx ");
8834 if((branch_regs[i].dirty>>5)&1) printf("ebp ");
8835 if((branch_regs[i].dirty>>6)&1) printf("esi ");
8836 if((branch_regs[i].dirty>>7)&1) printf("edi ");
8839 printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
8840 if(branch_regs[i].dirty&1) printf("r0 ");
8841 if((branch_regs[i].dirty>>1)&1) printf("r1 ");
8842 if((branch_regs[i].dirty>>2)&1) printf("r2 ");
8843 if((branch_regs[i].dirty>>3)&1) printf("r3 ");
8844 if((branch_regs[i].dirty>>4)&1) printf("r4 ");
8845 if((branch_regs[i].dirty>>5)&1) printf("r5 ");
8846 if((branch_regs[i].dirty>>6)&1) printf("r6 ");
8847 if((branch_regs[i].dirty>>7)&1) printf("r7 ");
8848 if((branch_regs[i].dirty>>8)&1) printf("r8 ");
8849 if((branch_regs[i].dirty>>9)&1) printf("r9 ");
8850 if((branch_regs[i].dirty>>10)&1) printf("r10 ");
8851 if((branch_regs[i].dirty>>12)&1) printf("r12 ");
8857 /* Pass 8 - Assembly */
8858 linkcount=0;stubcount=0;
8859 ds=0;is_delayslot=0;
8861 void *beginning=start_block();
8866 void *instr_addr0_override = NULL;
8868 if (start == 0x80030000) {
8869 // nasty hack for the fastbios thing
8870 // override block entry to this code
8871 instr_addr0_override = out;
8872 emit_movimm(start,0);
8873 // abuse io address var as a flag that we
8874 // have already returned here once
8875 emit_readword(&address,1);
8876 emit_writeword(0,&pcaddr);
8877 emit_writeword(0,&address);
8880 emit_jeq(out + 4*2);
8881 emit_far_jump(new_dyna_leave);
8883 emit_jne(new_dyna_leave);
8888 //if(ds) printf("ds: ");
8889 disassemble_inst(i);
8891 ds=0; // Skip delay slot
8892 if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
8893 instr_addr[i] = NULL;
8895 speculate_register_values(i);
8896 #ifndef DESTRUCTIVE_WRITEBACK
8897 if (i < 2 || !is_ujump(i-2))
8899 wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
8901 if((itype[i]==CJUMP||itype[i]==SJUMP)&&!likely[i]) {
8902 dirty_pre=branch_regs[i].dirty;
8904 dirty_pre=regs[i].dirty;
8908 if (i < 2 || !is_ujump(i-2))
8910 wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
8911 loop_preload(regmap_pre[i],regs[i].regmap_entry);
8913 // branch target entry point
8914 instr_addr[i] = out;
8915 assem_debug("<->\n");
8916 drc_dbg_emit_do_cmp(i);
8919 if(regs[i].regmap_entry[HOST_CCREG]==CCREG&®s[i].regmap[HOST_CCREG]!=CCREG)
8920 wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
8921 load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i],rs2[i]);
8922 address_generation(i,®s[i],regs[i].regmap_entry);
8923 load_consts(regmap_pre[i],regs[i].regmap,i);
8924 if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
8926 // Load the delay slot registers if necessary
8927 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
8928 load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
8929 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
8930 load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
8931 if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
8932 load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
8936 // Preload registers for following instruction
8937 if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
8938 if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
8939 load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
8940 if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
8941 if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
8942 load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
8944 // TODO: if(is_ooo(i)) address_generation(i+1);
8946 load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
8947 if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
8948 load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
8952 alu_assemble(i,®s[i]);break;
8954 imm16_assemble(i,®s[i]);break;
8956 shift_assemble(i,®s[i]);break;
8958 shiftimm_assemble(i,®s[i]);break;
8960 load_assemble(i,®s[i]);break;
8962 loadlr_assemble(i,®s[i]);break;
8964 store_assemble(i,®s[i]);break;
8966 storelr_assemble(i,®s[i]);break;
8968 cop0_assemble(i,®s[i]);break;
8970 cop1_assemble(i,®s[i]);break;
8972 c1ls_assemble(i,®s[i]);break;
8974 cop2_assemble(i,®s[i]);break;
8976 c2ls_assemble(i,®s[i]);break;
8978 c2op_assemble(i,®s[i]);break;
8980 multdiv_assemble(i,®s[i]);break;
8982 mov_assemble(i,®s[i]);break;
8984 syscall_assemble(i,®s[i]);break;
8986 hlecall_assemble(i,®s[i]);break;
8988 intcall_assemble(i,®s[i]);break;
8990 ujump_assemble(i,®s[i]);ds=1;break;
8992 rjump_assemble(i,®s[i]);ds=1;break;
8994 cjump_assemble(i,®s[i]);ds=1;break;
8996 sjump_assemble(i,®s[i]);ds=1;break;
8998 pagespan_assemble(i,®s[i]);break;
9003 literal_pool_jumpover(256);
9006 //assert(is_ujump(i-2));
9007 // If the block did not end with an unconditional branch,
9008 // add a jump to the next instruction.
9010 if(!is_ujump(i-2)&&itype[i-1]!=SPAN) {
9011 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
9013 if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP) {
9014 store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
9015 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
9016 emit_loadreg(CCREG,HOST_CCREG);
9017 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
9019 else if(!likely[i-2])
// --- End of assembly pass: emit the block's fall-through exit. ---
// Block ended in a branch+delay-slot pair: flush the dirty registers
// recorded for the branch (branch_regs) before leaving the block.
// NOTE(review): the enclosing if/else structure is elided in this excerpt.
9021 store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
// Cycle counter must already be resident in HOST_CCREG on this path.
9022 assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
// Same flush for the non-branch register state (regs[] instead of branch_regs[]).
9026 store_regs_bt(regs[i-2].regmap,regs[i-2].dirty,start+i*4);
9027 assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
// Record a pending jump to the next block; resolved by the Pass 9 linker below.
9029 add_to_linker(out,start+i*4,0);
// Fall-through exit when the final instruction is NOT any kind of jump:
9036 assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
9037 store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
// Reload the cycle counter if it isn't already in its host register,
9038 if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
9039 emit_loadreg(CCREG,HOST_CCREG);
// then charge the cycles consumed by this block before exiting.
9040 emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
9041 add_to_linker(out,start+i*4,0);
9045 // TODO: delay slot stubs?
// Emit the out-of-line stubs collected during assembly (slow paths for
// loads/stores, invalidation, COP1 traps, unaligned writes).
// NOTE(review): the case labels of this switch are elided in this excerpt.
9047 for(i=0;i<stubcount;i++)
9049 switch(stubs[i].type)
9057 do_readstub(i);break;
9062 do_writestub(i);break;
9066 do_invstub(i);break;
9068 do_cop1stub(i);break;
9070 do_unalignedwritestub(i);break;
// Allow an earlier pass to substitute a special entry point for the
// first instruction (instr_addr[0] feeds the jump_in registration below).
9074 if (instr_addr0_override)
9075 instr_addr[0] = instr_addr0_override;
9077 /* Pass 9 - Linker */
// Resolve every jump recorded via add_to_linker() during assembly.
9078 for(i=0;i<linkcount;i++)
9080 assem_debug("%p -> %8x\n",link_addr[i].addr,link_addr[i].target);
// Internal vs. external target: !ext means the target lies outside this
// block, so we may link directly only if compiled code already exists.
9082 if (!link_addr[i].ext)
// check_addr() returns existing compiled code for the target, or NULL.
9085 void *addr = check_addr(link_addr[i].target);
// Emit a trampoline to the dynarec entry in case the target must be
// (re)compiled later; 'stub' presumably points at it — TODO confirm,
// its assignment is elided in this excerpt.
9086 emit_extjump(link_addr[i].addr, link_addr[i].target);
// Target already compiled: patch the jump straight to it,
9088 set_jump_target(link_addr[i].addr, addr);
// and register the link so invalidation can unpatch it later.
9089 add_link(link_addr[i].target,stub);
// Not compiled yet: jump to the trampoline for now.
9092 set_jump_target(link_addr[i].addr, stub);
// ext != 0: target is inside this very block — resolve to the host
// address of the corresponding guest instruction.
9097 int target=(link_addr[i].target-start)>>2;
9098 assert(target>=0&&target<slen);
9099 assert(instr_addr[target]);
9100 //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
9101 //set_jump_target_fillslot(link_addr[i].addr,instr_addr[target],link_addr[i].ext>>1);
9103 set_jump_target(link_addr[i].addr, instr_addr[target]);
9107 // External Branch Targets (jump_in)
// Wrap the shadow-copy cursor if this block's copy would overrun the buffer.
9108 if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
// Register an entry point for every instruction that can be jumped to.
9113 if(instr_addr[i]) // TODO - delay slots (=null)
9115 u_int vaddr=start+i*4;
9116 u_int page=get_page(vaddr);
9117 u_int vpage=get_vpage(vaddr);
9120 assem_debug("%p (%d) <- %8x\n",instr_addr[i],i,start+i*4);
9121 assem_debug("jump_in: %x\n",start+i*4);
// jump_dirty gets the raw entry; jump_in gets the dirty-check stub that
// verifies the source against the shadow copy before entering.
9122 ll_add(jump_dirty+vpage,vaddr,out);
9123 void *entry_point = do_dirty_stub(i);
9124 ll_add_flags(jump_in+page,vaddr,state_rflags,entry_point);
9125 // If there was an existing entry in the hash table,
9126 // replace it with the new address.
9127 // Don't add new entries. We'll insert the
9128 // ones that actually get used in check_addr().
9129 struct ht_entry *ht_bin = hash_table_get(vaddr);
9130 if (ht_bin->vaddr[0] == vaddr)
9131 ht_bin->tcaddr[0] = entry_point;
9132 if (ht_bin->vaddr[1] == vaddr)
9133 ht_bin->tcaddr[1] = entry_point;
9138 // Write out the literal pool if necessary
// Cortex-A8 workaround: pad the output pointer to 8-byte alignment
// (presumably to avoid a branch-predictor pathology — TODO confirm).
9140 #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
9142 if(((u_int)out)&7) emit_addnop(13);
// Sanity check: the emitted code must fit in one block's budget.
9144 assert(out - (u_char *)beginning < MAX_OUTPUT_BLOCK_SIZE);
9145 //printf("shadow buffer: %p-%p\n",copy,(u_char *)copy+slen*4);
// Snapshot the guest source so the dirty stubs can later detect
// self-modifying code by comparison.
9146 memcpy(copy,source,slen*4);
// Flush/commit the finished block (e.g. instruction-cache maintenance).
9149 end_block(beginning);
9151 // If we're within 256K of the end of the buffer,
9152 // start over from the beginning. (Is 256K enough?)
9153 if (out > ndrc->translation_cache + sizeof(ndrc->translation_cache) - MAX_OUTPUT_BLOCK_SIZE)
9154 out = ndrc->translation_cache;
9156 // Trap writes to any of the pages we compiled
9157 for(i=start>>12;i<=(start+slen*4)>>12;i++) {
// Reset the cached invalid-code address range.
9160 inv_code_start=inv_code_end=~0;
9162 // for PCSX we need to mark all mirrors too
// RAM is mirrored in KUSEG (0x00000000), KSEG0 (0x80000000) and
// KSEG1 (0xa0000000); mark the page valid in all three views.
9163 if(get_page(start)<(RAM_SIZE>>12))
9164 for(i=start>>12;i<=(start+slen*4)>>12;i++)
9165 invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
9166 invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
9167 invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
9169 /* Pass 10 - Free memory by expiring oldest blocks */
// 'expirep' is a 16-bit rolling cursor; 'end' is the cursor position
// corresponding to the current output pointer, offset by half the cache
// so expiry stays ahead of where new code is being written.
9171 int end=(((out-ndrc->translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
9174 int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
9175 uintptr_t base=(uintptr_t)ndrc->translation_cache+((expirep>>13)<<shift); // Base address of this block
9176 inv_debug("EXP: Phase %d\n",expirep);
// Four sub-phases per cache region, selected by bits 11-12 of expirep
// (NOTE(review): the case labels are elided in this excerpt):
9177 switch((expirep>>11)&3)
9180 // Clear jump_in and jump_dirty
9181 ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
9182 ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
9183 ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
9184 ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
// Next phase: unpatch outgoing links that point into the expiring region.
9188 ll_kill_pointers(jump_out[expirep&2047],base,shift);
9189 ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
// Next phase: evict hash-table entries whose code lies in the region.
// Each cursor value covers 32 consecutive bins ((expirep&2047)<<5)+i.
9194 struct ht_entry *ht_bin = &hash_table[((expirep&2047)<<5)+i];
// The -MAX_OUTPUT_BLOCK_SIZE variant also catches entries whose block
// starts just before the region boundary but extends into it.
9195 if (((uintptr_t)ht_bin->tcaddr[1]>>shift) == (base>>shift) ||
9196 (((uintptr_t)ht_bin->tcaddr[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
9197 inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[1],ht_bin->tcaddr[1]);
9198 ht_bin->vaddr[1] = -1;
9199 ht_bin->tcaddr[1] = NULL;
9201 if (((uintptr_t)ht_bin->tcaddr[0]>>shift) == (base>>shift) ||
9202 (((uintptr_t)ht_bin->tcaddr[0]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
9203 inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[0],ht_bin->tcaddr[0]);
// Slot 0 evicted: promote slot 1 into slot 0, then clear slot 1.
9204 ht_bin->vaddr[0] = ht_bin->vaddr[1];
9205 ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
9206 ht_bin->vaddr[1] = -1;
9207 ht_bin->tcaddr[1] = NULL;
// Final phase: free the jump_out lists themselves (only once per region,
// at the first cursor position).
9213 if((expirep&2047)==0)
9215 ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
9216 ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
// Advance the rolling expiry cursor (wraps at 16 bits).
9219 expirep=(expirep+1)&65535;
9224 // vim:shiftwidth=2:expandtab