1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <errno.h>
25 #include <sys/mman.h>
26 #ifdef __MACH__
27 #include <libkern/OSCacheControl.h>
28 #endif
29 #ifdef _3DS
30 #include <3ds_utils.h>
31 #endif
32 #ifdef VITA
33 #include <psp2/kernel/sysmem.h>
34 static int sceBlock;
35 #endif
36
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h" //emulator interface
39 #include "emu_if.h" //emulator interface
40
41 #ifndef ARRAY_SIZE
42 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
43 #endif
44
45 //#define DISASM
46 //#define assem_debug printf
47 //#define inv_debug printf
48 #define assem_debug(...)
49 #define inv_debug(...)
50
51 #ifdef __i386__
52 #include "assem_x86.h"
53 #endif
54 #ifdef __x86_64__
55 #include "assem_x64.h"
56 #endif
57 #ifdef __arm__
58 #include "assem_arm.h"
59 #endif
60 #ifdef __aarch64__
61 #include "assem_arm64.h"
62 #endif
63
64 #define MAXBLOCK 4096
65 #define MAX_OUTPUT_BLOCK_SIZE 262144
66
67 // stubs
68 enum stub_type {
69   CC_STUB = 1,
70   FP_STUB = 2,
71   LOADB_STUB = 3,
72   LOADH_STUB = 4,
73   LOADW_STUB = 5,
74   LOADD_STUB = 6,
75   LOADBU_STUB = 7,
76   LOADHU_STUB = 8,
77   STOREB_STUB = 9,
78   STOREH_STUB = 10,
79   STOREW_STUB = 11,
80   STORED_STUB = 12,
81   STORELR_STUB = 13,
82   INVCODE_STUB = 14,
83 };
84
85 struct regstat
86 {
87   signed char regmap_entry[HOST_REGS];
88   signed char regmap[HOST_REGS];
89   uint64_t wasdirty;
90   uint64_t dirty;
91   uint64_t u;
92   u_int wasconst;
93   u_int isconst;
94   u_int loadedconst;             // host regs that have constants loaded
95   u_int waswritten;              // MIPS regs that were used as store base before
96 };
97
98 // note: asm depends on this layout
99 struct ll_entry
100 {
101   u_int vaddr;
102   u_int reg_sv_flags;
103   void *addr;
104   struct ll_entry *next;
105 };
106
107 struct ht_entry
108 {
109   u_int vaddr[2];
110   void *tcaddr[2];
111 };
112
113 struct code_stub
114 {
115   enum stub_type type;
116   void *addr;
117   void *retaddr;
118   u_int a;
119   uintptr_t b;
120   uintptr_t c;
121   u_int d;
122   u_int e;
123 };
124
125 struct link_entry
126 {
127   void *addr;
128   u_int target;
129   u_int ext;
130 };
131
132   // used by asm:
133   u_char *out;
134   struct ht_entry hash_table[65536]  __attribute__((aligned(16)));
135   struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
136   struct ll_entry *jump_dirty[4096];
137
138   static struct ll_entry *jump_out[4096];
139   static u_int start;
140   static u_int *source;
141   static char insn[MAXBLOCK][10];
142   static u_char itype[MAXBLOCK];
143   static u_char opcode[MAXBLOCK];
144   static u_char opcode2[MAXBLOCK];
145   static u_char bt[MAXBLOCK];
146   static u_char rs1[MAXBLOCK];
147   static u_char rs2[MAXBLOCK];
148   static u_char rt1[MAXBLOCK];
149   static u_char rt2[MAXBLOCK];
150   static u_char us1[MAXBLOCK];
151   static u_char us2[MAXBLOCK];
152   static u_char dep1[MAXBLOCK];
153   static u_char dep2[MAXBLOCK];
154   static u_char lt1[MAXBLOCK];
155   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
156   static uint64_t gte_rt[MAXBLOCK];
157   static uint64_t gte_unneeded[MAXBLOCK];
158   static u_int smrv[32]; // speculated MIPS register values
159   static u_int smrv_strong; // mask of regs that are likely to have correct values
160   static u_int smrv_weak; // same, but somewhat less likely
161   static u_int smrv_strong_next; // same, but after current insn executes
162   static u_int smrv_weak_next;
163   static int imm[MAXBLOCK];
164   static u_int ba[MAXBLOCK];
165   static char likely[MAXBLOCK];
166   static char is_ds[MAXBLOCK];
167   static char ooo[MAXBLOCK];
168   static uint64_t unneeded_reg[MAXBLOCK];
169   static uint64_t branch_unneeded_reg[MAXBLOCK];
170   static signed char regmap_pre[MAXBLOCK][HOST_REGS];
171   static uint64_t current_constmap[HOST_REGS];
172   static uint64_t constmap[MAXBLOCK][HOST_REGS];
173   static struct regstat regs[MAXBLOCK];
174   static struct regstat branch_regs[MAXBLOCK];
175   static signed char minimum_free_regs[MAXBLOCK];
176   static u_int needed_reg[MAXBLOCK];
177   static u_int wont_dirty[MAXBLOCK];
178   static u_int will_dirty[MAXBLOCK];
179   static int ccadj[MAXBLOCK];
180   static int slen;
181   static void *instr_addr[MAXBLOCK];
182   static struct link_entry link_addr[MAXBLOCK];
183   static int linkcount;
184   static struct code_stub stubs[MAXBLOCK*3];
185   static int stubcount;
186   static u_int literals[1024][2];
187   static int literalcount;
188   static int is_delayslot;
189   static char shadow[1048576]  __attribute__((aligned(16)));
190   static void *copy;
191   static int expirep;
192   static u_int stop_after_jal;
193 #ifndef RAM_FIXED
194   static uintptr_t ram_offset;
195 #else
196   static const uintptr_t ram_offset=0;
197 #endif
198
199   int new_dynarec_hacks;
200   int new_dynarec_did_compile;
201
202   extern int cycle_count; // ... until end of the timeslice, counts -N -> 0
203   extern int last_count;  // last absolute target, often = next_interupt
204   extern int pcaddr;
205   extern int pending_exception;
206   extern int branch_target;
207   extern u_int mini_ht[32][2];
208   extern u_char restore_candidate[512];
209
210   /* registers that may be allocated */
211   /* 1-31 gpr */
212 #define HIREG 32 // hi
213 #define LOREG 33 // lo
214 //#define FSREG 34 // FPU status (FCSR)
215 #define CSREG 35 // Coprocessor status
216 #define CCREG 36 // Cycle count
217 #define INVCP 37 // Pointer to invalid_code
218 //#define MMREG 38 // Pointer to memory_map
219 //#define ROREG 39 // ram offset (if rdram!=0x80000000)
220 #define TEMPREG 40
221 #define FTEMP 40 // FPU temporary register
222 #define PTEMP 41 // Prefetch temporary register
223 //#define TLREG 42 // TLB mapping offset
224 #define RHASH 43 // Return address hash
225 #define RHTBL 44 // Return address hash table address
226 #define RTEMP 45 // JR/JALR address register
227 #define MAXREG 45
228 #define AGEN1 46 // Address generation temporary register
229 //#define AGEN2 47 // Address generation temporary register
230 //#define MGEN1 48 // Maptable address generation temporary register
231 //#define MGEN2 49 // Maptable address generation temporary register
232 #define BTREG 50 // Branch target temporary register
233
234   /* instruction types */
235 #define NOP 0     // No operation
236 #define LOAD 1    // Load
237 #define STORE 2   // Store
238 #define LOADLR 3  // Unaligned load
239 #define STORELR 4 // Unaligned store
240 #define MOV 5     // Move
241 #define ALU 6     // Arithmetic/logic
242 #define MULTDIV 7 // Multiply/divide
243 #define SHIFT 8   // Shift by register
244 #define SHIFTIMM 9// Shift by immediate
245 #define IMM16 10  // 16-bit immediate
246 #define RJUMP 11  // Unconditional jump to register
247 #define UJUMP 12  // Unconditional jump
248 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
249 #define SJUMP 14  // Conditional branch (regimm format)
250 #define COP0 15   // Coprocessor 0
251 #define COP1 16   // Coprocessor 1
252 #define C1LS 17   // Coprocessor 1 load/store
253 //#define FJUMP 18  // Conditional branch (floating point)
254 //#define FLOAT 19  // Floating point unit
255 //#define FCONV 20  // Convert integer to float
256 //#define FCOMP 21  // Floating point compare (sets FSREG)
257 #define SYSCALL 22// SYSCALL
258 #define OTHER 23  // Other
259 #define SPAN 24   // Branch/delay slot spans 2 pages
260 #define NI 25     // Not implemented
261 #define HLECALL 26// PCSX fake opcodes for HLE
262 #define COP2 27   // Coprocessor 2 move
263 #define C2LS 28   // Coprocessor 2 load/store
264 #define C2OP 29   // Coprocessor 2 operation
265 #define INTCALL 30// Call interpreter to handle rare corner cases
266
267   /* branch codes */
268 #define TAKEN 1
269 #define NOTTAKEN 2
270 #define NULLDS 3
271
272 // asm linkage
273 int new_recompile_block(int addr);
274 void *get_addr_ht(u_int vaddr);
275 void invalidate_block(u_int block);
276 void invalidate_addr(u_int addr);
277 void remove_hash(int vaddr);
278 void dyna_linker();
279 void dyna_linker_ds();
280 void verify_code();
281 void verify_code_vm();
282 void verify_code_ds();
283 void cc_interrupt();
284 void fp_exception();
285 void fp_exception_ds();
286 void jump_syscall_hle();
287 void jump_hlecall();
288 void jump_intcall();
289 void new_dyna_leave();
290
291 // Needed by assembler
292 static void wb_register(signed char r,signed char regmap[],uint64_t dirty);
293 static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty);
294 static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr);
295 static void load_all_regs(signed char i_regmap[]);
296 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
297 static void load_regs_entry(int t);
298 static void load_all_consts(signed char regmap[],u_int dirty,int i);
299
300 static int verify_dirty(u_int *ptr);
301 static int get_final_value(int hr, int i, int *value);
302 static void add_stub(enum stub_type type, void *addr, void *retaddr,
303   u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e);
304 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
305   int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist);
306 static void add_to_linker(void *addr, u_int target, int ext);
307 static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override);
308 static void *get_direct_memhandler(void *table, u_int addr,
309   enum stub_type type, uintptr_t *addr_host);
310 static void pass_args(int a0, int a1);
311
312 static void mprotect_w_x(void *start, void *end, int is_x)
313 {
314 #ifdef NO_WRITE_EXEC
315   #if defined(VITA)
316   // *Open* enables write on all memory that was
317   // allocated by sceKernelAllocMemBlockForVM()?
318   if (is_x)
319     sceKernelCloseVMDomain();
320   else
321     sceKernelOpenVMDomain();
322   #else
323   u_long mstart = (u_long)start & ~4095ul;
324   u_long mend = (u_long)end;
325   if (mprotect((void *)mstart, mend - mstart,
326                PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
327     SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
328   #endif
329 #endif
330 }
331
332 static void start_tcache_write(void *start, void *end)
333 {
334   mprotect_w_x(start, end, 0);
335 }
336
337 static void end_tcache_write(void *start, void *end)
338 {
339 #ifdef __arm__
340   size_t len = (char *)end - (char *)start;
341   #if   defined(__BLACKBERRY_QNX__)
342   msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
343   #elif defined(__MACH__)
344   sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
345   #elif defined(VITA)
346   sceKernelSyncVMDomain(sceBlock, start, len);
347   #elif defined(_3DS)
348   ctr_flush_invalidate_cache();
349   #else
350   __clear_cache(start, end);
351   #endif
352   (void)len;
353 #else
354   __clear_cache(start, end);
355 #endif
356
357   mprotect_w_x(start, end, 1);
358 }
359
360 static void *start_block(void)
361 {
362   u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
363   if (end > translation_cache + (1<<TARGET_SIZE_2))
364     end = translation_cache + (1<<TARGET_SIZE_2);
365   start_tcache_write(out, end);
366   return out;
367 }
368
369 static void end_block(void *start)
370 {
371   end_tcache_write(start, out);
372 }
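/*
 * Usage sketch (illustrative; 'beg' is just a placeholder name): the helpers
 * above bracket code emission so the cache is writable while emitting and
 * executable afterwards.
 *
 *   void *beg = start_block();   // make up to MAX_OUTPUT_BLOCK_SIZE bytes writable
 *   // ... emit instructions, advancing 'out' ...
 *   end_block(beg);              // flush icache for [beg,out) and restore execute
 *
 * Without NO_WRITE_EXEC the mprotect calls are no-ops and only the cache flush
 * remains.
 */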
373
374 //#define DEBUG_CYCLE_COUNT 1
375
376 #define NO_CYCLE_PENALTY_THR 12
377
378 int cycle_multiplier; // 100 for 1.0
379
380 static int CLOCK_ADJUST(int x)
381 {
382   int s=(x>>31)|1;
383   return (x * cycle_multiplier + s * 50) / 100;
384 }
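/*
 * Worked example (multiplier value chosen for illustration): with
 * cycle_multiplier==150 (a 1.5x clock), CLOCK_ADJUST(7) = (7*150+50)/100 = 11.
 * The s=(x>>31)|1 term flips the +50 to -50 for negative x, so the integer
 * division rounds to nearest symmetrically: CLOCK_ADJUST(-7) = (-1050-50)/100 = -11.
 */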
385
386 static u_int get_page(u_int vaddr)
387 {
388   u_int page=vaddr&~0xe0000000;
389   if (page < 0x1000000)
390     page &= ~0x0e00000; // RAM mirrors
391   page>>=12;
392   if(page>2048) page=2048+(page&2047);
393   return page;
394 }
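/*
 * Worked example (addresses for illustration only): get_page(0xA0001234) strips
 * the segment bits to 0x00001234, clears the RAM mirror bits (below 0x1000000)
 * and returns page 1.  A non-RAM address like 0x1F801000 becomes 0x1F801 after
 * the shift, which is above 2048 and folds to 2048+(0x1F801&2047) = 2049.
 */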
395
396 // no virtual mem in PCSX
397 static u_int get_vpage(u_int vaddr)
398 {
399   return get_page(vaddr);
400 }
401
402 static struct ht_entry *hash_table_get(u_int vaddr)
403 {
404   return &hash_table[((vaddr>>16)^vaddr)&0xFFFF];
405 }
406
407 static void hash_table_add(struct ht_entry *ht_bin, u_int vaddr, void *tcaddr)
408 {
409   ht_bin->vaddr[1] = ht_bin->vaddr[0];
410   ht_bin->tcaddr[1] = ht_bin->tcaddr[0];
411   ht_bin->vaddr[0] = vaddr;
412   ht_bin->tcaddr[0] = tcaddr;
413 }
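/*
 * Sketch of the intended use (mirrors get_addr/get_addr_ht below): each bin
 * caches the two most recently used translations for its hash, slot 0 first.
 * 'compiled_addr' is a placeholder for whatever translation the caller found.
 *
 *   struct ht_entry *ht_bin = hash_table_get(vaddr);
 *   if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];  // hot hit
 *   if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];  // cold hit
 *   hash_table_add(ht_bin, vaddr, compiled_addr);             // evicts slot 1
 */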
414
415 // some of ari64's messy code; seems to rely on unsigned 32-bit overflow
416 static int doesnt_expire_soon(void *tcaddr)
417 {
418   u_int diff = (u_int)((u_char *)tcaddr - out) << (32-TARGET_SIZE_2);
419   return diff > (u_int)(0x60000000 + (MAX_OUTPUT_BLOCK_SIZE << (32-TARGET_SIZE_2)));
420 }
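/*
 * Rough reading (interpretation, not from the original comments): offsets in
 * the translation cache are rescaled so the cache spans the full 32-bit range,
 * letting the subtraction wrap naturally.  Blocks within roughly the next 3/8
 * of the cache ahead of 'out' (0x60000000/2^32) plus one maximum block size
 * count as "expiring soon" and make the function return false.
 */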
421
422 // Get address from virtual address
423 // This is called from the recompiled JR/JALR instructions
424 void *get_addr(u_int vaddr)
425 {
426   u_int page=get_page(vaddr);
427   u_int vpage=get_vpage(vaddr);
428   struct ll_entry *head;
429   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
430   head=jump_in[page];
431   while(head!=NULL) {
432     if(head->vaddr==vaddr) {
433   //printf("TRACE: count=%d next=%d (get_addr match %x: %p)\n",Count,next_interupt,vaddr,head->addr);
434       hash_table_add(hash_table_get(vaddr), vaddr, head->addr);
435       return head->addr;
436     }
437     head=head->next;
438   }
439   head=jump_dirty[vpage];
440   while(head!=NULL) {
441     if(head->vaddr==vaddr) {
442       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %p)\n",Count,next_interupt,vaddr,head->addr);
443       // Don't restore blocks which are about to expire from the cache
444       if (doesnt_expire_soon(head->addr))
445       if (verify_dirty(head->addr)) {
446         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
447         invalid_code[vaddr>>12]=0;
448         inv_code_start=inv_code_end=~0;
449         if(vpage<2048) {
450           restore_candidate[vpage>>3]|=1<<(vpage&7);
451         }
452         else restore_candidate[page>>3]|=1<<(page&7);
453         struct ht_entry *ht_bin = hash_table_get(vaddr);
454         if (ht_bin->vaddr[0] == vaddr)
455           ht_bin->tcaddr[0] = head->addr; // Replace existing entry
456         else
457           hash_table_add(ht_bin, vaddr, head->addr);
458
459         return head->addr;
460       }
461     }
462     head=head->next;
463   }
464   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
465   int r=new_recompile_block(vaddr);
466   if(r==0) return get_addr(vaddr);
467   // Execute in unmapped page, generate pagefault exception
468   Status|=2;
469   Cause=(vaddr<<31)|0x8;
470   EPC=(vaddr&1)?vaddr-5:vaddr;
471   BadVAddr=(vaddr&~1);
472   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
473   EntryHi=BadVAddr&0xFFFFE000;
474   return get_addr_ht(0x80000000);
475 }
476 // Look up address in hash table first
477 void *get_addr_ht(u_int vaddr)
478 {
479   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
480   const struct ht_entry *ht_bin = hash_table_get(vaddr);
481   if (ht_bin->vaddr[0] == vaddr) return ht_bin->tcaddr[0];
482   if (ht_bin->vaddr[1] == vaddr) return ht_bin->tcaddr[1];
483   return get_addr(vaddr);
484 }
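/*
 * Lookup chain as implemented above (summary for orientation): get_addr_ht()
 * checks the 2-entry hash bin, then get_addr() walks jump_in[page], then
 * jump_dirty[vpage] (re-validated with verify_dirty), and finally falls back
 * to new_recompile_block().  The first indirect jump to a fresh vaddr typically
 * misses everywhere and compiles; the next one hits the hash bin.
 */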
485
486 void clear_all_regs(signed char regmap[])
487 {
488   int hr;
489   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
490 }
491
492 signed char get_reg(signed char regmap[],int r)
493 {
494   int hr;
495   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
496   return -1;
497 }
498
499 // Find a register that is available for two consecutive cycles
500 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
501 {
502   int hr;
503   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
504   return -1;
505 }
506
507 int count_free_regs(signed char regmap[])
508 {
509   int count=0;
510   int hr;
511   for(hr=0;hr<HOST_REGS;hr++)
512   {
513     if(hr!=EXCLUDE_REG) {
514       if(regmap[hr]<0) count++;
515     }
516   }
517   return count;
518 }
519
520 void dirty_reg(struct regstat *cur,signed char reg)
521 {
522   int hr;
523   if(!reg) return;
524   for (hr=0;hr<HOST_REGS;hr++) {
525     if((cur->regmap[hr]&63)==reg) {
526       cur->dirty|=1<<hr;
527     }
528   }
529 }
530
531 void set_const(struct regstat *cur,signed char reg,uint64_t value)
532 {
533   int hr;
534   if(!reg) return;
535   for (hr=0;hr<HOST_REGS;hr++) {
536     if(cur->regmap[hr]==reg) {
537       cur->isconst|=1<<hr;
538       current_constmap[hr]=value;
539     }
540     else if((cur->regmap[hr]^64)==reg) {
541       cur->isconst|=1<<hr;
542       current_constmap[hr]=value>>32;
543     }
544   }
545 }
546
547 void clear_const(struct regstat *cur,signed char reg)
548 {
549   int hr;
550   if(!reg) return;
551   for (hr=0;hr<HOST_REGS;hr++) {
552     if((cur->regmap[hr]&63)==reg) {
553       cur->isconst&=~(1<<hr);
554     }
555   }
556 }
557
558 int is_const(struct regstat *cur,signed char reg)
559 {
560   int hr;
561   if(reg<0) return 0;
562   if(!reg) return 1;
563   for (hr=0;hr<HOST_REGS;hr++) {
564     if((cur->regmap[hr]&63)==reg) {
565       return (cur->isconst>>hr)&1;
566     }
567   }
568   return 0;
569 }
570 uint64_t get_const(struct regstat *cur,signed char reg)
571 {
572   int hr;
573   if(!reg) return 0;
574   for (hr=0;hr<HOST_REGS;hr++) {
575     if(cur->regmap[hr]==reg) {
576       return current_constmap[hr];
577     }
578   }
579   SysPrintf("Unknown constant in r%d\n",reg);
580   exit(1);
581 }
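/*
 * Usage sketch (the same pattern appears in imm16_alloc further down): the
 * const-tracking helpers let the allocator fold immediates at compile time.
 *
 *   if(is_const(current,rs1[i])) {
 *     int v=get_const(current,rs1[i]);
 *     set_const(current,rt1[i],v+imm[i]);  // e.g. ADDIU with a known source
 *   }
 *   else clear_const(current,rt1[i]);
 */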
582
583 // Least soon needed registers
584 // Look at the next ten instructions and see which registers
585 // will be used.  Try not to reallocate these.
586 void lsn(u_char hsn[], int i, int *preferred_reg)
587 {
588   int j;
589   int b=-1;
590   for(j=0;j<9;j++)
591   {
592     if(i+j>=slen) {
593       j=slen-i-1;
594       break;
595     }
596     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
597     {
598       // Don't go past an unconditional jump
599       j++;
600       break;
601     }
602   }
603   for(;j>=0;j--)
604   {
605     if(rs1[i+j]) hsn[rs1[i+j]]=j;
606     if(rs2[i+j]) hsn[rs2[i+j]]=j;
607     if(rt1[i+j]) hsn[rt1[i+j]]=j;
608     if(rt2[i+j]) hsn[rt2[i+j]]=j;
609     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
610       // Stores can allocate zero
611       hsn[rs1[i+j]]=j;
612       hsn[rs2[i+j]]=j;
613     }
614     // On some architectures stores need invc_ptr
615     #if defined(HOST_IMM8)
616     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
617       hsn[INVCP]=j;
618     }
619     #endif
620     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
621     {
622       hsn[CCREG]=j;
623       b=j;
624     }
625   }
626   if(b>=0)
627   {
628     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
629     {
630       // Follow first branch
631       int t=(ba[i+b]-start)>>2;
632       j=7-b;if(t+j>=slen) j=slen-t-1;
633       for(;j>=0;j--)
634       {
635         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
636         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
637         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
638         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
639       }
640     }
641     // TODO: preferred register based on backward branch
642   }
643   // Delay slot should preferably not overwrite branch conditions or cycle count
644   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) {
645     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
646     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
647     hsn[CCREG]=1;
648     // ...or hash tables
649     hsn[RHASH]=1;
650     hsn[RHTBL]=1;
651   }
652   // Coprocessor load/store needs FTEMP, even if not declared
653   if(itype[i]==C1LS||itype[i]==C2LS) {
654     hsn[FTEMP]=0;
655   }
656   // Load L/R also uses FTEMP as a temporary register
657   if(itype[i]==LOADLR) {
658     hsn[FTEMP]=0;
659   }
660   // Also SWL/SWR/SDL/SDR
661   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
662     hsn[FTEMP]=0;
663   }
664   // Don't remove the miniht registers
665   if(itype[i]==UJUMP||itype[i]==RJUMP)
666   {
667     hsn[RHASH]=0;
668     hsn[RHTBL]=0;
669   }
670 }
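/*
 * Typical call pattern (mirrors alloc_reg/alloc_reg_temp below): the caller
 * seeds hsn[] with 10, lets lsn() lower the entries for registers needed soon,
 * then evicts the MIPS register with the highest remaining hsn value.
 *
 *   u_char hsn[MAXREG+1];
 *   memset(hsn,10,sizeof(hsn));
 *   lsn(hsn,i,&preferred_reg);
 */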
671
672 // We only want to allocate registers if we're going to use them again soon
673 int needed_again(int r, int i)
674 {
675   int j;
676   int b=-1;
677   int rn=10;
678
679   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
680   {
681     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
682       return 0; // Don't need any registers if exiting the block
683   }
684   for(j=0;j<9;j++)
685   {
686     if(i+j>=slen) {
687       j=slen-i-1;
688       break;
689     }
690     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
691     {
692       // Don't go past an unconditional jump
693       j++;
694       break;
695     }
696     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
697     {
698       break;
699     }
700   }
701   for(;j>=1;j--)
702   {
703     if(rs1[i+j]==r) rn=j;
704     if(rs2[i+j]==r) rn=j;
705     if((unneeded_reg[i+j]>>r)&1) rn=10;
706     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP))
707     {
708       b=j;
709     }
710   }
711   /*
712   if(b>=0)
713   {
714     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
715     {
716       // Follow first branch
717       int o=rn;
718       int t=(ba[i+b]-start)>>2;
719       j=7-b;if(t+j>=slen) j=slen-t-1;
720       for(;j>=0;j--)
721       {
722         if(!((unneeded_reg[t+j]>>r)&1)) {
723           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
724           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
725         }
726         else rn=o;
727       }
728     }
729   }*/
730   if(rn<10) return 1;
731   (void)b;
732   return 0;
733 }
734
735 // Try to match register allocations at the end of a loop with those
736 // at the beginning
737 int loop_reg(int i, int r, int hr)
738 {
739   int j,k;
740   for(j=0;j<9;j++)
741   {
742     if(i+j>=slen) {
743       j=slen-i-1;
744       break;
745     }
746     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
747     {
748       // Don't go past an unconditional jump
749       j++;
750       break;
751     }
752   }
753   k=0;
754   if(i>0){
755     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)
756       k--;
757   }
758   for(;k<j;k++)
759   {
760     assert(r < 64);
761     if((unneeded_reg[i+k]>>r)&1) return hr;
762     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP))
763     {
764       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
765       {
766         int t=(ba[i+k]-start)>>2;
767         int reg=get_reg(regs[t].regmap_entry,r);
768         if(reg>=0) return reg;
769         //reg=get_reg(regs[t+1].regmap_entry,r);
770         //if(reg>=0) return reg;
771       }
772     }
773   }
774   return hr;
775 }
776
777
778 // Allocate every register, preserving source/target regs
779 void alloc_all(struct regstat *cur,int i)
780 {
781   int hr;
782
783   for(hr=0;hr<HOST_REGS;hr++) {
784     if(hr!=EXCLUDE_REG) {
785       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
786          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
787       {
788         cur->regmap[hr]=-1;
789         cur->dirty&=~(1<<hr);
790       }
791       // Don't need zeros
792       if((cur->regmap[hr]&63)==0)
793       {
794         cur->regmap[hr]=-1;
795         cur->dirty&=~(1<<hr);
796       }
797     }
798   }
799 }
800
801 #ifdef DRC_DBG
802 extern void gen_interupt();
803 extern void do_insn_cmp();
804 #define FUNCNAME(f) { (intptr_t)f, " " #f }
805 static const struct {
806   intptr_t addr;
807   const char *name;
808 } function_names[] = {
809   FUNCNAME(cc_interrupt),
810   FUNCNAME(gen_interupt),
811   FUNCNAME(get_addr_ht),
812   FUNCNAME(get_addr),
813   FUNCNAME(jump_handler_read8),
814   FUNCNAME(jump_handler_read16),
815   FUNCNAME(jump_handler_read32),
816   FUNCNAME(jump_handler_write8),
817   FUNCNAME(jump_handler_write16),
818   FUNCNAME(jump_handler_write32),
819   FUNCNAME(invalidate_addr),
820   FUNCNAME(verify_code_vm),
821   FUNCNAME(verify_code),
822   FUNCNAME(jump_hlecall),
823   FUNCNAME(jump_syscall_hle),
824   FUNCNAME(new_dyna_leave),
825   FUNCNAME(pcsx_mtc0),
826   FUNCNAME(pcsx_mtc0_ds),
827   FUNCNAME(do_insn_cmp),
828 };
829
830 static const char *func_name(intptr_t a)
831 {
832   int i;
833   for (i = 0; i < sizeof(function_names)/sizeof(function_names[0]); i++)
834     if (function_names[i].addr == a)
835       return function_names[i].name;
836   return "";
837 }
838 #else
839 #define func_name(x) ""
840 #endif
841
842 #ifdef __i386__
843 #include "assem_x86.c"
844 #endif
845 #ifdef __x86_64__
846 #include "assem_x64.c"
847 #endif
848 #ifdef __arm__
849 #include "assem_arm.c"
850 #endif
851 #ifdef __aarch64__
852 #include "assem_arm64.c"
853 #endif
854
855 // Add virtual address mapping to linked list
856 void ll_add(struct ll_entry **head,int vaddr,void *addr)
857 {
858   struct ll_entry *new_entry;
859   new_entry=malloc(sizeof(struct ll_entry));
860   assert(new_entry!=NULL);
861   new_entry->vaddr=vaddr;
862   new_entry->reg_sv_flags=0;
863   new_entry->addr=addr;
864   new_entry->next=*head;
865   *head=new_entry;
866 }
867
868 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
869 {
870   ll_add(head,vaddr,addr);
871   (*head)->reg_sv_flags=reg_sv_flags;
872 }
873
874 // Check if an address is already compiled
875 // but don't return addresses which are about to expire from the cache
876 void *check_addr(u_int vaddr)
877 {
878   struct ht_entry *ht_bin = hash_table_get(vaddr);
879   size_t i;
880   for (i = 0; i < ARRAY_SIZE(ht_bin->vaddr); i++) {
881     if (ht_bin->vaddr[i] == vaddr)
882       if (doesnt_expire_soon((u_char *)ht_bin->tcaddr[i] - MAX_OUTPUT_BLOCK_SIZE))
883         if (isclean(ht_bin->tcaddr[i]))
884           return ht_bin->tcaddr[i];
885   }
886   u_int page=get_page(vaddr);
887   struct ll_entry *head;
888   head=jump_in[page];
889   while (head != NULL) {
890     if (head->vaddr == vaddr) {
891       if (doesnt_expire_soon(head->addr)) {
892         // Update existing entry with current address
893         if (ht_bin->vaddr[0] == vaddr) {
894           ht_bin->tcaddr[0] = head->addr;
895           return head->addr;
896         }
897         if (ht_bin->vaddr[1] == vaddr) {
898           ht_bin->tcaddr[1] = head->addr;
899           return head->addr;
900         }
901         // Insert into hash table with low priority.
902         // Don't evict existing entries, as they are probably
903         // addresses that are being accessed frequently.
904         if (ht_bin->vaddr[0] == -1) {
905           ht_bin->vaddr[0] = vaddr;
906           ht_bin->tcaddr[0] = head->addr;
907         }
908         else if (ht_bin->vaddr[1] == -1) {
909           ht_bin->vaddr[1] = vaddr;
910           ht_bin->tcaddr[1] = head->addr;
911         }
912         return head->addr;
913       }
914     }
915     head=head->next;
916   }
917   return 0;
918 }
919
920 void remove_hash(int vaddr)
921 {
922   //printf("remove hash: %x\n",vaddr);
923   struct ht_entry *ht_bin = hash_table_get(vaddr);
924   if (ht_bin->vaddr[1] == vaddr) {
925     ht_bin->vaddr[1] = -1;
926     ht_bin->tcaddr[1] = NULL;
927   }
928   if (ht_bin->vaddr[0] == vaddr) {
929     ht_bin->vaddr[0] = ht_bin->vaddr[1];
930     ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
931     ht_bin->vaddr[1] = -1;
932     ht_bin->tcaddr[1] = NULL;
933   }
934 }
935
936 void ll_remove_matching_addrs(struct ll_entry **head,uintptr_t addr,int shift)
937 {
938   struct ll_entry *next;
939   while(*head) {
940     if(((uintptr_t)((*head)->addr)>>shift)==(addr>>shift) ||
941        ((uintptr_t)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
942     {
943       inv_debug("EXP: Remove pointer to %p (%x)\n",(*head)->addr,(*head)->vaddr);
944       remove_hash((*head)->vaddr);
945       next=(*head)->next;
946       free(*head);
947       *head=next;
948     }
949     else
950     {
951       head=&((*head)->next);
952     }
953   }
954 }
955
956 // Remove all entries from linked list
957 void ll_clear(struct ll_entry **head)
958 {
959   struct ll_entry *cur;
960   struct ll_entry *next;
961   if((cur=*head)) {
962     *head=0;
963     while(cur) {
964       next=cur->next;
965       free(cur);
966       cur=next;
967     }
968   }
969 }
970
971 // Dereference the pointers and remove them if they match
972 static void ll_kill_pointers(struct ll_entry *head,uintptr_t addr,int shift)
973 {
974   while(head) {
975     uintptr_t ptr = (uintptr_t)get_pointer(head->addr);
976     inv_debug("EXP: Lookup pointer to %lx at %p (%x)\n",(long)ptr,head->addr,head->vaddr);
977     if(((ptr>>shift)==(addr>>shift)) ||
978        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
979     {
980       inv_debug("EXP: Kill pointer at %p (%x)\n",head->addr,head->vaddr);
981       void *host_addr=find_extjump_insn(head->addr);
982       #if defined(__arm__) || defined(__aarch64__)
983         mark_clear_cache(host_addr);
984       #endif
985       set_jump_target(host_addr, head->addr);
986     }
987     head=head->next;
988   }
989 }
990
991 // This is called when we write to a compiled block (see do_invstub)
992 void invalidate_page(u_int page)
993 {
994   struct ll_entry *head;
995   struct ll_entry *next;
996   head=jump_in[page];
997   jump_in[page]=0;
998   while(head!=NULL) {
999     inv_debug("INVALIDATE: %x\n",head->vaddr);
1000     remove_hash(head->vaddr);
1001     next=head->next;
1002     free(head);
1003     head=next;
1004   }
1005   head=jump_out[page];
1006   jump_out[page]=0;
1007   while(head!=NULL) {
1008     inv_debug("INVALIDATE: kill pointer to %x (%p)\n",head->vaddr,head->addr);
1009     void *host_addr=find_extjump_insn(head->addr);
1010     #if defined(__arm__) || defined(__aarch64__)
1011       mark_clear_cache(host_addr);
1012     #endif
1013     set_jump_target(host_addr, head->addr);
1014     next=head->next;
1015     free(head);
1016     head=next;
1017   }
1018 }
1019
1020 static void invalidate_block_range(u_int block, u_int first, u_int last)
1021 {
1022   u_int page=get_page(block<<12);
1023   //printf("first=%d last=%d\n",first,last);
1024   invalidate_page(page);
1025   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1026   assert(last<page+5);
1027   // Invalidate the adjacent pages if a block crosses a 4K boundary
1028   while(first<page) {
1029     invalidate_page(first);
1030     first++;
1031   }
1032   for(first=page+1;first<last;first++) {
1033     invalidate_page(first);
1034   }
1035   #if defined(__arm__) || defined(__aarch64__)
1036     do_clear_cache();
1037   #endif
1038
1039   // Don't trap writes
1040   invalid_code[block]=1;
1041
1042   #ifdef USE_MINI_HT
1043   memset(mini_ht,-1,sizeof(mini_ht));
1044   #endif
1045 }
1046
1047 void invalidate_block(u_int block)
1048 {
1049   u_int page=get_page(block<<12);
1050   u_int vpage=get_vpage(block<<12);
1051   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1052   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1053   u_int first,last;
1054   first=last=page;
1055   struct ll_entry *head;
1056   head=jump_dirty[vpage];
1057   //printf("page=%d vpage=%d\n",page,vpage);
1058   while(head!=NULL) {
1059     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1060       u_char *start, *end;
1061       get_bounds(head->addr, &start, &end);
1062       //printf("start: %p end: %p\n", start, end);
1063       if (page < 2048 && start >= rdram && end < rdram+RAM_SIZE) {
1064         if (((start-rdram)>>12) <= page && ((end-1-rdram)>>12) >= page) {
1065           if ((((start-rdram)>>12)&2047) < first) first = ((start-rdram)>>12)&2047;
1066           if ((((end-1-rdram)>>12)&2047) > last)  last = ((end-1-rdram)>>12)&2047;
1067         }
1068       }
1069     }
1070     head=head->next;
1071   }
1072   invalidate_block_range(block,first,last);
1073 }
1074
1075 void invalidate_addr(u_int addr)
1076 {
1077   //static int rhits;
1078   // this check is done by the caller
1079   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1080   u_int page=get_vpage(addr);
1081   if(page<2048) { // RAM
1082     struct ll_entry *head;
1083     u_int addr_min=~0, addr_max=0;
1084     u_int mask=RAM_SIZE-1;
1085     u_int addr_main=0x80000000|(addr&mask);
1086     int pg1;
1087     inv_code_start=addr_main&~0xfff;
1088     inv_code_end=addr_main|0xfff;
1089     pg1=page;
1090     if (pg1>0) {
1091       // must also check the previous page because blocks can span page boundaries
1092       pg1--;
1093       inv_code_start-=0x1000;
1094     }
1095     for(;pg1<=page;pg1++) {
1096       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1097         u_char *start_h, *end_h;
1098         u_int start, end;
1099         get_bounds(head->addr, &start_h, &end_h);
1100         start = (uintptr_t)start_h - ram_offset;
1101         end = (uintptr_t)end_h - ram_offset;
1102         if(start<=addr_main&&addr_main<end) {
1103           if(start<addr_min) addr_min=start;
1104           if(end>addr_max) addr_max=end;
1105         }
1106         else if(addr_main<start) {
1107           if(start<inv_code_end)
1108             inv_code_end=start-1;
1109         }
1110         else {
1111           if(end>inv_code_start)
1112             inv_code_start=end;
1113         }
1114       }
1115     }
1116     if (addr_min!=~0) {
1117       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1118       inv_code_start=inv_code_end=~0;
1119       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1120       return;
1121     }
1122     else {
1123       inv_code_start=(addr&~mask)|(inv_code_start&mask);
1124       inv_code_end=(addr&~mask)|(inv_code_end&mask);
1125       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
1126       return;
1127     }
1128   }
1129   invalidate_block(addr>>12);
1130 }
1131
1132 // This is called when loading a save state.
1133 // Anything could have changed, so invalidate everything.
1134 void invalidate_all_pages()
1135 {
1136   u_int page;
1137   for(page=0;page<4096;page++)
1138     invalidate_page(page);
1139   for(page=0;page<1048576;page++)
1140     if(!invalid_code[page]) {
1141       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1142       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1143     }
1144   #ifdef USE_MINI_HT
1145   memset(mini_ht,-1,sizeof(mini_ht));
1146   #endif
1147 }
1148
1149 // Add an entry to jump_out after making a link
1150 void add_link(u_int vaddr,void *src)
1151 {
1152   u_int page=get_page(vaddr);
1153   inv_debug("add_link: %p -> %x (%d)\n",src,vaddr,page);
1154   int *ptr=(int *)(src+4);
1155   assert((*ptr&0x0fff0000)==0x059f0000);
1156   (void)ptr;
1157   ll_add(jump_out+page,vaddr,src);
1158   //void *ptr=get_pointer(src);
1159   //inv_debug("add_link: Pointer is to %p\n",ptr);
1160 }
1161
1162 // If a code block was found to be unmodified (bit was set in
1163 // restore_candidate) and it remains unmodified (bit is clear
1164 // in invalid_code) then move the entries for that 4K page from
1165 // the dirty list to the clean list.
1166 void clean_blocks(u_int page)
1167 {
1168   struct ll_entry *head;
1169   inv_debug("INV: clean_blocks page=%d\n",page);
1170   head=jump_dirty[page];
1171   while(head!=NULL) {
1172     if(!invalid_code[head->vaddr>>12]) {
1173       // Don't restore blocks which are about to expire from the cache
1174       if (doesnt_expire_soon(head->addr)) {
1175         if(verify_dirty(head->addr)) {
1176           u_char *start, *end;
1177           //printf("Possibly Restore %x (%p)\n",head->vaddr, head->addr);
1178           u_int i;
1179           u_int inv=0;
1180           get_bounds(head->addr, &start, &end);
1181           if (start - rdram < RAM_SIZE) {
1182             for (i = (start-rdram+0x80000000)>>12; i <= (end-1-rdram+0x80000000)>>12; i++) {
1183               inv|=invalid_code[i];
1184             }
1185           }
1186           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1187             inv=1;
1188           }
1189           if(!inv) {
1190             void *clean_addr = get_clean_addr(head->addr);
1191             if (doesnt_expire_soon(clean_addr)) {
1192               u_int ppage=page;
1193               inv_debug("INV: Restored %x (%p/%p)\n",head->vaddr, head->addr, clean_addr);
1194               //printf("page=%x, addr=%x\n",page,head->vaddr);
1195               //assert(head->vaddr>>12==(page|0x80000));
1196               ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
1197               struct ht_entry *ht_bin = hash_table_get(head->vaddr);
1198               if (ht_bin->vaddr[0] == head->vaddr)
1199                 ht_bin->tcaddr[0] = clean_addr; // Replace existing entry
1200               if (ht_bin->vaddr[1] == head->vaddr)
1201                 ht_bin->tcaddr[1] = clean_addr; // Replace existing entry
1202             }
1203           }
1204         }
1205       }
1206     }
1207     head=head->next;
1208   }
1209 }
1210
1211 /* Register allocation */
1212
1213 // Note: registers are allocated clean (unmodified state);
1214 // if you intend to modify the register, you must call dirty_reg().
1215 static void alloc_reg(struct regstat *cur,int i,signed char reg)
1216 {
1217   int r,hr;
1218   int preferred_reg = (reg&7);
1219   if(reg==CCREG) preferred_reg=HOST_CCREG;
1220   if(reg==PTEMP||reg==FTEMP) preferred_reg=12;
1221
1222   // Don't allocate unused registers
1223   if((cur->u>>reg)&1) return;
1224
1225   // see if it's already allocated
1226   for(hr=0;hr<HOST_REGS;hr++)
1227   {
1228     if(cur->regmap[hr]==reg) return;
1229   }
1230
1231   // Keep the same mapping if the register was already allocated in a loop
1232   preferred_reg = loop_reg(i,reg,preferred_reg);
1233
1234   // Try to allocate the preferred register
1235   if(cur->regmap[preferred_reg]==-1) {
1236     cur->regmap[preferred_reg]=reg;
1237     cur->dirty&=~(1<<preferred_reg);
1238     cur->isconst&=~(1<<preferred_reg);
1239     return;
1240   }
1241   r=cur->regmap[preferred_reg];
1242   assert(r < 64);
1243   if((cur->u>>r)&1) {
1244     cur->regmap[preferred_reg]=reg;
1245     cur->dirty&=~(1<<preferred_reg);
1246     cur->isconst&=~(1<<preferred_reg);
1247     return;
1248   }
1249
1250   // Clear any unneeded registers
1251   // We try to keep the mapping consistent, if possible, because it
1252   // makes branches easier (especially loops).  So we try to allocate
1253   // first (see above) before removing old mappings.  If this is not
1254   // possible then go ahead and clear out the registers that are no
1255   // longer needed.
1256   for(hr=0;hr<HOST_REGS;hr++)
1257   {
1258     r=cur->regmap[hr];
1259     if(r>=0) {
1260       assert(r < 64);
1261       if((cur->u>>r)&1) {cur->regmap[hr]=-1;break;}
1262     }
1263   }
1264   // Try to allocate any available register, but prefer
1265   // registers that have not been used recently.
1266   if(i>0) {
1267     for(hr=0;hr<HOST_REGS;hr++) {
1268       if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1269         if(regs[i-1].regmap[hr]!=rs1[i-1]&&regs[i-1].regmap[hr]!=rs2[i-1]&&regs[i-1].regmap[hr]!=rt1[i-1]&&regs[i-1].regmap[hr]!=rt2[i-1]) {
1270           cur->regmap[hr]=reg;
1271           cur->dirty&=~(1<<hr);
1272           cur->isconst&=~(1<<hr);
1273           return;
1274         }
1275       }
1276     }
1277   }
1278   // Try to allocate any available register
1279   for(hr=0;hr<HOST_REGS;hr++) {
1280     if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1281       cur->regmap[hr]=reg;
1282       cur->dirty&=~(1<<hr);
1283       cur->isconst&=~(1<<hr);
1284       return;
1285     }
1286   }
1287
1288   // Ok, now we have to evict someone
1289   // Pick a register we hopefully won't need soon
1290   u_char hsn[MAXREG+1];
1291   memset(hsn,10,sizeof(hsn));
1292   int j;
1293   lsn(hsn,i,&preferred_reg);
1294   //printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",cur->regmap[0],cur->regmap[1],cur->regmap[2],cur->regmap[3],cur->regmap[5],cur->regmap[6],cur->regmap[7]);
1295   //printf("hsn(%x): %d %d %d %d %d %d %d\n",start+i*4,hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1296   if(i>0) {
1297     // Don't evict the cycle count at entry points, otherwise the entry
1298     // stub will have to write it.
1299     if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
1300     if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
1301     for(j=10;j>=3;j--)
1302     {
1303       // Alloc preferred register if available
1304       if(hsn[r=cur->regmap[preferred_reg]&63]==j) {
1305         for(hr=0;hr<HOST_REGS;hr++) {
1306           // Evict both parts of a 64-bit register
1307           if((cur->regmap[hr]&63)==r) {
1308             cur->regmap[hr]=-1;
1309             cur->dirty&=~(1<<hr);
1310             cur->isconst&=~(1<<hr);
1311           }
1312         }
1313         cur->regmap[preferred_reg]=reg;
1314         return;
1315       }
1316       for(r=1;r<=MAXREG;r++)
1317       {
1318         if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
1319           for(hr=0;hr<HOST_REGS;hr++) {
1320             if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1321               if(cur->regmap[hr]==r+64) {
1322                 cur->regmap[hr]=reg;
1323                 cur->dirty&=~(1<<hr);
1324                 cur->isconst&=~(1<<hr);
1325                 return;
1326               }
1327             }
1328           }
1329           for(hr=0;hr<HOST_REGS;hr++) {
1330             if(hr!=HOST_CCREG||j<hsn[CCREG]) {
1331               if(cur->regmap[hr]==r) {
1332                 cur->regmap[hr]=reg;
1333                 cur->dirty&=~(1<<hr);
1334                 cur->isconst&=~(1<<hr);
1335                 return;
1336               }
1337             }
1338           }
1339         }
1340       }
1341     }
1342   }
1343   for(j=10;j>=0;j--)
1344   {
1345     for(r=1;r<=MAXREG;r++)
1346     {
1347       if(hsn[r]==j) {
1348         for(hr=0;hr<HOST_REGS;hr++) {
1349           if(cur->regmap[hr]==r+64) {
1350             cur->regmap[hr]=reg;
1351             cur->dirty&=~(1<<hr);
1352             cur->isconst&=~(1<<hr);
1353             return;
1354           }
1355         }
1356         for(hr=0;hr<HOST_REGS;hr++) {
1357           if(cur->regmap[hr]==r) {
1358             cur->regmap[hr]=reg;
1359             cur->dirty&=~(1<<hr);
1360             cur->isconst&=~(1<<hr);
1361             return;
1362           }
1363         }
1364       }
1365     }
1366   }
1367   SysPrintf("This shouldn't happen (alloc_reg)");exit(1);
1368 }
1369
1370 // Allocate a temporary register.  This is done without regard to
1371 // dirty status or whether the register we request is on the unneeded list
1372 // Note: This will only allocate one register, even if called multiple times
1373 static void alloc_reg_temp(struct regstat *cur,int i,signed char reg)
1374 {
1375   int r,hr;
1376   int preferred_reg = -1;
1377
1378   // see if it's already allocated
1379   for(hr=0;hr<HOST_REGS;hr++)
1380   {
1381     if(hr!=EXCLUDE_REG&&cur->regmap[hr]==reg) return;
1382   }
1383
1384   // Try to allocate any available register
1385   for(hr=HOST_REGS-1;hr>=0;hr--) {
1386     if(hr!=EXCLUDE_REG&&cur->regmap[hr]==-1) {
1387       cur->regmap[hr]=reg;
1388       cur->dirty&=~(1<<hr);
1389       cur->isconst&=~(1<<hr);
1390       return;
1391     }
1392   }
1393
1394   // Find an unneeded register
1395   for(hr=HOST_REGS-1;hr>=0;hr--)
1396   {
1397     r=cur->regmap[hr];
1398     if(r>=0) {
1399       assert(r < 64);
1400       if((cur->u>>r)&1) {
1401         if(i==0||((unneeded_reg[i-1]>>r)&1)) {
1402           cur->regmap[hr]=reg;
1403           cur->dirty&=~(1<<hr);
1404           cur->isconst&=~(1<<hr);
1405           return;
1406         }
1407       }
1408     }
1409   }
1410
1411   // Ok, now we have to evict someone
1412   // Pick a register we hopefully won't need soon
1413   // TODO: we might want to follow unconditional jumps here
1414   // TODO: get rid of dupe code and make this into a function
1415   u_char hsn[MAXREG+1];
1416   memset(hsn,10,sizeof(hsn));
1417   int j;
1418   lsn(hsn,i,&preferred_reg);
1419   //printf("hsn: %d %d %d %d %d %d %d\n",hsn[cur->regmap[0]&63],hsn[cur->regmap[1]&63],hsn[cur->regmap[2]&63],hsn[cur->regmap[3]&63],hsn[cur->regmap[5]&63],hsn[cur->regmap[6]&63],hsn[cur->regmap[7]&63]);
1420   if(i>0) {
1421     // Don't evict the cycle count at entry points, otherwise the entry
1422     // stub will have to write it.
1423     if(bt[i]&&hsn[CCREG]>2) hsn[CCREG]=2;
1424     if(i>1&&hsn[CCREG]>2&&(itype[i-2]==RJUMP||itype[i-2]==UJUMP||itype[i-2]==CJUMP||itype[i-2]==SJUMP)) hsn[CCREG]=2;
1425     for(j=10;j>=3;j--)
1426     {
1427       for(r=1;r<=MAXREG;r++)
1428       {
1429         if(hsn[r]==j&&r!=rs1[i-1]&&r!=rs2[i-1]&&r!=rt1[i-1]&&r!=rt2[i-1]) {
1430           for(hr=0;hr<HOST_REGS;hr++) {
1431             if(hr!=HOST_CCREG||hsn[CCREG]>2) {
1432               if(cur->regmap[hr]==r+64) {
1433                 cur->regmap[hr]=reg;
1434                 cur->dirty&=~(1<<hr);
1435                 cur->isconst&=~(1<<hr);
1436                 return;
1437               }
1438             }
1439           }
1440           for(hr=0;hr<HOST_REGS;hr++) {
1441             if(hr!=HOST_CCREG||hsn[CCREG]>2) {
1442               if(cur->regmap[hr]==r) {
1443                 cur->regmap[hr]=reg;
1444                 cur->dirty&=~(1<<hr);
1445                 cur->isconst&=~(1<<hr);
1446                 return;
1447               }
1448             }
1449           }
1450         }
1451       }
1452     }
1453   }
1454   for(j=10;j>=0;j--)
1455   {
1456     for(r=1;r<=MAXREG;r++)
1457     {
1458       if(hsn[r]==j) {
1459         for(hr=0;hr<HOST_REGS;hr++) {
1460           if(cur->regmap[hr]==r+64) {
1461             cur->regmap[hr]=reg;
1462             cur->dirty&=~(1<<hr);
1463             cur->isconst&=~(1<<hr);
1464             return;
1465           }
1466         }
1467         for(hr=0;hr<HOST_REGS;hr++) {
1468           if(cur->regmap[hr]==r) {
1469             cur->regmap[hr]=reg;
1470             cur->dirty&=~(1<<hr);
1471             cur->isconst&=~(1<<hr);
1472             return;
1473           }
1474         }
1475       }
1476     }
1477   }
1478   SysPrintf("This shouldn't happen");exit(1);
1479 }
1480
1481 static void mov_alloc(struct regstat *current,int i)
1482 {
1483   // Note: Don't need to actually alloc the source registers
1484   //alloc_reg(current,i,rs1[i]);
1485   alloc_reg(current,i,rt1[i]);
1486
1487   clear_const(current,rs1[i]);
1488   clear_const(current,rt1[i]);
1489   dirty_reg(current,rt1[i]);
1490 }
1491
1492 static void shiftimm_alloc(struct regstat *current,int i)
1493 {
1494   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1495   {
1496     if(rt1[i]) {
1497       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1498       else lt1[i]=rs1[i];
1499       alloc_reg(current,i,rt1[i]);
1500       dirty_reg(current,rt1[i]);
1501       if(is_const(current,rs1[i])) {
1502         int v=get_const(current,rs1[i]);
1503         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1504         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1505         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1506       }
1507       else clear_const(current,rt1[i]);
1508     }
1509   }
1510   else
1511   {
1512     clear_const(current,rs1[i]);
1513     clear_const(current,rt1[i]);
1514   }
1515
1516   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1517   {
1518     assert(0);
1519   }
1520   if(opcode2[i]==0x3c) // DSLL32
1521   {
1522     assert(0);
1523   }
1524   if(opcode2[i]==0x3e) // DSRL32
1525   {
1526     assert(0);
1527   }
1528   if(opcode2[i]==0x3f) // DSRA32
1529   {
1530     assert(0);
1531   }
1532 }
1533
1534 static void shift_alloc(struct regstat *current,int i)
1535 {
1536   if(rt1[i]) {
1537     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1538     {
1539       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1540       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1541       alloc_reg(current,i,rt1[i]);
1542       if(rt1[i]==rs2[i]) {
1543         alloc_reg_temp(current,i,-1);
1544         minimum_free_regs[i]=1;
1545       }
1546     } else { // DSLLV/DSRLV/DSRAV
1547       assert(0);
1548     }
1549     clear_const(current,rs1[i]);
1550     clear_const(current,rs2[i]);
1551     clear_const(current,rt1[i]);
1552     dirty_reg(current,rt1[i]);
1553   }
1554 }
1555
1556 static void alu_alloc(struct regstat *current,int i)
1557 {
1558   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1559     if(rt1[i]) {
1560       if(rs1[i]&&rs2[i]) {
1561         alloc_reg(current,i,rs1[i]);
1562         alloc_reg(current,i,rs2[i]);
1563       }
1564       else {
1565         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1566         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1567       }
1568       alloc_reg(current,i,rt1[i]);
1569     }
1570   }
1571   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1572     if(rt1[i]) {
1573       alloc_reg(current,i,rs1[i]);
1574       alloc_reg(current,i,rs2[i]);
1575       alloc_reg(current,i,rt1[i]);
1576     }
1577   }
1578   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1579     if(rt1[i]) {
1580       if(rs1[i]&&rs2[i]) {
1581         alloc_reg(current,i,rs1[i]);
1582         alloc_reg(current,i,rs2[i]);
1583       }
1584       else
1585       {
1586         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1587         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1588       }
1589       alloc_reg(current,i,rt1[i]);
1590     }
1591   }
1592   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1593     assert(0);
1594   }
1595   clear_const(current,rs1[i]);
1596   clear_const(current,rs2[i]);
1597   clear_const(current,rt1[i]);
1598   dirty_reg(current,rt1[i]);
1599 }
1600
1601 static void imm16_alloc(struct regstat *current,int i)
1602 {
1603   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1604   else lt1[i]=rs1[i];
1605   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1606   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1607     assert(0);
1608   }
1609   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1610     clear_const(current,rs1[i]);
1611     clear_const(current,rt1[i]);
1612   }
1613   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1614     if(is_const(current,rs1[i])) {
1615       int v=get_const(current,rs1[i]);
1616       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1617       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1618       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1619     }
1620     else clear_const(current,rt1[i]);
1621   }
1622   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1623     if(is_const(current,rs1[i])) {
1624       int v=get_const(current,rs1[i]);
1625       set_const(current,rt1[i],v+imm[i]);
1626     }
1627     else clear_const(current,rt1[i]);
1628   }
1629   else {
1630     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1631   }
1632   dirty_reg(current,rt1[i]);
1633 }
1634
1635 static void load_alloc(struct regstat *current,int i)
1636 {
1637   clear_const(current,rt1[i]);
1638   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1639   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1640   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1641   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1642     alloc_reg(current,i,rt1[i]);
1643     assert(get_reg(current->regmap,rt1[i])>=0);
1644     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1645     {
1646       assert(0);
1647     }
1648     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1649     {
1650       assert(0);
1651     }
1652     dirty_reg(current,rt1[i]);
1653     // LWL/LWR need a temporary register for the old value
1654     if(opcode[i]==0x22||opcode[i]==0x26)
1655     {
1656       alloc_reg(current,i,FTEMP);
1657       alloc_reg_temp(current,i,-1);
1658       minimum_free_regs[i]=1;
1659     }
1660   }
1661   else
1662   {
1663     // Load to r0 or unneeded register (dummy load)
1664     // but we still need a register to calculate the address
1665     if(opcode[i]==0x22||opcode[i]==0x26)
1666     {
1667       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1668     }
1669     alloc_reg_temp(current,i,-1);
1670     minimum_free_regs[i]=1;
1671     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1672     {
1673       assert(0);
1674     }
1675   }
1676 }
1677
1678 void store_alloc(struct regstat *current,int i)
1679 {
1680   clear_const(current,rs2[i]);
1681   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1682   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1683   alloc_reg(current,i,rs2[i]);
1684   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1685     assert(0);
1686   }
1687   #if defined(HOST_IMM8)
1688   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1689   else alloc_reg(current,i,INVCP);
1690   #endif
1691   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1692     alloc_reg(current,i,FTEMP);
1693   }
1694   // We need a temporary register for address generation
1695   alloc_reg_temp(current,i,-1);
1696   minimum_free_regs[i]=1;
1697 }
1698
1699 void c1ls_alloc(struct regstat *current,int i)
1700 {
1701   //clear_const(current,rs1[i]); // FIXME
1702   clear_const(current,rt1[i]);
1703   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1704   alloc_reg(current,i,CSREG); // Status
1705   alloc_reg(current,i,FTEMP);
1706   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1707     assert(0);
1708   }
1709   #if defined(HOST_IMM8)
1710   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1711   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1712     alloc_reg(current,i,INVCP);
1713   #endif
1714   // We need a temporary register for address generation
1715   alloc_reg_temp(current,i,-1);
1716 }
1717
1718 void c2ls_alloc(struct regstat *current,int i)
1719 {
1720   clear_const(current,rt1[i]);
1721   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1722   alloc_reg(current,i,FTEMP);
1723   #if defined(HOST_IMM8)
1724   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1725   if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1726     alloc_reg(current,i,INVCP);
1727   #endif
1728   // We need a temporary register for address generation
1729   alloc_reg_temp(current,i,-1);
1730   minimum_free_regs[i]=1;
1731 }
1732
1733 #ifndef multdiv_alloc
1734 void multdiv_alloc(struct regstat *current,int i)
1735 {
1736   //  case 0x18: MULT
1737   //  case 0x19: MULTU
1738   //  case 0x1A: DIV
1739   //  case 0x1B: DIVU
1740   //  case 0x1C: DMULT
1741   //  case 0x1D: DMULTU
1742   //  case 0x1E: DDIV
1743   //  case 0x1F: DDIVU
1744   clear_const(current,rs1[i]);
1745   clear_const(current,rs2[i]);
1746   if(rs1[i]&&rs2[i])
1747   {
1748     if((opcode2[i]&4)==0) // 32-bit
1749     {
1750       current->u&=~(1LL<<HIREG);
1751       current->u&=~(1LL<<LOREG);
1752       alloc_reg(current,i,HIREG);
1753       alloc_reg(current,i,LOREG);
1754       alloc_reg(current,i,rs1[i]);
1755       alloc_reg(current,i,rs2[i]);
1756       dirty_reg(current,HIREG);
1757       dirty_reg(current,LOREG);
1758     }
1759     else // 64-bit
1760     {
1761       assert(0);
1762     }
1763   }
1764   else
1765   {
1766     // Multiply by zero is zero.
1767     // MIPS does not have a divide by zero exception.
1768     // The result is undefined, we return zero.
1769     alloc_reg(current,i,HIREG);
1770     alloc_reg(current,i,LOREG);
1771     dirty_reg(current,HIREG);
1772     dirty_reg(current,LOREG);
1773   }
1774 }
1775 #endif
1776
1777 void cop0_alloc(struct regstat *current,int i)
1778 {
1779   if(opcode2[i]==0) // MFC0
1780   {
1781     if(rt1[i]) {
1782       clear_const(current,rt1[i]);
1783       alloc_all(current,i);
1784       alloc_reg(current,i,rt1[i]);
1785       dirty_reg(current,rt1[i]);
1786     }
1787   }
1788   else if(opcode2[i]==4) // MTC0
1789   {
1790     if(rs1[i]){
1791       clear_const(current,rs1[i]);
1792       alloc_reg(current,i,rs1[i]);
1793       alloc_all(current,i);
1794     }
1795     else {
1796       alloc_all(current,i); // FIXME: Keep r0
1797       current->u&=~1LL;
1798       alloc_reg(current,i,0);
1799     }
1800   }
1801   else
1802   {
1803     // TLBR/TLBWI/TLBWR/TLBP/ERET
1804     assert(opcode2[i]==0x10);
1805     alloc_all(current,i);
1806   }
1807   minimum_free_regs[i]=HOST_REGS;
1808 }
1809
1810 static void cop12_alloc(struct regstat *current,int i)
1811 {
1812   alloc_reg(current,i,CSREG); // Load status
1813   if(opcode2[i]<3) // MFC1/CFC1
1814   {
1815     if(rt1[i]){
1816       clear_const(current,rt1[i]);
1817       alloc_reg(current,i,rt1[i]);
1818       dirty_reg(current,rt1[i]);
1819     }
1820     alloc_reg_temp(current,i,-1);
1821   }
1822   else if(opcode2[i]>3) // MTC1/CTC1
1823   {
1824     if(rs1[i]){
1825       clear_const(current,rs1[i]);
1826       alloc_reg(current,i,rs1[i]);
1827     }
1828     else {
1829       current->u&=~1LL;
1830       alloc_reg(current,i,0);
1831     }
1832     alloc_reg_temp(current,i,-1);
1833   }
1834   minimum_free_regs[i]=1;
1835 }
1836
1837 void c2op_alloc(struct regstat *current,int i)
1838 {
1839   alloc_reg_temp(current,i,-1);
1840 }
1841
1842 void syscall_alloc(struct regstat *current,int i)
1843 {
1844   alloc_cc(current,i);
1845   dirty_reg(current,CCREG);
1846   alloc_all(current,i);
1847   minimum_free_regs[i]=HOST_REGS;
1848   current->isconst=0;
1849 }
1850
1851 void delayslot_alloc(struct regstat *current,int i)
1852 {
1853   switch(itype[i]) {
1854     case UJUMP:
1855     case CJUMP:
1856     case SJUMP:
1857     case RJUMP:
1858     case SYSCALL:
1859     case HLECALL:
1860     case SPAN:
1861       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1862       SysPrintf("Disabled speculative precompilation\n");
1863       stop_after_jal=1;
1864       break;
1865     case IMM16:
1866       imm16_alloc(current,i);
1867       break;
1868     case LOAD:
1869     case LOADLR:
1870       load_alloc(current,i);
1871       break;
1872     case STORE:
1873     case STORELR:
1874       store_alloc(current,i);
1875       break;
1876     case ALU:
1877       alu_alloc(current,i);
1878       break;
1879     case SHIFT:
1880       shift_alloc(current,i);
1881       break;
1882     case MULTDIV:
1883       multdiv_alloc(current,i);
1884       break;
1885     case SHIFTIMM:
1886       shiftimm_alloc(current,i);
1887       break;
1888     case MOV:
1889       mov_alloc(current,i);
1890       break;
1891     case COP0:
1892       cop0_alloc(current,i);
1893       break;
1894     case COP1:
1895     case COP2:
1896       cop12_alloc(current,i);
1897       break;
1898     case C1LS:
1899       c1ls_alloc(current,i);
1900       break;
1901     case C2LS:
1902       c2ls_alloc(current,i);
1903       break;
1904     case C2OP:
1905       c2op_alloc(current,i);
1906       break;
1907   }
1908 }
1909
1910 // Special case where a branch and delay slot span two pages in virtual memory
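// (e.g. a branch at the end of one page whose delay slot is the first
// instruction of the next page, which may not be compiled yet; constants
// are dropped, everything is flushed and all host registers are freed)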
1911 static void pagespan_alloc(struct regstat *current,int i)
1912 {
1913   current->isconst=0;
1914   current->wasconst=0;
1915   regs[i].wasconst=0;
1916   minimum_free_regs[i]=HOST_REGS;
1917   alloc_all(current,i);
1918   alloc_cc(current,i);
1919   dirty_reg(current,CCREG);
1920   if(opcode[i]==3) // JAL
1921   {
1922     alloc_reg(current,i,31);
1923     dirty_reg(current,31);
1924   }
1925   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1926   {
1927     alloc_reg(current,i,rs1[i]);
1928     if (rt1[i]!=0) {
1929       alloc_reg(current,i,rt1[i]);
1930       dirty_reg(current,rt1[i]);
1931     }
1932   }
1933   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1934   {
1935     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1936     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1937   }
1938   else
1939   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1940   {
1941     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1942   }
1943   //else ...
1944 }
1945
1946 static void add_stub(enum stub_type type, void *addr, void *retaddr,
1947   u_int a, uintptr_t b, uintptr_t c, u_int d, u_int e)
1948 {
1949   assert(a < ARRAY_SIZE(stubs));
1950   stubs[stubcount].type = type;
1951   stubs[stubcount].addr = addr;
1952   stubs[stubcount].retaddr = retaddr;
1953   stubs[stubcount].a = a;
1954   stubs[stubcount].b = b;
1955   stubs[stubcount].c = c;
1956   stubs[stubcount].d = d;
1957   stubs[stubcount].e = e;
1958   stubcount++;
1959 }
1960
1961 static void add_stub_r(enum stub_type type, void *addr, void *retaddr,
1962   int i, int addr_reg, struct regstat *i_regs, int ccadj, u_int reglist)
1963 {
1964   add_stub(type, addr, retaddr, i, addr_reg, (uintptr_t)i_regs, ccadj, reglist);
1965 }
1966
1967 // Write out a single register
1968 static void wb_register(signed char r,signed char regmap[],uint64_t dirty)
1969 {
1970   int hr;
1971   for(hr=0;hr<HOST_REGS;hr++) {
1972     if(hr!=EXCLUDE_REG) {
1973       if((regmap[hr]&63)==r) {
1974         if((dirty>>hr)&1) {
1975           assert(regmap[hr]<64);
1976           emit_storereg(r,hr);
1977         }
1978       }
1979     }
1980   }
1981 }
1982
1983 static void wb_valid(signed char pre[],signed char entry[],u_int dirty_pre,u_int dirty,uint64_t u)
1984 {
1985   //if(dirty_pre==dirty) return;
1986   int hr,reg;
1987   for(hr=0;hr<HOST_REGS;hr++) {
1988     if(hr!=EXCLUDE_REG) {
1989       reg=pre[hr];
1990       if(((~u)>>(reg&63))&1) {
1991         if(reg>0) {
1992           if(((dirty_pre&~dirty)>>hr)&1) {
1993             if(reg>0&&reg<34) {
1994               emit_storereg(reg,hr);
1995             }
1996             else if(reg>=64) {
1997               assert(0);
1998             }
1999           }
2000         }
2001       }
2002     }
2003   }
2004 }
2005
2006 void rlist()
2007 {
2008   int i;
2009   printf("TRACE: ");
2010   for(i=0;i<32;i++)
2011     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2012   printf("\n");
2013 }
2014
2015 // trashes r2
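// Note: moves the values held in host regs a0/a1 into argument registers 0
// and 1 before a call; when the two operands are exactly swapped (the value
// for r0 sits in r1 and vice versa), one of them is bounced through r2,
// hence the clobber noted above.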
2016 static void pass_args(int a0, int a1)
2017 {
2018   if(a0==1&&a1==0) {
2019     // must swap
2020     emit_mov(a0,2); emit_mov(a1,1); emit_mov(2,0);
2021   }
2022   else if(a0!=0&&a1==0) {
2023     emit_mov(a1,1);
2024     if (a0>=0) emit_mov(a0,0);
2025   }
2026   else {
2027     if(a0>=0&&a0!=0) emit_mov(a0,0);
2028     if(a1>=0&&a1!=1) emit_mov(a1,1);
2029   }
2030 }
2031
2032 static void alu_assemble(int i,struct regstat *i_regs)
2033 {
2034   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2035     if(rt1[i]) {
2036       signed char s1,s2,t;
2037       t=get_reg(i_regs->regmap,rt1[i]);
2038       if(t>=0) {
2039         s1=get_reg(i_regs->regmap,rs1[i]);
2040         s2=get_reg(i_regs->regmap,rs2[i]);
2041         if(rs1[i]&&rs2[i]) {
2042           assert(s1>=0);
2043           assert(s2>=0);
2044           if(opcode2[i]&2) emit_sub(s1,s2,t);
2045           else emit_add(s1,s2,t);
2046         }
2047         else if(rs1[i]) {
2048           if(s1>=0) emit_mov(s1,t);
2049           else emit_loadreg(rs1[i],t);
2050         }
2051         else if(rs2[i]) {
2052           if(s2>=0) {
2053             if(opcode2[i]&2) emit_neg(s2,t);
2054             else emit_mov(s2,t);
2055           }
2056           else {
2057             emit_loadreg(rs2[i],t);
2058             if(opcode2[i]&2) emit_neg(t,t);
2059           }
2060         }
2061         else emit_zeroreg(t);
2062       }
2063     }
2064   }
2065   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2066     assert(0);
2067   }
2068   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2069     if(rt1[i]) {
2070       signed char s1l,s2l,t;
2071       {
2072         t=get_reg(i_regs->regmap,rt1[i]);
2073         //assert(t>=0);
2074         if(t>=0) {
2075           s1l=get_reg(i_regs->regmap,rs1[i]);
2076           s2l=get_reg(i_regs->regmap,rs2[i]);
2077           if(rs2[i]==0) // rx<r0
2078           {
2079             assert(s1l>=0);
2080             if(opcode2[i]==0x2a) // SLT
2081               emit_shrimm(s1l,31,t);
2082             else // SLTU (unsigned can not be less than zero)
2083               emit_zeroreg(t);
2084           }
2085           else if(rs1[i]==0) // r0<rx
2086           {
2087             assert(s2l>=0);
2088             if(opcode2[i]==0x2a) // SLT
2089               emit_set_gz32(s2l,t);
2090             else // SLTU (set if not zero)
2091               emit_set_nz32(s2l,t);
2092           }
2093           else{
2094             assert(s1l>=0);assert(s2l>=0);
2095             if(opcode2[i]==0x2a) // SLT
2096               emit_set_if_less32(s1l,s2l,t);
2097             else // SLTU
2098               emit_set_if_carry32(s1l,s2l,t);
2099           }
2100         }
2101       }
2102     }
2103   }
2104   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2105     if(rt1[i]) {
2106       signed char s1l,s2l,tl;
2107       tl=get_reg(i_regs->regmap,rt1[i]);
2108       {
2109         if(tl>=0) {
2110           s1l=get_reg(i_regs->regmap,rs1[i]);
2111           s2l=get_reg(i_regs->regmap,rs2[i]);
2112           if(rs1[i]&&rs2[i]) {
2113             assert(s1l>=0);
2114             assert(s2l>=0);
2115             if(opcode2[i]==0x24) { // AND
2116               emit_and(s1l,s2l,tl);
2117             } else
2118             if(opcode2[i]==0x25) { // OR
2119               emit_or(s1l,s2l,tl);
2120             } else
2121             if(opcode2[i]==0x26) { // XOR
2122               emit_xor(s1l,s2l,tl);
2123             } else
2124             if(opcode2[i]==0x27) { // NOR
2125               emit_or(s1l,s2l,tl);
2126               emit_not(tl,tl);
2127             }
2128           }
2129           else
2130           {
2131             if(opcode2[i]==0x24) { // AND
2132               emit_zeroreg(tl);
2133             } else
2134             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2135               if(rs1[i]){
2136                 if(s1l>=0) emit_mov(s1l,tl);
2137                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2138               }
2139               else
2140               if(rs2[i]){
2141                 if(s2l>=0) emit_mov(s2l,tl);
2142                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2143               }
2144               else emit_zeroreg(tl);
2145             } else
2146             if(opcode2[i]==0x27) { // NOR
2147               if(rs1[i]){
2148                 if(s1l>=0) emit_not(s1l,tl);
2149                 else {
2150                   emit_loadreg(rs1[i],tl);
2151                   emit_not(tl,tl);
2152                 }
2153               }
2154               else
2155               if(rs2[i]){
2156                 if(s2l>=0) emit_not(s2l,tl);
2157                 else {
2158                   emit_loadreg(rs2[i],tl);
2159                   emit_not(tl,tl);
2160                 }
2161               }
2162               else emit_movimm(-1,tl);
2163             }
2164           }
2165         }
2166       }
2167     }
2168   }
2169 }
2170
2171 void imm16_assemble(int i,struct regstat *i_regs)
2172 {
2173   if (opcode[i]==0x0f) { // LUI
2174     if(rt1[i]) {
2175       signed char t;
2176       t=get_reg(i_regs->regmap,rt1[i]);
2177       //assert(t>=0);
2178       if(t>=0) {
2179         if(!((i_regs->isconst>>t)&1))
2180           emit_movimm(imm[i]<<16,t);
2181       }
2182     }
2183   }
2184   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2185     if(rt1[i]) {
2186       signed char s,t;
2187       t=get_reg(i_regs->regmap,rt1[i]);
2188       s=get_reg(i_regs->regmap,rs1[i]);
2189       if(rs1[i]) {
2190         //assert(t>=0);
2191         //assert(s>=0);
2192         if(t>=0) {
2193           if(!((i_regs->isconst>>t)&1)) {
2194             if(s<0) {
2195               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2196               emit_addimm(t,imm[i],t);
2197             }else{
2198               if(!((i_regs->wasconst>>s)&1))
2199                 emit_addimm(s,imm[i],t);
2200               else
2201                 emit_movimm(constmap[i][s]+imm[i],t);
2202             }
2203           }
2204         }
2205       } else {
2206         if(t>=0) {
2207           if(!((i_regs->isconst>>t)&1))
2208             emit_movimm(imm[i],t);
2209         }
2210       }
2211     }
2212   }
2213   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2214     if(rt1[i]) {
2215       signed char sh,sl,th,tl;
2216       th=get_reg(i_regs->regmap,rt1[i]|64);
2217       tl=get_reg(i_regs->regmap,rt1[i]);
2218       sh=get_reg(i_regs->regmap,rs1[i]|64);
2219       sl=get_reg(i_regs->regmap,rs1[i]);
2220       if(tl>=0) {
2221         if(rs1[i]) {
2222           assert(sh>=0);
2223           assert(sl>=0);
2224           if(th>=0) {
2225             emit_addimm64_32(sh,sl,imm[i],th,tl);
2226           }
2227           else {
2228             emit_addimm(sl,imm[i],tl);
2229           }
2230         } else {
2231           emit_movimm(imm[i],tl);
2232           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2233         }
2234       }
2235     }
2236   }
2237   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2238     if(rt1[i]) {
2239       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2240       signed char sl,t;
2241       t=get_reg(i_regs->regmap,rt1[i]);
2242       sl=get_reg(i_regs->regmap,rs1[i]);
2243       //assert(t>=0);
2244       if(t>=0) {
2245         if(rs1[i]>0) {
2246             if(opcode[i]==0x0a) { // SLTI
2247               if(sl<0) {
2248                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2249                 emit_slti32(t,imm[i],t);
2250               }else{
2251                 emit_slti32(sl,imm[i],t);
2252               }
2253             }
2254             else { // SLTIU
2255               if(sl<0) {
2256                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2257                 emit_sltiu32(t,imm[i],t);
2258               }else{
2259                 emit_sltiu32(sl,imm[i],t);
2260               }
2261             }
2262         }else{
2263           // SLTI(U) with r0 is just stupid,
2264           // nonetheless examples can be found
2265           if(opcode[i]==0x0a) // SLTI
2266             if(0<imm[i]) emit_movimm(1,t);
2267             else emit_zeroreg(t);
2268           else // SLTIU
2269           {
2270             if(imm[i]) emit_movimm(1,t);
2271             else emit_zeroreg(t);
2272           }
2273         }
2274       }
2275     }
2276   }
2277   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2278     if(rt1[i]) {
2279       signed char sh,sl,th,tl;
2280       th=get_reg(i_regs->regmap,rt1[i]|64);
2281       tl=get_reg(i_regs->regmap,rt1[i]);
2282       sh=get_reg(i_regs->regmap,rs1[i]|64);
2283       sl=get_reg(i_regs->regmap,rs1[i]);
2284       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2285         if(opcode[i]==0x0c) //ANDI
2286         {
2287           if(rs1[i]) {
2288             if(sl<0) {
2289               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2290               emit_andimm(tl,imm[i],tl);
2291             }else{
2292               if(!((i_regs->wasconst>>sl)&1))
2293                 emit_andimm(sl,imm[i],tl);
2294               else
2295                 emit_movimm(constmap[i][sl]&imm[i],tl);
2296             }
2297           }
2298           else
2299             emit_zeroreg(tl);
2300           if(th>=0) emit_zeroreg(th);
2301         }
2302         else
2303         {
2304           if(rs1[i]) {
2305             if(sl<0) {
2306               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2307             }
2308             if(th>=0) {
2309               if(sh<0) {
2310                 emit_loadreg(rs1[i]|64,th);
2311               }else{
2312                 emit_mov(sh,th);
2313               }
2314             }
2315             if(opcode[i]==0x0d) { // ORI
2316               if(sl<0) {
2317                 emit_orimm(tl,imm[i],tl);
2318               }else{
2319                 if(!((i_regs->wasconst>>sl)&1))
2320                   emit_orimm(sl,imm[i],tl);
2321                 else
2322                   emit_movimm(constmap[i][sl]|imm[i],tl);
2323               }
2324             }
2325             if(opcode[i]==0x0e) { // XORI
2326               if(sl<0) {
2327                 emit_xorimm(tl,imm[i],tl);
2328               }else{
2329                 if(!((i_regs->wasconst>>sl)&1))
2330                   emit_xorimm(sl,imm[i],tl);
2331                 else
2332                   emit_movimm(constmap[i][sl]^imm[i],tl);
2333               }
2334             }
2335           }
2336           else {
2337             emit_movimm(imm[i],tl);
2338             if(th>=0) emit_zeroreg(th);
2339           }
2340         }
2341       }
2342     }
2343   }
2344 }
2345
2346 void shiftimm_assemble(int i,struct regstat *i_regs)
2347 {
2348   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2349   {
2350     if(rt1[i]) {
2351       signed char s,t;
2352       t=get_reg(i_regs->regmap,rt1[i]);
2353       s=get_reg(i_regs->regmap,rs1[i]);
2354       //assert(t>=0);
2355       if(t>=0&&!((i_regs->isconst>>t)&1)){
2356         if(rs1[i]==0)
2357         {
2358           emit_zeroreg(t);
2359         }
2360         else
2361         {
2362           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2363           if(imm[i]) {
2364             if(opcode2[i]==0) // SLL
2365             {
2366               emit_shlimm(s<0?t:s,imm[i],t);
2367             }
2368             if(opcode2[i]==2) // SRL
2369             {
2370               emit_shrimm(s<0?t:s,imm[i],t);
2371             }
2372             if(opcode2[i]==3) // SRA
2373             {
2374               emit_sarimm(s<0?t:s,imm[i],t);
2375             }
2376           }else{
2377             // Shift by zero
2378             if(s>=0 && s!=t) emit_mov(s,t);
2379           }
2380         }
2381       }
2382       //emit_storereg(rt1[i],t); //DEBUG
2383     }
2384   }
2385   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2386   {
2387     assert(0);
2388   }
2389   if(opcode2[i]==0x3c) // DSLL32
2390   {
2391     assert(0);
2392   }
2393   if(opcode2[i]==0x3e) // DSRL32
2394   {
2395     assert(0);
2396   }
2397   if(opcode2[i]==0x3f) // DSRA32
2398   {
2399     assert(0);
2400   }
2401 }
2402
2403 #ifndef shift_assemble
2404 void shift_assemble(int i,struct regstat *i_regs)
2405 {
2406   printf("Need shift_assemble for this architecture.\n");
2407   exit(1);
2408 }
2409 #endif
2410
2411 enum {
2412   MTYPE_8000 = 0,
2413   MTYPE_8020,
2414   MTYPE_0000,
2415   MTYPE_A000,
2416   MTYPE_1F80,
2417 };
2418
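// Rough guide to the classification below (PS1 address map): addresses below
// 0x00200000 are the KUSEG mirror of RAM, 0x1f800xxx is the scratchpad,
// 0x80200000-0x807fffff are KSEG0 mirrors beyond the 2MB of real RAM, and
// 0xa0xxxxxx is the uncached KSEG1 mirror; anything else is treated as plain
// KSEG0 RAM (MTYPE_8000) and gets the normal range check.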
2419 static int get_ptr_mem_type(u_int a)
2420 {
2421   if(a < 0x00200000) {
2422     if(a<0x1000&&((start>>20)==0xbfc||(start>>24)==0xa0))
2423       // deliberately return the "wrong" type; the memhandler must be used for the BIOS self-test to pass
2424       // 007 does similar stuff from a00 mirror, weird stuff
2425       return MTYPE_8000;
2426     return MTYPE_0000;
2427   }
2428   if(0x1f800000 <= a && a < 0x1f801000)
2429     return MTYPE_1F80;
2430   if(0x80200000 <= a && a < 0x80800000)
2431     return MTYPE_8020;
2432   if(0xa0000000 <= a && a < 0xa0200000)
2433     return MTYPE_A000;
2434   return MTYPE_8000;
2435 }
2436
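// Emit the inline fast-path address check for a load/store: RAM mirrors are
// first rebased onto KSEG0 (via HOST_TEMPREG, reported back through
// *addr_reg_override), then a compare against RAM_SIZE (or the 4KB scratchpad
// window) is emitted.  Returns the location of the conditional branch so the
// caller can attach a slow-path stub to it with add_stub_r().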
2437 static void *emit_fastpath_cmp_jump(int i,int addr,int *addr_reg_override)
2438 {
2439   void *jaddr = NULL;
2440   int type=0;
2441   int mr=rs1[i];
2442   if(((smrv_strong|smrv_weak)>>mr)&1) {
2443     type=get_ptr_mem_type(smrv[mr]);
2444     //printf("set %08x @%08x r%d %d\n", smrv[mr], start+i*4, mr, type);
2445   }
2446   else {
2447     // use the mirror we are running on
2448     type=get_ptr_mem_type(start);
2449     //printf("set nospec   @%08x r%d %d\n", start+i*4, mr, type);
2450   }
2451
2452   if(type==MTYPE_8020) { // RAM 80200000+ mirror
2453     emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2454     addr=*addr_reg_override=HOST_TEMPREG;
2455     type=0;
2456   }
2457   else if(type==MTYPE_0000) { // RAM 0 mirror
2458     emit_orimm(addr,0x80000000,HOST_TEMPREG);
2459     addr=*addr_reg_override=HOST_TEMPREG;
2460     type=0;
2461   }
2462   else if(type==MTYPE_A000) { // RAM A mirror
2463     emit_andimm(addr,~0x20000000,HOST_TEMPREG);
2464     addr=*addr_reg_override=HOST_TEMPREG;
2465     type=0;
2466   }
2467   else if(type==MTYPE_1F80) { // scratchpad
2468     if (psxH == (void *)0x1f800000) {
2469       emit_addimm(addr,-0x1f800000,HOST_TEMPREG);
2470       emit_cmpimm(HOST_TEMPREG,0x1000);
2471       jaddr=out;
2472       emit_jc(0);
2473     }
2474     else {
2475       // do the usual RAM check, jump will go to the right handler
2476       type=0;
2477     }
2478   }
2479
2480   if(type==0)
2481   {
2482     emit_cmpimm(addr,RAM_SIZE);
2483     jaddr=out;
2484     #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2485     // Hint to branch predictor that the branch is unlikely to be taken
2486     if(rs1[i]>=28)
2487       emit_jno_unlikely(0);
2488     else
2489     #endif
2490       emit_jno(0);
2491     if(ram_offset!=0) {
2492       emit_addimm(addr,ram_offset,HOST_TEMPREG);
2493       addr=*addr_reg_override=HOST_TEMPREG;
2494     }
2495   }
2496
2497   return jaddr;
2498 }
2499
2500 // return the memhandler, or compute a directly accessible host address and return 0 (NULL)
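// The table has one entry per 4KB page.  If the top bit is clear, the entry
// shifted left by one is an offset that, added to the guest address, gives a
// directly accessible host address.  Otherwise the entry points to a second-
// level table (indexed by offset within the page, with separate word/half/
// byte regions) whose entries are decoded the same way; a still-set top bit
// there means a memhandler that has to be called.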
2501 static void *get_direct_memhandler(void *table, u_int addr,
2502   enum stub_type type, uintptr_t *addr_host)
2503 {
2504   uintptr_t l1, l2 = 0;
2505   l1 = ((uintptr_t *)table)[addr>>12];
2506   if ((l1 & (1ul << (sizeof(l1)*8-1))) == 0) {
2507     uintptr_t v = l1 << 1;
2508     *addr_host = v + addr;
2509     return NULL;
2510   }
2511   else {
2512     l1 <<= 1;
2513     if (type == LOADB_STUB || type == LOADBU_STUB || type == STOREB_STUB)
2514       l2 = ((uintptr_t *)l1)[0x1000/4 + 0x1000/2 + (addr&0xfff)];
2515     else if (type == LOADH_STUB || type == LOADHU_STUB || type == STOREH_STUB)
2516       l2=((uintptr_t *)l1)[0x1000/4 + (addr&0xfff)/2];
2517     else
2518       l2=((uintptr_t *)l1)[(addr&0xfff)/4];
2519     if ((l2 & (1<<31)) == 0) {
2520       uintptr_t v = l2 << 1;
2521       *addr_host = v + (addr&0xfff);
2522       return NULL;
2523     }
2524     return (void *)(l2 << 1);
2525   }
2526 }
2527
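// Typical shape of the generated code for a load: an inline fast path that
// reads RAM/scratchpad directly, plus a conditional branch (from
// emit_fastpath_cmp_jump) to a LOADx_STUB handling I/O and other slow cases.
// When the address is a known constant, the check is skipped and the access
// is either inlined directly or routed through inline_readstub().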
2528 static void load_assemble(int i,struct regstat *i_regs)
2529 {
2530   int s,th,tl,addr;
2531   int offset;
2532   void *jaddr=0;
2533   int memtarget=0,c=0;
2534   int fastload_reg_override=0;
2535   u_int hr,reglist=0;
2536   th=get_reg(i_regs->regmap,rt1[i]|64);
2537   tl=get_reg(i_regs->regmap,rt1[i]);
2538   s=get_reg(i_regs->regmap,rs1[i]);
2539   offset=imm[i];
2540   for(hr=0;hr<HOST_REGS;hr++) {
2541     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2542   }
2543   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2544   if(s>=0) {
2545     c=(i_regs->wasconst>>s)&1;
2546     if (c) {
2547       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2548     }
2549   }
2550   //printf("load_assemble: c=%d\n",c);
2551   //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
2552   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2553   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2554     ||rt1[i]==0) {
2555       // could be a FIFO/I-O address, so the read must still be performed
2556       // (this also covers dummy reads, i.e. rt1[i]==0)
2557       assem_debug("(forced read)\n");
2558       tl=get_reg(i_regs->regmap,-1);
2559       assert(tl>=0);
2560   }
2561   if(offset||s<0||c) addr=tl;
2562   else addr=s;
2563   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2564  if(tl>=0) {
2565   //printf("load_assemble: c=%d\n",c);
2566   //if(c) printf("load_assemble: const=%lx\n",(long)constmap[i][s]+offset);
2567   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2568   reglist&=~(1<<tl);
2569   if(th>=0) reglist&=~(1<<th);
2570   if(!c) {
2571     #ifdef R29_HACK
2572     // Strmnnrmn's speed hack
2573     if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2574     #endif
2575     {
2576       jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2577     }
2578   }
2579   else if(ram_offset&&memtarget) {
2580     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2581     fastload_reg_override=HOST_TEMPREG;
2582   }
2583   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2584   if (opcode[i]==0x20) { // LB
2585     if(!c||memtarget) {
2586       if(!dummy) {
2587         {
2588           int x=0,a=tl;
2589           if(!c) a=addr;
2590           if(fastload_reg_override) a=fastload_reg_override;
2591
2592           emit_movsbl_indexed(x,a,tl);
2593         }
2594       }
2595       if(jaddr)
2596         add_stub_r(LOADB_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2597     }
2598     else
2599       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2600   }
2601   if (opcode[i]==0x21) { // LH
2602     if(!c||memtarget) {
2603       if(!dummy) {
2604         int x=0,a=tl;
2605         if(!c) a=addr;
2606         if(fastload_reg_override) a=fastload_reg_override;
2607         emit_movswl_indexed(x,a,tl);
2608       }
2609       if(jaddr)
2610         add_stub_r(LOADH_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2611     }
2612     else
2613       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2614   }
2615   if (opcode[i]==0x23) { // LW
2616     if(!c||memtarget) {
2617       if(!dummy) {
2618         int a=addr;
2619         if(fastload_reg_override) a=fastload_reg_override;
2620         emit_readword_indexed(0,a,tl);
2621       }
2622       if(jaddr)
2623         add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2624     }
2625     else
2626       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2627   }
2628   if (opcode[i]==0x24) { // LBU
2629     if(!c||memtarget) {
2630       if(!dummy) {
2631         int x=0,a=tl;
2632         if(!c) a=addr;
2633         if(fastload_reg_override) a=fastload_reg_override;
2634
2635         emit_movzbl_indexed(x,a,tl);
2636       }
2637       if(jaddr)
2638         add_stub_r(LOADBU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2639     }
2640     else
2641       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2642   }
2643   if (opcode[i]==0x25) { // LHU
2644     if(!c||memtarget) {
2645       if(!dummy) {
2646         int x=0,a=tl;
2647         if(!c) a=addr;
2648         if(fastload_reg_override) a=fastload_reg_override;
2649         emit_movzwl_indexed(x,a,tl);
2650       }
2651       if(jaddr)
2652         add_stub_r(LOADHU_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2653     }
2654     else
2655       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2656   }
2657   if (opcode[i]==0x27) { // LWU
2658     assert(th>=0);
2659     if(!c||memtarget) {
2660       if(!dummy) {
2661         int a=addr;
2662         if(fastload_reg_override) a=fastload_reg_override;
2663         emit_readword_indexed(0,a,tl);
2664       }
2665       if(jaddr)
2666         add_stub_r(LOADW_STUB,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2667     }
2668     else {
2669       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2670     }
2671     emit_zeroreg(th);
2672   }
2673   if (opcode[i]==0x37) { // LD
2674     assert(0);
2675   }
2676  }
2677 }
2678
2679 #ifndef loadlr_assemble
2680 void loadlr_assemble(int i,struct regstat *i_regs)
2681 {
2682   printf("Need loadlr_assemble for this architecture.\n");
2683   exit(1);
2684 }
2685 #endif
2686
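// Stores follow the same fast-path/stub pattern as loads; in addition, unless
// the check is disabled, the invalid_code[] entry for the target page is
// tested after the write so that stores into already-compiled code trigger
// invalidation (INVCODE_STUB / invalidate_addr_reg).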
2687 void store_assemble(int i,struct regstat *i_regs)
2688 {
2689   int s,tl;
2690   int addr,temp;
2691   int offset;
2692   void *jaddr=0;
2693   enum stub_type type;
2694   int memtarget=0,c=0;
2695   int agr=AGEN1+(i&1);
2696   int faststore_reg_override=0;
2697   u_int hr,reglist=0;
2698   tl=get_reg(i_regs->regmap,rs2[i]);
2699   s=get_reg(i_regs->regmap,rs1[i]);
2700   temp=get_reg(i_regs->regmap,agr);
2701   if(temp<0) temp=get_reg(i_regs->regmap,-1);
2702   offset=imm[i];
2703   if(s>=0) {
2704     c=(i_regs->wasconst>>s)&1;
2705     if(c) {
2706       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2707     }
2708   }
2709   assert(tl>=0);
2710   assert(temp>=0);
2711   for(hr=0;hr<HOST_REGS;hr++) {
2712     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2713   }
2714   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2715   if(offset||s<0||c) addr=temp;
2716   else addr=s;
2717   if(!c) {
2718     jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
2719   }
2720   else if(ram_offset&&memtarget) {
2721     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2722     faststore_reg_override=HOST_TEMPREG;
2723   }
2724
2725   if (opcode[i]==0x28) { // SB
2726     if(!c||memtarget) {
2727       int x=0,a=temp;
2728       if(!c) a=addr;
2729       if(faststore_reg_override) a=faststore_reg_override;
2730       emit_writebyte_indexed(tl,x,a);
2731     }
2732     type=STOREB_STUB;
2733   }
2734   if (opcode[i]==0x29) { // SH
2735     if(!c||memtarget) {
2736       int x=0,a=temp;
2737       if(!c) a=addr;
2738       if(faststore_reg_override) a=faststore_reg_override;
2739       emit_writehword_indexed(tl,x,a);
2740     }
2741     type=STOREH_STUB;
2742   }
2743   if (opcode[i]==0x2B) { // SW
2744     if(!c||memtarget) {
2745       int a=addr;
2746       if(faststore_reg_override) a=faststore_reg_override;
2747       emit_writeword_indexed(tl,0,a);
2748     }
2749     type=STOREW_STUB;
2750   }
2751   if (opcode[i]==0x3F) { // SD
2752     assert(0);
2753     type=STORED_STUB;
2754   }
2755   if(jaddr) {
2756     // PCSX store handlers don't check invcode again
2757     reglist|=1<<addr;
2758     add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2759     jaddr=0;
2760   }
2761   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
2762     if(!c||memtarget) {
2763       #ifdef DESTRUCTIVE_SHIFT
2764       // The x86 shift operation is 'destructive'; it overwrites the
2765       // source register, so we need to make a copy first and use that.
2766       addr=temp;
2767       #endif
2768       #if defined(HOST_IMM8)
2769       int ir=get_reg(i_regs->regmap,INVCP);
2770       assert(ir>=0);
2771       emit_cmpmem_indexedsr12_reg(ir,addr,1);
2772       #else
2773       emit_cmpmem_indexedsr12_imm(invalid_code,addr,1);
2774       #endif
2775       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2776       emit_callne(invalidate_addr_reg[addr]);
2777       #else
2778       void *jaddr2 = out;
2779       emit_jne(0);
2780       add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),addr,0,0,0);
2781       #endif
2782     }
2783   }
2784   u_int addr_val=constmap[i][s]+offset;
2785   if(jaddr) {
2786     add_stub_r(type,jaddr,out,i,addr,i_regs,ccadj[i],reglist);
2787   } else if(c&&!memtarget) {
2788     inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
2789   }
2790   // basic detection of writes that modify the current block;
2791   // not looking backwards, as that code should already be in the MIPS i-cache
2792   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
2793     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
2794     assert(i_regs->regmap==regs[i].regmap); // not delay slot
2795     if(i_regs->regmap==regs[i].regmap) {
2796       load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
2797       wb_dirtys(regs[i].regmap_entry,regs[i].wasdirty);
2798       emit_movimm(start+i*4+4,0);
2799       emit_writeword(0,&pcaddr);
2800       emit_jmp(do_interrupt);
2801     }
2802   }
2803 }
2804
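// SWL/SWR are assembled as a small runtime switch on the low two address
// bits: the address is xor'ed with 3 and tested bit by bit, and each of the
// four cases writes only the byte/halfword/word lanes that the unaligned
// store actually touches, rotating the source register as needed.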
2805 void storelr_assemble(int i,struct regstat *i_regs)
2806 {
2807   int s,tl;
2808   int temp;
2809   int offset;
2810   void *jaddr=0;
2811   void *case1, *case2, *case3;
2812   void *done0, *done1, *done2;
2813   int memtarget=0,c=0;
2814   int agr=AGEN1+(i&1);
2815   u_int hr,reglist=0;
2816   tl=get_reg(i_regs->regmap,rs2[i]);
2817   s=get_reg(i_regs->regmap,rs1[i]);
2818   temp=get_reg(i_regs->regmap,agr);
2819   if(temp<0) temp=get_reg(i_regs->regmap,-1);
2820   offset=imm[i];
2821   if(s>=0) {
2822     c=(i_regs->isconst>>s)&1;
2823     if(c) {
2824       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2825     }
2826   }
2827   assert(tl>=0);
2828   for(hr=0;hr<HOST_REGS;hr++) {
2829     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2830   }
2831   assert(temp>=0);
2832   if(!c) {
2833     emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
2834     if(!offset&&s!=temp) emit_mov(s,temp);
2835     jaddr=out;
2836     emit_jno(0);
2837   }
2838   else
2839   {
2840     if(!memtarget||!rs1[i]) {
2841       jaddr=out;
2842       emit_jmp(0);
2843     }
2844   }
2845   emit_addimm_no_flags(ram_offset,temp);
2846
2847   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
2848     assert(0);
2849   }
2850
2851   emit_xorimm(temp,3,temp);
2852   emit_testimm(temp,2);
2853   case2=out;
2854   emit_jne(0);
2855   emit_testimm(temp,1);
2856   case1=out;
2857   emit_jne(0);
2858   // 0
2859   if (opcode[i]==0x2A) { // SWL
2860     emit_writeword_indexed(tl,0,temp);
2861   }
2862   if (opcode[i]==0x2E) { // SWR
2863     emit_writebyte_indexed(tl,3,temp);
2864   }
2865   if (opcode[i]==0x2C) { // SDL
2866     assert(0);
2867   }
2868   if (opcode[i]==0x2D) { // SDR
2869     assert(0);
2870   }
2871   done0=out;
2872   emit_jmp(0);
2873   // 1
2874   set_jump_target(case1, out);
2875   if (opcode[i]==0x2A) { // SWL
2876     // Write 3 msb into three least significant bytes
2877     if(rs2[i]) emit_rorimm(tl,8,tl);
2878     emit_writehword_indexed(tl,-1,temp);
2879     if(rs2[i]) emit_rorimm(tl,16,tl);
2880     emit_writebyte_indexed(tl,1,temp);
2881     if(rs2[i]) emit_rorimm(tl,8,tl);
2882   }
2883   if (opcode[i]==0x2E) { // SWR
2884     // Write two lsb into two most significant bytes
2885     emit_writehword_indexed(tl,1,temp);
2886   }
2887   if (opcode[i]==0x2C) { // SDL
2888     assert(0);
2889   }
2890   if (opcode[i]==0x2D) { // SDR
2891     assert(0);
2892   }
2893   done1=out;
2894   emit_jmp(0);
2895   // 2
2896   set_jump_target(case2, out);
2897   emit_testimm(temp,1);
2898   case3=out;
2899   emit_jne(0);
2900   if (opcode[i]==0x2A) { // SWL
2901     // Write two msb into two least significant bytes
2902     if(rs2[i]) emit_rorimm(tl,16,tl);
2903     emit_writehword_indexed(tl,-2,temp);
2904     if(rs2[i]) emit_rorimm(tl,16,tl);
2905   }
2906   if (opcode[i]==0x2E) { // SWR
2907     // Write 3 lsb into three most significant bytes
2908     emit_writebyte_indexed(tl,-1,temp);
2909     if(rs2[i]) emit_rorimm(tl,8,tl);
2910     emit_writehword_indexed(tl,0,temp);
2911     if(rs2[i]) emit_rorimm(tl,24,tl);
2912   }
2913   if (opcode[i]==0x2C) { // SDL
2914     assert(0);
2915   }
2916   if (opcode[i]==0x2D) { // SDR
2917     assert(0);
2918   }
2919   done2=out;
2920   emit_jmp(0);
2921   // 3
2922   set_jump_target(case3, out);
2923   if (opcode[i]==0x2A) { // SWL
2924     // Write msb into least significant byte
2925     if(rs2[i]) emit_rorimm(tl,24,tl);
2926     emit_writebyte_indexed(tl,-3,temp);
2927     if(rs2[i]) emit_rorimm(tl,8,tl);
2928   }
2929   if (opcode[i]==0x2E) { // SWR
2930     // Write entire word
2931     emit_writeword_indexed(tl,-3,temp);
2932   }
2933   if (opcode[i]==0x2C) { // SDL
2934     assert(0);
2935   }
2936   if (opcode[i]==0x2D) { // SDR
2937     assert(0);
2938   }
2939   set_jump_target(done0, out);
2940   set_jump_target(done1, out);
2941   set_jump_target(done2, out);
2942   if (opcode[i]==0x2C) { // SDL
2943     assert(0);
2944   }
2945   if (opcode[i]==0x2D) { // SDR
2946     assert(0);
2947   }
2948   if(!c||!memtarget)
2949     add_stub_r(STORELR_STUB,jaddr,out,i,temp,i_regs,ccadj[i],reglist);
2950   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
2951     emit_addimm_no_flags(-ram_offset,temp);
2952     #if defined(HOST_IMM8)
2953     int ir=get_reg(i_regs->regmap,INVCP);
2954     assert(ir>=0);
2955     emit_cmpmem_indexedsr12_reg(ir,temp,1);
2956     #else
2957     emit_cmpmem_indexedsr12_imm(invalid_code,temp,1);
2958     #endif
2959     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2960     emit_callne(invalidate_addr_reg[temp]);
2961     #else
2962     void *jaddr2 = out;
2963     emit_jne(0);
2964     add_stub(INVCODE_STUB,jaddr2,out,reglist|(1<<HOST_CCREG),temp,0,0,0);
2965     #endif
2966   }
2967 }
2968
2969 static void cop0_assemble(int i,struct regstat *i_regs)
2970 {
2971   if(opcode2[i]==0) // MFC0
2972   {
2973     signed char t=get_reg(i_regs->regmap,rt1[i]);
2974     u_int copr=(source[i]>>11)&0x1f;
2975     //assert(t>=0); // Why does this happen?  OOT is weird
2976     if(t>=0&&rt1[i]!=0) {
2977       emit_readword(&reg_cop0[copr],t);
2978     }
2979   }
2980   else if(opcode2[i]==4) // MTC0
2981   {
2982     signed char s=get_reg(i_regs->regmap,rs1[i]);
2983     char copr=(source[i]>>11)&0x1f;
2984     assert(s>=0);
2985     wb_register(rs1[i],i_regs->regmap,i_regs->dirty);
2986     if(copr==9||copr==11||copr==12||copr==13) {
2987       emit_readword(&last_count,HOST_TEMPREG);
2988       emit_loadreg(CCREG,HOST_CCREG); // TODO: do proper reg alloc
2989       emit_add(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
2990       emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
2991       emit_writeword(HOST_CCREG,&Count);
2992     }
2993     // What a mess.  The status register (12) can enable interrupts,
2994     // so needs a special case to handle a pending interrupt.
2995     // The interrupt must be taken immediately, because a subsequent
2996     // instruction might disable interrupts again.
2997     if(copr==12||copr==13) {
2998       if (is_delayslot) {
2999         // burn cycles to cause cc_interrupt, which will
3000         // reschedule next_interupt. Relies on CCREG from above.
3001         assem_debug("MTC0 DS %d\n", copr);
3002         emit_writeword(HOST_CCREG,&last_count);
3003         emit_movimm(0,HOST_CCREG);
3004         emit_storereg(CCREG,HOST_CCREG);
3005         emit_loadreg(rs1[i],1);
3006         emit_movimm(copr,0);
3007         emit_call(pcsx_mtc0_ds);
3008         emit_loadreg(rs1[i],s);
3009         return;
3010       }
3011       emit_movimm(start+i*4+4,HOST_TEMPREG);
3012       emit_writeword(HOST_TEMPREG,&pcaddr);
3013       emit_movimm(0,HOST_TEMPREG);
3014       emit_writeword(HOST_TEMPREG,&pending_exception);
3015     }
3016     //else if(copr==12&&is_delayslot) emit_call((int)MTC0_R12);
3017     //else
3018     if(s==HOST_CCREG)
3019       emit_loadreg(rs1[i],1);
3020     else if(s!=1)
3021       emit_mov(s,1);
3022     emit_movimm(copr,0);
3023     emit_call(pcsx_mtc0);
3024     if(copr==9||copr==11||copr==12||copr==13) {
3025       emit_readword(&Count,HOST_CCREG);
3026       emit_readword(&next_interupt,HOST_TEMPREG);
3027       emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3028       emit_sub(HOST_CCREG,HOST_TEMPREG,HOST_CCREG);
3029       emit_writeword(HOST_TEMPREG,&last_count);
3030       emit_storereg(CCREG,HOST_CCREG);
3031     }
3032     if(copr==12||copr==13) {
3033       assert(!is_delayslot);
3034       emit_readword(&pending_exception,14);
3035       emit_test(14,14);
3036       emit_jne(&do_interrupt);
3037     }
3038     emit_loadreg(rs1[i],s);
3039     if(get_reg(i_regs->regmap,rs1[i]|64)>=0)
3040       emit_loadreg(rs1[i]|64,get_reg(i_regs->regmap,rs1[i]|64));
3041   }
3042   else
3043   {
3044     assert(opcode2[i]==0x10);
3045     //if((source[i]&0x3f)==0x10) // RFE
3046     {
3047       emit_readword(&Status,0);
3048       emit_andimm(0,0x3c,1);
3049       emit_andimm(0,~0xf,0);
3050       emit_orrshr_imm(1,2,0);
3051       emit_writeword(0,&Status);
3052     }
3053   }
3054 }
3055
3056 static void cop1_unusable(int i,struct regstat *i_regs)
3057 {
3058   // XXX: should just do the exception instead
3059   //if(!cop1_usable)
3060   {
3061     void *jaddr=out;
3062     emit_jmp(0);
3063     add_stub_r(FP_STUB,jaddr,out,i,0,i_regs,is_delayslot,0);
3064   }
3065 }
3066
3067 static void cop1_assemble(int i,struct regstat *i_regs)
3068 {
3069   cop1_unusable(i, i_regs);
3070 }
3071
3072 static void c1ls_assemble(int i,struct regstat *i_regs)
3073 {
3074   cop1_unusable(i, i_regs);
3075 }
3076
3077 // FP_STUB
3078 static void do_cop1stub(int n)
3079 {
3080   literal_pool(256);
3081   assem_debug("do_cop1stub %x\n",start+stubs[n].a*4);
3082   set_jump_target(stubs[n].addr, out);
3083   int i=stubs[n].a;
3084 //  int rs=stubs[n].b;
3085   struct regstat *i_regs=(struct regstat *)stubs[n].c;
3086   int ds=stubs[n].d;
3087   if(!ds) {
3088     load_all_consts(regs[i].regmap_entry,regs[i].wasdirty,i);
3089     //if(i_regs!=&regs[i]) printf("oops: regs[i]=%x i_regs=%x",(int)&regs[i],(int)i_regs);
3090   }
3091   //else {printf("fp exception in delay slot\n");}
3092   wb_dirtys(i_regs->regmap_entry,i_regs->wasdirty);
3093   if(regs[i].regmap_entry[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
3094   emit_movimm(start+(i-ds)*4,EAX); // Get PC
3095   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3096   emit_jmp(ds?fp_exception_ds:fp_exception);
3097 }
3098
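// Read a GTE (COP2) data register into a host register, applying the quirks
// of the real hardware: some registers read back sign- or zero-extended to
// 16 bits, reg 15 mirrors SXY2, and regs 28/29 (IRGB/ORGB) are repacked from
// IR1-IR3 as a 5:5:5 colour value.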
3099 static void cop2_get_dreg(u_int copr,signed char tl,signed char temp)
3100 {
3101   switch (copr) {
3102     case 1:
3103     case 3:
3104     case 5:
3105     case 8:
3106     case 9:
3107     case 10:
3108     case 11:
3109       emit_readword(&reg_cop2d[copr],tl);
3110       emit_signextend16(tl,tl);
3111       emit_writeword(tl,&reg_cop2d[copr]); // hmh
3112       break;
3113     case 7:
3114     case 16:
3115     case 17:
3116     case 18:
3117     case 19:
3118       emit_readword(&reg_cop2d[copr],tl);
3119       emit_andimm(tl,0xffff,tl);
3120       emit_writeword(tl,&reg_cop2d[copr]);
3121       break;
3122     case 15:
3123       emit_readword(&reg_cop2d[14],tl); // SXY2
3124       emit_writeword(tl,&reg_cop2d[copr]);
3125       break;
3126     case 28:
3127     case 29:
3128       emit_readword(&reg_cop2d[9],temp);
3129       emit_testimm(temp,0x8000); // do we need this?
3130       emit_andimm(temp,0xf80,temp);
3131       emit_andne_imm(temp,0,temp);
3132       emit_shrimm(temp,7,tl);
3133       emit_readword(&reg_cop2d[10],temp);
3134       emit_testimm(temp,0x8000);
3135       emit_andimm(temp,0xf80,temp);
3136       emit_andne_imm(temp,0,temp);
3137       emit_orrshr_imm(temp,2,tl);
3138       emit_readword(&reg_cop2d[11],temp);
3139       emit_testimm(temp,0x8000);
3140       emit_andimm(temp,0xf80,temp);
3141       emit_andne_imm(temp,0,temp);
3142       emit_orrshl_imm(temp,3,tl);
3143       emit_writeword(tl,&reg_cop2d[copr]);
3144       break;
3145     default:
3146       emit_readword(&reg_cop2d[copr],tl);
3147       break;
3148   }
3149 }
3150
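// Write a host register to a GTE data register.  Special cases mirror the
// hardware: reg 15 pushes the SXY FIFO (SXY0<-SXY1<-SXY2<-value), reg 28
// (IRGB) unpacks the 5:5:5 colour back into IR1-IR3, reg 30 (LZCS) also
// computes the leading-zero/one count into reg 31 (LZCR), and reg 31 itself
// is read-only.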
3151 static void cop2_put_dreg(u_int copr,signed char sl,signed char temp)
3152 {
3153   switch (copr) {
3154     case 15:
3155       emit_readword(&reg_cop2d[13],temp);  // SXY1
3156       emit_writeword(sl,&reg_cop2d[copr]);
3157       emit_writeword(temp,&reg_cop2d[12]); // SXY0
3158       emit_readword(&reg_cop2d[14],temp);  // SXY2
3159       emit_writeword(sl,&reg_cop2d[14]);
3160       emit_writeword(temp,&reg_cop2d[13]); // SXY1
3161       break;
3162     case 28:
3163       emit_andimm(sl,0x001f,temp);
3164       emit_shlimm(temp,7,temp);
3165       emit_writeword(temp,&reg_cop2d[9]);
3166       emit_andimm(sl,0x03e0,temp);
3167       emit_shlimm(temp,2,temp);
3168       emit_writeword(temp,&reg_cop2d[10]);
3169       emit_andimm(sl,0x7c00,temp);
3170       emit_shrimm(temp,3,temp);
3171       emit_writeword(temp,&reg_cop2d[11]);
3172       emit_writeword(sl,&reg_cop2d[28]);
3173       break;
3174     case 30:
3175       emit_movs(sl,temp);
3176       emit_mvnmi(temp,temp);
3177 #if defined(HAVE_ARMV5) || defined(__aarch64__)
3178       emit_clz(temp,temp);
3179 #else
3180       emit_movs(temp,HOST_TEMPREG);
3181       emit_movimm(0,temp);
3182       emit_jeq((int)out+4*4);
3183       emit_addpl_imm(temp,1,temp);
3184       emit_lslpls_imm(HOST_TEMPREG,1,HOST_TEMPREG);
3185       emit_jns((int)out-2*4);
3186 #endif
3187       emit_writeword(sl,&reg_cop2d[30]);
3188       emit_writeword(temp,&reg_cop2d[31]);
3189       break;
3190     case 31:
3191       break;
3192     default:
3193       emit_writeword(sl,&reg_cop2d[copr]);
3194       break;
3195   }
3196 }
3197
3198 static void c2ls_assemble(int i,struct regstat *i_regs)
3199 {
3200   int s,tl;
3201   int ar;
3202   int offset;
3203   int memtarget=0,c=0;
3204   void *jaddr2=NULL;
3205   enum stub_type type;
3206   int agr=AGEN1+(i&1);
3207   int fastio_reg_override=0;
3208   u_int hr,reglist=0;
3209   u_int copr=(source[i]>>16)&0x1f;
3210   s=get_reg(i_regs->regmap,rs1[i]);
3211   tl=get_reg(i_regs->regmap,FTEMP);
3212   offset=imm[i];
3213   assert(rs1[i]>0);
3214   assert(tl>=0);
3215
3216   for(hr=0;hr<HOST_REGS;hr++) {
3217     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3218   }
3219   if(i_regs->regmap[HOST_CCREG]==CCREG)
3220     reglist&=~(1<<HOST_CCREG);
3221
3222   // get the address
3223   if (opcode[i]==0x3a) { // SWC2
3224     ar=get_reg(i_regs->regmap,agr);
3225     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3226     reglist|=1<<ar;
3227   } else { // LWC2
3228     ar=tl;
3229   }
3230   if(s>=0) c=(i_regs->wasconst>>s)&1;
3231   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3232   if (!offset&&!c&&s>=0) ar=s;
3233   assert(ar>=0);
3234
3235   if (opcode[i]==0x3a) { // SWC2
3236     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3237     type=STOREW_STUB;
3238   }
3239   else
3240     type=LOADW_STUB;
3241
3242   if(c&&!memtarget) {
3243     jaddr2=out;
3244     emit_jmp(0); // inline_readstub/inline_writestub?
3245   }
3246   else {
3247     if(!c) {
3248       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3249     }
3250     else if(ram_offset&&memtarget) {
3251       emit_addimm(ar,ram_offset,HOST_TEMPREG);
3252       fastio_reg_override=HOST_TEMPREG;
3253     }
3254     if (opcode[i]==0x32) { // LWC2
3255       int a=ar;
3256       if(fastio_reg_override) a=fastio_reg_override;
3257       emit_readword_indexed(0,a,tl);
3258     }
3259     if (opcode[i]==0x3a) { // SWC2
3260       #ifdef DESTRUCTIVE_SHIFT
3261       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3262       #endif
3263       int a=ar;
3264       if(fastio_reg_override) a=fastio_reg_override;
3265       emit_writeword_indexed(tl,0,a);
3266     }
3267   }
3268   if(jaddr2)
3269     add_stub_r(type,jaddr2,out,i,ar,i_regs,ccadj[i],reglist);
3270   if(opcode[i]==0x3a) // SWC2
3271   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3272 #if defined(HOST_IMM8)
3273     int ir=get_reg(i_regs->regmap,INVCP);
3274     assert(ir>=0);
3275     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3276 #else
3277     emit_cmpmem_indexedsr12_imm(invalid_code,ar,1);
3278 #endif
3279     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3280     emit_callne(invalidate_addr_reg[ar]);
3281     #else
3282     void *jaddr3 = out;
3283     emit_jne(0);
3284     add_stub(INVCODE_STUB,jaddr3,out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3285     #endif
3286   }
3287   if (opcode[i]==0x32) { // LWC2
3288     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3289   }
3290 }
3291
3292 static void cop2_assemble(int i,struct regstat *i_regs)
3293 {
3294   u_int copr=(source[i]>>11)&0x1f;
3295   signed char temp=get_reg(i_regs->regmap,-1);
3296   if (opcode2[i]==0) { // MFC2
3297     signed char tl=get_reg(i_regs->regmap,rt1[i]);
3298     if(tl>=0&&rt1[i]!=0)
3299       cop2_get_dreg(copr,tl,temp);
3300   }
3301   else if (opcode2[i]==4) { // MTC2
3302     signed char sl=get_reg(i_regs->regmap,rs1[i]);
3303     cop2_put_dreg(copr,sl,temp);
3304   }
3305   else if (opcode2[i]==2) // CFC2
3306   {
3307     signed char tl=get_reg(i_regs->regmap,rt1[i]);
3308     if(tl>=0&&rt1[i]!=0)
3309       emit_readword(&reg_cop2c[copr],tl);
3310   }
3311   else if (opcode2[i]==6) // CTC2
3312   {
3313     signed char sl=get_reg(i_regs->regmap,rs1[i]);
3314     switch(copr) {
3315       case 4:
3316       case 12:
3317       case 20:
3318       case 26:
3319       case 27:
3320       case 29:
3321       case 30:
3322         emit_signextend16(sl,temp);
3323         break;
3324       case 31:
3325         //value = value & 0x7ffff000;
3326         //if (value & 0x7f87e000) value |= 0x80000000;
3327         emit_shrimm(sl,12,temp);
3328         emit_shlimm(temp,12,temp);
3329         emit_testimm(temp,0x7f000000);
3330         emit_testeqimm(temp,0x00870000);
3331         emit_testeqimm(temp,0x0000e000);
3332         emit_orrne_imm(temp,0x80000000,temp);
3333         break;
3334       default:
3335         temp=sl;
3336         break;
3337     }
3338     emit_writeword(temp,&reg_cop2c[copr]);
3339     assert(sl>=0);
3340   }
3341 }
3342
3343 #ifndef multdiv_assemble
3344 void multdiv_assemble(int i,struct regstat *i_regs)
3345 {
3346   printf("Need multdiv_assemble for this architecture.\n");
3347   exit(1);
3348 }
3349 #endif
3350
3351 void mov_assemble(int i,struct regstat *i_regs)
3352 {
3353   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3354   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3355   if(rt1[i]) {
3356     signed char sh,sl,th,tl;
3357     th=get_reg(i_regs->regmap,rt1[i]|64);
3358     tl=get_reg(i_regs->regmap,rt1[i]);
3359     //assert(tl>=0);
3360     if(tl>=0) {
3361       sh=get_reg(i_regs->regmap,rs1[i]|64);
3362       sl=get_reg(i_regs->regmap,rs1[i]);
3363       if(sl>=0) emit_mov(sl,tl);
3364       else emit_loadreg(rs1[i],tl);
3365       if(th>=0) {
3366         if(sh>=0) emit_mov(sh,th);
3367         else emit_loadreg(rs1[i]|64,th);
3368       }
3369     }
3370   }
3371 }
3372
3373 void syscall_assemble(int i,struct regstat *i_regs)
3374 {
3375   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3376   assert(ccreg==HOST_CCREG);
3377   assert(!is_delayslot);
3378   (void)ccreg;
3379   emit_movimm(start+i*4,EAX); // Get PC
3380   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3381   emit_jmp(jump_syscall_hle); // XXX
3382 }
3383
3384 void hlecall_assemble(int i,struct regstat *i_regs)
3385 {
3386   extern void psxNULL();
3387   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3388   assert(ccreg==HOST_CCREG);
3389   assert(!is_delayslot);
3390   (void)ccreg;
3391   emit_movimm(start+i*4+4,0); // Get PC
3392   uint32_t hleCode = source[i] & 0x03ffffff;
3393   if (hleCode >= ARRAY_SIZE(psxHLEt))
3394     emit_movimm((uintptr_t)psxNULL,1);
3395   else
3396     emit_movimm((uintptr_t)psxHLEt[hleCode],1);
3397   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
3398   emit_jmp(jump_hlecall);
3399 }
3400
3401 void intcall_assemble(int i,struct regstat *i_regs)
3402 {
3403   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3404   assert(ccreg==HOST_CCREG);
3405   assert(!is_delayslot);
3406   (void)ccreg;
3407   emit_movimm(start+i*4,0); // Get PC
3408   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3409   emit_jmp(jump_intcall);
3410 }
3411
3412 static void speculate_mov(int rs,int rt)
3413 {
3414   if(rt!=0) {
3415     smrv_strong_next|=1<<rt;
3416     smrv[rt]=smrv[rs];
3417   }
3418 }
3419
3420 static void speculate_mov_weak(int rs,int rt)
3421 {
3422   if(rt!=0) {
3423     smrv_weak_next|=1<<rt;
3424     smrv[rt]=smrv[rs];
3425   }
3426 }
3427
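// Track likely runtime values of the guest registers while assembling, so
// that loads/stores through them can pick a suitable fast path in
// emit_fastpath_cmp_jump().  "strong" bits mean the value was derived from a
// constant or copied from another known register; "weak" means it is only a
// guess carried over from block entry.  Anything else clears both bits.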
3428 static void speculate_register_values(int i)
3429 {
3430   if(i==0) {
3431     memcpy(smrv,psxRegs.GPR.r,sizeof(smrv));
3432     // gp,sp are likely to stay the same throughout the block
3433     smrv_strong_next=(1<<28)|(1<<29)|(1<<30);
3434     smrv_weak_next=~smrv_strong_next;
3435     //printf(" llr %08x\n", smrv[4]);
3436   }
3437   smrv_strong=smrv_strong_next;
3438   smrv_weak=smrv_weak_next;
3439   switch(itype[i]) {
3440     case ALU:
3441       if     ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
3442       else if((smrv_strong>>rs2[i])&1) speculate_mov(rs2[i],rt1[i]);
3443       else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
3444       else if((smrv_weak>>rs2[i])&1) speculate_mov_weak(rs2[i],rt1[i]);
3445       else {
3446         smrv_strong_next&=~(1<<rt1[i]);
3447         smrv_weak_next&=~(1<<rt1[i]);
3448       }
3449       break;
3450     case SHIFTIMM:
3451       smrv_strong_next&=~(1<<rt1[i]);
3452       smrv_weak_next&=~(1<<rt1[i]);
3453       // fallthrough
3454     case IMM16:
3455       if(rt1[i]&&is_const(&regs[i],rt1[i])) {
3456         int value,hr=get_reg(regs[i].regmap,rt1[i]);
3457         if(hr>=0) {
3458           if(get_final_value(hr,i,&value))
3459                smrv[rt1[i]]=value;
3460           else smrv[rt1[i]]=constmap[i][hr];
3461           smrv_strong_next|=1<<rt1[i];
3462         }
3463       }
3464       else {
3465         if     ((smrv_strong>>rs1[i])&1) speculate_mov(rs1[i],rt1[i]);
3466         else if((smrv_weak>>rs1[i])&1) speculate_mov_weak(rs1[i],rt1[i]);
3467       }
3468       break;
3469     case LOAD:
3470       if(start<0x2000&&(rt1[i]==26||(smrv[rt1[i]]>>24)==0xa0)) {
3471         // special case for BIOS
3472         smrv[rt1[i]]=0xa0000000;
3473         smrv_strong_next|=1<<rt1[i];
3474         break;
3475       }
3476       // fallthrough
3477     case SHIFT:
3478     case LOADLR:
3479     case MOV:
3480       smrv_strong_next&=~(1<<rt1[i]);
3481       smrv_weak_next&=~(1<<rt1[i]);
3482       break;
3483     case COP0:
3484     case COP2:
3485       if(opcode2[i]==0||opcode2[i]==2) { // MFC/CFC
3486         smrv_strong_next&=~(1<<rt1[i]);
3487         smrv_weak_next&=~(1<<rt1[i]);
3488       }
3489       break;
3490     case C2LS:
3491       if (opcode[i]==0x32) { // LWC2
3492         smrv_strong_next&=~(1<<rt1[i]);
3493         smrv_weak_next&=~(1<<rt1[i]);
3494       }
3495       break;
3496   }
3497 #if 0
3498   int r=4;
3499   printf("x %08x %08x %d %d c %08x %08x\n",smrv[r],start+i*4,
3500     ((smrv_strong>>r)&1),(smrv_weak>>r)&1,regs[i].isconst,regs[i].wasconst);
3501 #endif
3502 }
3503
3504 void ds_assemble(int i,struct regstat *i_regs)
3505 {
3506   speculate_register_values(i);
3507   is_delayslot=1;
3508   switch(itype[i]) {
3509     case ALU:
3510       alu_assemble(i,i_regs);break;
3511     case IMM16:
3512       imm16_assemble(i,i_regs);break;
3513     case SHIFT:
3514       shift_assemble(i,i_regs);break;
3515     case SHIFTIMM:
3516       shiftimm_assemble(i,i_regs);break;
3517     case LOAD:
3518       load_assemble(i,i_regs);break;
3519     case LOADLR:
3520       loadlr_assemble(i,i_regs);break;
3521     case STORE:
3522       store_assemble(i,i_regs);break;
3523     case STORELR:
3524       storelr_assemble(i,i_regs);break;
3525     case COP0:
3526       cop0_assemble(i,i_regs);break;
3527     case COP1:
3528       cop1_assemble(i,i_regs);break;
3529     case C1LS:
3530       c1ls_assemble(i,i_regs);break;
3531     case COP2:
3532       cop2_assemble(i,i_regs);break;
3533     case C2LS:
3534       c2ls_assemble(i,i_regs);break;
3535     case C2OP:
3536       c2op_assemble(i,i_regs);break;
3537     case MULTDIV:
3538       multdiv_assemble(i,i_regs);break;
3539     case MOV:
3540       mov_assemble(i,i_regs);break;
3541     case SYSCALL:
3542     case HLECALL:
3543     case INTCALL:
3544     case SPAN:
3545     case UJUMP:
3546     case RJUMP:
3547     case CJUMP:
3548     case SJUMP:
3549       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
3550   }
3551   is_delayslot=0;
3552 }
3553
3554 // Is the branch target a valid internal jump?
3555 static int internal_branch(int addr)
3556 {
3557   if(addr&1) return 0; // Indirect (register) jump
3558   if(addr>=start && addr<start+slen*4-4)
3559   {
3560     return 1;
3561   }
3562   return 0;
3563 }
3564
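// Handle registers whose host-register assignment changes between the current
// map (pre) and the next one (entry): dirty guest registers that lose their
// host register are written back to memory (unless marked unneeded in u), and
// values that merely move to a different host register are copied with a
// register-to-register move, with no writeback needed.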
3565 static void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t u)
3566 {
3567   int hr;
3568   for(hr=0;hr<HOST_REGS;hr++) {
3569     if(hr!=EXCLUDE_REG) {
3570       if(pre[hr]!=entry[hr]) {
3571         if(pre[hr]>=0) {
3572           if((dirty>>hr)&1) {
3573             if(get_reg(entry,pre[hr])<0) {
3574               assert(pre[hr]<64);
3575               if(!((u>>pre[hr])&1))
3576                 emit_storereg(pre[hr],hr);
3577             }
3578           }
3579         }
3580       }
3581     }
3582   }
3583   // Move from one register to another (no writeback)
3584   for(hr=0;hr<HOST_REGS;hr++) {
3585     if(hr!=EXCLUDE_REG) {
3586       if(pre[hr]!=entry[hr]) {
3587         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3588           int nr;
3589           if((nr=get_reg(entry,pre[hr]))>=0) {
3590             emit_mov(hr,nr);
3591           }
3592         }
3593       }
3594     }
3595   }
3596 }
3597
3598 // Load the specified registers
3599 // This only loads the registers given as arguments because
3600 // we don't want to load things that will be overwritten
3601 static void load_regs(signed char entry[],signed char regmap[],int rs1,int rs2)
3602 {
3603   int hr;
3604   // Load 32-bit regs
3605   for(hr=0;hr<HOST_REGS;hr++) {
3606     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3607       if(entry[hr]!=regmap[hr]) {
3608         if(regmap[hr]==rs1||regmap[hr]==rs2)
3609         {
3610           if(regmap[hr]==0) {
3611             emit_zeroreg(hr);
3612           }
3613           else
3614           {
3615             emit_loadreg(regmap[hr],hr);
3616           }
3617         }
3618       }
3619     }
3620   }
3621 }
3622
3623 // Load registers prior to the start of a loop
3624 // so that they are not loaded within the loop
3625 static void loop_preload(signed char pre[],signed char entry[])
3626 {
3627   int hr;
3628   for(hr=0;hr<HOST_REGS;hr++) {
3629     if(hr!=EXCLUDE_REG) {
3630       if(pre[hr]!=entry[hr]) {
3631         if(entry[hr]>=0) {
3632           if(get_reg(pre,entry[hr])<0) {
3633             assem_debug("loop preload:\n");
3634             //printf("loop preload: %d\n",hr);
3635             if(entry[hr]==0) {
3636               emit_zeroreg(hr);
3637             }
3638             else if(entry[hr]<TEMPREG)
3639             {
3640               emit_loadreg(entry[hr],hr);
3641             }
3642             else if(entry[hr]-64<TEMPREG)
3643             {
3644               emit_loadreg(entry[hr],hr);
3645             }
3646           }
3647         }
3648       }
3649     }
3650   }
3651 }
3652
3653 // Generate address for load/store instruction
3654 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
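// Also precomputes the address for the next instruction when its base
// register already holds a known constant.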
3655 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3656 {
3657   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3658     int ra=-1;
3659     int agr=AGEN1+(i&1);
3660     if(itype[i]==LOAD) {
3661       ra=get_reg(i_regs->regmap,rt1[i]);
3662       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3663       assert(ra>=0);
3664     }
3665     if(itype[i]==LOADLR) {
3666       ra=get_reg(i_regs->regmap,FTEMP);
3667     }
3668     if(itype[i]==STORE||itype[i]==STORELR) {
3669       ra=get_reg(i_regs->regmap,agr);
3670       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3671     }
3672     if(itype[i]==C1LS||itype[i]==C2LS) {
3673       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3674         ra=get_reg(i_regs->regmap,FTEMP);
3675       else { // SWC1/SDC1/SWC2/SDC2
3676         ra=get_reg(i_regs->regmap,agr);
3677         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3678       }
3679     }
3680     int rs=get_reg(i_regs->regmap,rs1[i]);
3681     if(ra>=0) {
3682       int offset=imm[i];
3683       int c=(i_regs->wasconst>>rs)&1;
3684       if(rs1[i]==0) {
3685         // Using r0 as a base address
3686         if(!entry||entry[ra]!=agr) {
3687           if (opcode[i]==0x22||opcode[i]==0x26) {
3688             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3689           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3690             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3691           }else{
3692             emit_movimm(offset,ra);
3693           }
3694         } // else did it in the previous cycle
3695       }
3696       else if(rs<0) {
3697         if(!entry||entry[ra]!=rs1[i])
3698           emit_loadreg(rs1[i],ra);
3699         //if(!entry||entry[ra]!=rs1[i])
3700         //  printf("poor load scheduling!\n");
3701       }
3702       else if(c) {
3703         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
3704           if(!entry||entry[ra]!=agr) {
3705             if (opcode[i]==0x22||opcode[i]==0x26) {
3706               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3707             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3708               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3709             }else{
3710               emit_movimm(constmap[i][rs]+offset,ra);
3711               regs[i].loadedconst|=1<<ra;
3712             }
3713           } // else did it in the previous cycle
3714         } // else load_consts already did it
3715       }
3716       if(offset&&!c&&rs1[i]) {
3717         if(rs>=0) {
3718           emit_addimm(rs,offset,ra);
3719         }else{
3720           emit_addimm(ra,offset,ra);
3721         }
3722       }
3723     }
3724   }
3725   // Preload constants for next instruction
3726   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
3727     int agr,ra;
3728     // Actual address
3729     agr=AGEN1+((i+1)&1);
3730     ra=get_reg(i_regs->regmap,agr);
3731     if(ra>=0) {
3732       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
3733       int offset=imm[i+1];
3734       int c=(regs[i+1].wasconst>>rs)&1;
3735       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
3736         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3737           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3738         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3739           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3740         }else{
3741           emit_movimm(constmap[i+1][rs]+offset,ra);
3742           regs[i+1].loadedconst|=1<<ra;
3743         }
3744       }
3745       else if(rs1[i+1]==0) {
3746         // Using r0 as a base address
3747         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3748           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3749         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3750           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3751         }else{
3752           emit_movimm(offset,ra);
3753         }
3754       }
3755     }
3756   }
3757 }
3758
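// Follow a constant held in host register hr forward from instruction i, for
// as long as the same register keeps the same constant and no branch target
// intervenes, and return its final value so it only needs to be loaded once.
// Also folds the offset of an immediately following load that consumes the
// constant (including the branch delay slot case).  Returns nonzero when the
// value actually needs to be materialized, 0 when it is unneeded afterwards.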
3759 static int get_final_value(int hr, int i, int *value)
3760 {
3761   int reg=regs[i].regmap[hr];
3762   while(i<slen-1) {
3763     if(regs[i+1].regmap[hr]!=reg) break;
3764     if(!((regs[i+1].isconst>>hr)&1)) break;
3765     if(bt[i+1]) break;
3766     i++;
3767   }
3768   if(i<slen-1) {
3769     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
3770       *value=constmap[i][hr];
3771       return 1;
3772     }
3773     if(!bt[i+1]) {
3774       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
3775         // Load in delay slot, out-of-order execution
3776         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
3777         {
3778           // Precompute load address
3779           *value=constmap[i][hr]+imm[i+2];
3780           return 1;
3781         }
3782       }
3783       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
3784       {
3785         // Precompute load address
3786         *value=constmap[i][hr]+imm[i+1];
3787         //printf("c=%x imm=%lx\n",(long)constmap[i][hr],imm[i+1]);
3788         return 1;
3789       }
3790     }
3791   }
3792   *value=constmap[i][hr];
3793   //printf("c=%lx\n",(long)constmap[i][hr]);
3794   if(i==slen-1) return 1;
3795   assert(reg < 64);
3796   return !((unneeded_reg[i+1]>>reg)&1);
3797 }
3798
3799 // Load registers with known constants
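// Skips registers whose constant is already loaded (loadedconst) and, when
// another host register already holds a similar value, derives the new
// constant from it via emit_movimm_from to save an instruction.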
3800 static void load_consts(signed char pre[],signed char regmap[],int i)
3801 {
3802   int hr,hr2;
3803   // propagate loaded constant flags
3804   if(i==0||bt[i])
3805     regs[i].loadedconst=0;
3806   else {
3807     for(hr=0;hr<HOST_REGS;hr++) {
3808       if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
3809          &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
3810       {
3811         regs[i].loadedconst|=1<<hr;
3812       }
3813     }
3814   }
3815   // Load 32-bit regs
3816   for(hr=0;hr<HOST_REGS;hr++) {
3817     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3818       //if(entry[hr]!=regmap[hr]) {
3819       if(!((regs[i].loadedconst>>hr)&1)) {
3820         assert(regmap[hr]<64);
3821         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
3822           int value,similar=0;
3823           if(get_final_value(hr,i,&value)) {
3824             // see if some other register has similar value
3825             for(hr2=0;hr2<HOST_REGS;hr2++) {
3826               if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
3827                 if(is_similar_value(value,constmap[i][hr2])) {
3828                   similar=1;
3829                   break;
3830                 }
3831               }
3832             }
3833             if(similar) {
3834               int value2;
3835               if(get_final_value(hr2,i,&value2)) // is this needed?
3836                 emit_movimm_from(value2,hr2,value,hr);
3837               else
3838                 emit_movimm(value,hr);
3839             }
3840             else if(value==0) {
3841               emit_zeroreg(hr);
3842             }
3843             else {
3844               emit_movimm(value,hr);
3845             }
3846           }
3847           regs[i].loadedconst|=1<<hr;
3848         }
3849       }
3850     }
3851   }
3852 }
3853
3854 void load_all_consts(signed char regmap[], u_int dirty, int i)
3855 {
3856   int hr;
3857   // Load 32-bit regs
3858   for(hr=0;hr<HOST_REGS;hr++) {
3859     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3860       assert(regmap[hr] < 64);
3861       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>0) {
3862         int value=constmap[i][hr];
3863         if(value==0) {
3864           emit_zeroreg(hr);
3865         }
3866         else {
3867           emit_movimm(value,hr);
3868         }
3869       }
3870     }
3871   }
3872 }
3873
3874 // Write out all dirty registers (except cycle count)
3875 static void wb_dirtys(signed char i_regmap[],uint64_t i_dirty)
3876 {
3877   int hr;
3878   for(hr=0;hr<HOST_REGS;hr++) {
3879     if(hr!=EXCLUDE_REG) {
3880       if(i_regmap[hr]>0) {
3881         if(i_regmap[hr]!=CCREG) {
3882           if((i_dirty>>hr)&1) {
3883             assert(i_regmap[hr]<64);
3884             emit_storereg(i_regmap[hr],hr);
3885           }
3886         }
3887       }
3888     }
3889   }
3890 }
3891
3892 // Write out dirty registers that we need to reload (pair with load_needed_regs)
3893 // This writes the registers not written by store_regs_bt
3894 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_dirty,int addr)
3895 {
3896   int hr;
3897   int t=(addr-start)>>2;
3898   for(hr=0;hr<HOST_REGS;hr++) {
3899     if(hr!=EXCLUDE_REG) {
3900       if(i_regmap[hr]>0) {
3901         if(i_regmap[hr]!=CCREG) {
3902           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1)) {
3903             if((i_dirty>>hr)&1) {
3904               assert(i_regmap[hr]<64);
3905               emit_storereg(i_regmap[hr],hr);
3906             }
3907           }
3908         }
3909       }
3910     }
3911   }
3912 }
3913
3914 // Load all registers (except cycle count)
3915 void load_all_regs(signed char i_regmap[])
3916 {
3917   int hr;
3918   for(hr=0;hr<HOST_REGS;hr++) {
3919     if(hr!=EXCLUDE_REG) {
3920       if(i_regmap[hr]==0) {
3921         emit_zeroreg(hr);
3922       }
3923       else
3924       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
3925       {
3926         emit_loadreg(i_regmap[hr],hr);
3927       }
3928     }
3929   }
3930 }
3931
3932 // Load all current registers also needed by next instruction
3933 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
3934 {
3935   int hr;
3936   for(hr=0;hr<HOST_REGS;hr++) {
3937     if(hr!=EXCLUDE_REG) {
3938       if(get_reg(next_regmap,i_regmap[hr])>=0) {
3939         if(i_regmap[hr]==0) {
3940           emit_zeroreg(hr);
3941         }
3942         else
3943         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
3944         {
3945           emit_loadreg(i_regmap[hr],hr);
3946         }
3947       }
3948     }
3949   }
3950 }
3951
3952 // Load all regs, storing cycle count if necessary
3953 void load_regs_entry(int t)
3954 {
3955   int hr;
3956   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
3957   else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
3958   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
3959     emit_storereg(CCREG,HOST_CCREG);
3960   }
3961   // Load 32-bit regs
3962   for(hr=0;hr<HOST_REGS;hr++) {
3963     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
3964       if(regs[t].regmap_entry[hr]==0) {
3965         emit_zeroreg(hr);
3966       }
3967       else if(regs[t].regmap_entry[hr]!=CCREG)
3968       {
3969         emit_loadreg(regs[t].regmap_entry[hr],hr);
3970       }
3971     }
3972   }
3973 }
3974
3975 // Store dirty registers prior to branch
3976 void store_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
3977 {
3978   if(internal_branch(addr))
3979   {
3980     int t=(addr-start)>>2;
3981     int hr;
3982     for(hr=0;hr<HOST_REGS;hr++) {
3983       if(hr!=EXCLUDE_REG) {
3984         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
3985           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1)) {
3986             if((i_dirty>>hr)&1) {
3987               assert(i_regmap[hr]<64);
3988               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
3989                 emit_storereg(i_regmap[hr],hr);
3990             }
3991           }
3992         }
3993       }
3994     }
3995   }
3996   else
3997   {
3998     // Branch out of this block, write out all dirty regs
3999     wb_dirtys(i_regmap,i_dirty);
4000   }
4001 }
4002
4003 // Load all needed registers for branch target
4004 static void load_regs_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4005 {
4006   //if(addr>=start && addr<(start+slen*4))
4007   if(internal_branch(addr))
4008   {
4009     int t=(addr-start)>>2;
4010     int hr;
4011     // Store the cycle count before loading something else
4012     if(i_regmap[HOST_CCREG]!=CCREG) {
4013       assert(i_regmap[HOST_CCREG]==-1);
4014     }
4015     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4016       emit_storereg(CCREG,HOST_CCREG);
4017     }
4018     // Load 32-bit regs
4019     for(hr=0;hr<HOST_REGS;hr++) {
4020       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4021         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4022           if(regs[t].regmap_entry[hr]==0) {
4023             emit_zeroreg(hr);
4024           }
4025           else if(regs[t].regmap_entry[hr]!=CCREG)
4026           {
4027             emit_loadreg(regs[t].regmap_entry[hr],hr);
4028           }
4029         }
4030       }
4031     }
4032   }
4033 }
4034
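// Can execution jump straight to the compiled entry point at addr with the
// current register mapping and dirty bits, or would stores/loads be needed
// first?  Returns 1 when the states are compatible; targets outside this
// block only match when nothing (except the cycle count) is dirty.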
4035 static int match_bt(signed char i_regmap[],uint64_t i_dirty,int addr)
4036 {
4037   if(addr>=start && addr<start+slen*4-4)
4038   {
4039     int t=(addr-start)>>2;
4040     int hr;
4041     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4042     for(hr=0;hr<HOST_REGS;hr++)
4043     {
4044       if(hr!=EXCLUDE_REG)
4045       {
4046         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4047         {
4048           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4049           {
4050             return 0;
4051           }
4052           else
4053           if((i_dirty>>hr)&1)
4054           {
4055             if(i_regmap[hr]<TEMPREG)
4056             {
4057               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4058                 return 0;
4059             }
4060             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4061             {
4062               assert(0);
4063             }
4064           }
4065         }
4066         else // Same register but is it 32-bit or dirty?
4067         if(i_regmap[hr]>=0)
4068         {
4069           if(!((regs[t].dirty>>hr)&1))
4070           {
4071             if((i_dirty>>hr)&1)
4072             {
4073               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4074               {
4075                 //printf("%x: dirty no match\n",addr);
4076                 return 0;
4077               }
4078             }
4079           }
4080         }
4081       }
4082     }
4083     // Delay slots are not valid branch targets
4084     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP)) return 0;
4085     // Delay slots require additional processing, so do not match
4086     if(is_ds[t]) return 0;
4087   }
4088   else
4089   {
4090     int hr;
4091     for(hr=0;hr<HOST_REGS;hr++)
4092     {
4093       if(hr!=EXCLUDE_REG)
4094       {
4095         if(i_regmap[hr]>=0)
4096         {
4097           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4098           {
4099             if((i_dirty>>hr)&1)
4100             {
4101               return 0;
4102             }
4103           }
4104         }
4105       }
4106     }
4107   }
4108   return 1;
4109 }
4110
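// Debug aid: in DRC_DBG builds, emit a call to do_insn_cmp before the
// instruction at index i so the recompiler's state can be compared against a
// reference at run time; compiles to nothing otherwise.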
4111 #ifdef DRC_DBG
4112 static void drc_dbg_emit_do_cmp(int i)
4113 {
4114   extern void do_insn_cmp();
4115   extern int cycle;
4116   u_int hr,reglist=0;
4117
4118   for(hr=0;hr<HOST_REGS;hr++)
4119     if(regs[i].regmap[hr]>=0) reglist|=1<<hr;
4120   save_regs(reglist);
4121   emit_movimm(start+i*4,0);
4122   emit_writeword(0,&pcaddr);
4123   emit_call(do_insn_cmp);
4124   //emit_readword(&cycle,0);
4125   //emit_addimm(0,2,0);
4126   //emit_writeword(0,&cycle);
4127   restore_regs(reglist);
4128 }
4129 #else
4130 #define drc_dbg_emit_do_cmp(x)
4131 #endif
4132
4133 // Used when a branch jumps into the delay slot of another branch
4134 void ds_assemble_entry(int i)
4135 {
4136   int t=(ba[i]-start)>>2;
4137   if (!instr_addr[t])
4138     instr_addr[t] = out;
4139   assem_debug("Assemble delay slot at %x\n",ba[i]);
4140   assem_debug("<->\n");
4141   drc_dbg_emit_do_cmp(t);
4142   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4143     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty);
4144   load_regs(regs[t].regmap_entry,regs[t].regmap,rs1[t],rs2[t]);
4145   address_generation(t,&regs[t],regs[t].regmap_entry);
4146   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4147     load_regs(regs[t].regmap_entry,regs[t].regmap,INVCP,INVCP);
4148   is_delayslot=0;
4149   switch(itype[t]) {
4150     case ALU:
4151       alu_assemble(t,&regs[t]);break;
4152     case IMM16:
4153       imm16_assemble(t,&regs[t]);break;
4154     case SHIFT:
4155       shift_assemble(t,&regs[t]);break;
4156     case SHIFTIMM:
4157       shiftimm_assemble(t,&regs[t]);break;
4158     case LOAD:
4159       load_assemble(t,&regs[t]);break;
4160     case LOADLR:
4161       loadlr_assemble(t,&regs[t]);break;
4162     case STORE:
4163       store_assemble(t,&regs[t]);break;
4164     case STORELR:
4165       storelr_assemble(t,&regs[t]);break;
4166     case COP0:
4167       cop0_assemble(t,&regs[t]);break;
4168     case COP1:
4169       cop1_assemble(t,&regs[t]);break;
4170     case C1LS:
4171       c1ls_assemble(t,&regs[t]);break;
4172     case COP2:
4173       cop2_assemble(t,&regs[t]);break;
4174     case C2LS:
4175       c2ls_assemble(t,&regs[t]);break;
4176     case C2OP:
4177       c2op_assemble(t,&regs[t]);break;
4178     case MULTDIV:
4179       multdiv_assemble(t,&regs[t]);break;
4180     case MOV:
4181       mov_assemble(t,&regs[t]);break;
4182     case SYSCALL:
4183     case HLECALL:
4184     case INTCALL:
4185     case SPAN:
4186     case UJUMP:
4187     case RJUMP:
4188     case CJUMP:
4189     case SJUMP:
4190       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4191   }
4192   store_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
4193   load_regs_bt(regs[t].regmap,regs[t].dirty,ba[i]+4);
4194   if(internal_branch(ba[i]+4))
4195     assem_debug("branch: internal\n");
4196   else
4197     assem_debug("branch: external\n");
4198   assert(internal_branch(ba[i]+4));
4199   add_to_linker(out,ba[i]+4,internal_branch(ba[i]+4));
4200   emit_jmp(0);
4201 }
4202
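// Emit the cycle-counter update/check for the branch at index i: add the
// cycles accumulated so far to HOST_CCREG and branch out to a CC_STUB when
// the counter runs out (an event/interrupt is due).  *adj returns the cycle
// adjustment already accounted for at the branch target so the caller can
// compensate.  A branch-to-self with a NOP delay slot is treated as an idle
// loop and gets special handling.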
4203 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4204 {
4205   int count;
4206   void *jaddr;
4207   void *idle=NULL;
4208   int t=0;
4209   if(itype[i]==RJUMP)
4210   {
4211     *adj=0;
4212   }
4213   //if(ba[i]>=start && ba[i]<(start+slen*4))
4214   if(internal_branch(ba[i]))
4215   {
4216     t=(ba[i]-start)>>2;
4217     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4218     else *adj=ccadj[t];
4219   }
4220   else
4221   {
4222     *adj=0;
4223   }
4224   count=ccadj[i];
4225   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4226     // Idle loop
4227     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4228     idle=out;
4229     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4230     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4231     jaddr=out;
4232     emit_jmp(0);
4233   }
4234   else if(*adj==0||invert) {
4235     int cycles=CLOCK_ADJUST(count+2);
4236     // faster loop HACK
4237     if (t&&*adj) {
4238       int rel=t-i;
4239       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4240         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4241     }
4242     emit_addimm_and_set_flags(cycles,HOST_CCREG);
4243     jaddr=out;
4244     emit_jns(0);
4245   }
4246   else
4247   {
4248     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4249     jaddr=out;
4250     emit_jns(0);
4251   }
4252   add_stub(CC_STUB,jaddr,idle?idle:out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4253 }
4254
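// Out-of-line handler for a cycle-count overflow at a branch (CC_STUB): write
// back dirty registers, record the return PC (re-evaluating the branch
// condition when the outcome was not known at stub creation time), call
// cc_interrupt, then reload the registers expected at the resume point.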
4255 static void do_ccstub(int n)
4256 {
4257   literal_pool(256);
4258   assem_debug("do_ccstub %lx\n",start+stubs[n].b*4);
4259   set_jump_target(stubs[n].addr, out);
4260   int i=stubs[n].b;
4261   if(stubs[n].d==NULLDS) {
4262     // Delay slot instruction is nullified ("likely" branch)
4263     wb_dirtys(regs[i].regmap,regs[i].dirty);
4264   }
4265   else if(stubs[n].d!=TAKEN) {
4266     wb_dirtys(branch_regs[i].regmap,branch_regs[i].dirty);
4267   }
4268   else {
4269     if(internal_branch(ba[i]))
4270       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4271   }
4272   if(stubs[n].c!=-1)
4273   {
4274     // Save PC as return address
4275     emit_movimm(stubs[n].c,EAX);
4276     emit_writeword(EAX,&pcaddr);
4277   }
4278   else
4279   {
4280     // Return address depends on which way the branch goes
4281     if(itype[i]==CJUMP||itype[i]==SJUMP)
4282     {
4283       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4284       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4285       if(rs1[i]==0)
4286       {
4287         s1l=s2l;
4288         s2l=-1;
4289       }
4290       else if(rs2[i]==0)
4291       {
4292         s2l=-1;
4293       }
4294       assert(s1l>=0);
4295       #ifdef DESTRUCTIVE_WRITEBACK
4296       if(rs1[i]) {
4297         if((branch_regs[i].dirty>>s1l)&1)
4298           emit_loadreg(rs1[i],s1l);
4299       }
4300       else {
4301         if((branch_regs[i].dirty>>s1l)&1)
4302           emit_loadreg(rs2[i],s1l);
4303       }
4304       if(s2l>=0)
4305         if((branch_regs[i].dirty>>s2l)&1)
4306           emit_loadreg(rs2[i],s2l);
4307       #endif
4308       int hr=0;
4309       int addr=-1,alt=-1,ntaddr=-1;
4310       while(hr<HOST_REGS)
4311       {
4312         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4313            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4314            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4315         {
4316           addr=hr++;break;
4317         }
4318         hr++;
4319       }
4320       while(hr<HOST_REGS)
4321       {
4322         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4323            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4324            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4325         {
4326           alt=hr++;break;
4327         }
4328         hr++;
4329       }
4330       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4331       {
4332         while(hr<HOST_REGS)
4333         {
4334           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4335              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4336              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4337           {
4338             ntaddr=hr;break;
4339           }
4340           hr++;
4341         }
4342         assert(hr<HOST_REGS);
4343       }
4344       if((opcode[i]&0x2f)==4) // BEQ
4345       {
4346         #ifdef HAVE_CMOV_IMM
4347         if(s2l>=0) emit_cmp(s1l,s2l);
4348         else emit_test(s1l,s1l);
4349         emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4350         #else
4351         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4352         if(s2l>=0) emit_cmp(s1l,s2l);
4353         else emit_test(s1l,s1l);
4354         emit_cmovne_reg(alt,addr);
4355         #endif
4356       }
4357       if((opcode[i]&0x2f)==5) // BNE
4358       {
4359         #ifdef HAVE_CMOV_IMM
4360         if(s2l>=0) emit_cmp(s1l,s2l);
4361         else emit_test(s1l,s1l);
4362         emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4363         #else
4364         emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4365         if(s2l>=0) emit_cmp(s1l,s2l);
4366         else emit_test(s1l,s1l);
4367         emit_cmovne_reg(alt,addr);
4368         #endif
4369       }
4370       if((opcode[i]&0x2f)==6) // BLEZ
4371       {
4372         //emit_movimm(ba[i],alt);
4373         //emit_movimm(start+i*4+8,addr);
4374         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4375         emit_cmpimm(s1l,1);
4376         emit_cmovl_reg(alt,addr);
4377       }
4378       if((opcode[i]&0x2f)==7) // BGTZ
4379       {
4380         //emit_movimm(ba[i],addr);
4381         //emit_movimm(start+i*4+8,ntaddr);
4382         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4383         emit_cmpimm(s1l,1);
4384         emit_cmovl_reg(ntaddr,addr);
4385       }
4386       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4387       {
4388         //emit_movimm(ba[i],alt);
4389         //emit_movimm(start+i*4+8,addr);
4390         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4391         emit_test(s1l,s1l);
4392         emit_cmovs_reg(alt,addr);
4393       }
4394       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4395       {
4396         //emit_movimm(ba[i],addr);
4397         //emit_movimm(start+i*4+8,alt);
4398         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4399         emit_test(s1l,s1l);
4400         emit_cmovs_reg(alt,addr);
4401       }
4402       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4403         if(source[i]&0x10000) // BC1T
4404         {
4405           //emit_movimm(ba[i],alt);
4406           //emit_movimm(start+i*4+8,addr);
4407           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4408           emit_testimm(s1l,0x800000);
4409           emit_cmovne_reg(alt,addr);
4410         }
4411         else // BC1F
4412         {
4413           //emit_movimm(ba[i],addr);
4414           //emit_movimm(start+i*4+8,alt);
4415           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4416           emit_testimm(s1l,0x800000);
4417           emit_cmovne_reg(alt,addr);
4418         }
4419       }
4420       emit_writeword(addr,&pcaddr);
4421     }
4422     else
4423     if(itype[i]==RJUMP)
4424     {
4425       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4426       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4427         r=get_reg(branch_regs[i].regmap,RTEMP);
4428       }
4429       emit_writeword(r,&pcaddr);
4430     }
4431     else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
4432   }
4433   // Update cycle count
4434   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4435   if(stubs[n].a) emit_addimm(HOST_CCREG,CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
4436   emit_call(cc_interrupt);
4437   if(stubs[n].a) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((signed int)stubs[n].a),HOST_CCREG);
4438   if(stubs[n].d==TAKEN) {
4439     if(internal_branch(ba[i]))
4440       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4441     else if(itype[i]==RJUMP) {
4442       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4443         emit_readword(&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4444       else
4445         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4446     }
4447   }else if(stubs[n].d==NOTTAKEN) {
4448     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4449     else load_all_regs(branch_regs[i].regmap);
4450   }else if(stubs[n].d==NULLDS) {
4451     // Delay slot instruction is nullified ("likely" branch)
4452     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4453     else load_all_regs(regs[i].regmap);
4454   }else{
4455     load_all_regs(branch_regs[i].regmap);
4456   }
4457   emit_jmp(stubs[n].retaddr);
4458 }
4459
4460 static void add_to_linker(void *addr, u_int target, int ext)
4461 {
4462   assert(linkcount < ARRAY_SIZE(link_addr));
4463   link_addr[linkcount].addr = addr;
4464   link_addr[linkcount].target = target;
4465   link_addr[linkcount].ext = ext;
4466   linkcount++;
4467 }
4468
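// Write the return address (PC+8) of a JAL into the link register, going
// through the mini hash table or prefetch paths when those are enabled.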
4469 static void ujump_assemble_write_ra(int i)
4470 {
4471   int rt;
4472   unsigned int return_address;
4473   rt=get_reg(branch_regs[i].regmap,31);
4474   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4475   //assert(rt>=0);
4476   return_address=start+i*4+8;
4477   if(rt>=0) {
4478     #ifdef USE_MINI_HT
4479     if(internal_branch(return_address)&&rt1[i+1]!=31) {
4480       int temp=-1; // note: must be ds-safe
4481       #ifdef HOST_TEMPREG
4482       temp=HOST_TEMPREG;
4483       #endif
4484       if(temp>=0) do_miniht_insert(return_address,rt,temp);
4485       else emit_movimm(return_address,rt);
4486     }
4487     else
4488     #endif
4489     {
4490       #ifdef REG_PREFETCH
4491       if(temp>=0)
4492       {
4493         if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4494       }
4495       #endif
4496       emit_movimm(return_address,rt); // PC into link register
4497       #ifdef IMM_PREFETCH
4498       emit_prefetch(hash_table_get(return_address));
4499       #endif
4500     }
4501   }
4502 }
4503
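// Assemble an unconditional jump (J/JAL).  The delay slot is emitted first;
// the return address is written before it if the slot reads $ra, otherwise
// after.  The block is then linked to the target, or falls through into
// ds_assemble_entry when the target is a delay slot inside this block.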
4504 void ujump_assemble(int i,struct regstat *i_regs)
4505 {
4506   int ra_done=0;
4507   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4508   address_generation(i+1,i_regs,regs[i].regmap_entry);
4509   #ifdef REG_PREFETCH
4510   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4511   if(rt1[i]==31&&temp>=0)
4512   {
4513     signed char *i_regmap=i_regs->regmap;
4514     int return_address=start+i*4+8;
4515     if(get_reg(branch_regs[i].regmap,31)>0)
4516     if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4517   }
4518   #endif
4519   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4520     ujump_assemble_write_ra(i); // writeback ra for DS
4521     ra_done=1;
4522   }
4523   ds_assemble(i+1,i_regs);
4524   uint64_t bc_unneeded=branch_regs[i].u;
4525   bc_unneeded|=1|(1LL<<rt1[i]);
4526   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
4527   load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
4528   if(!ra_done&&rt1[i]==31)
4529     ujump_assemble_write_ra(i);
4530   int cc,adj;
4531   cc=get_reg(branch_regs[i].regmap,CCREG);
4532   assert(cc==HOST_CCREG);
4533   store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4534   #ifdef REG_PREFETCH
4535   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4536   #endif
4537   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4538   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4539   load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4540   if(internal_branch(ba[i]))
4541     assem_debug("branch: internal\n");
4542   else
4543     assem_debug("branch: external\n");
4544   if(internal_branch(ba[i])&&is_ds[(ba[i]-start)>>2]) {
4545     ds_assemble_entry(i);
4546   }
4547   else {
4548     add_to_linker(out,ba[i],internal_branch(ba[i]));
4549     emit_jmp(0);
4550   }
4551 }
4552
4553 static void rjump_assemble_write_ra(int i)
4554 {
4555   int rt,return_address;
4556   assert(rt1[i+1]!=rt1[i]);
4557   assert(rt2[i+1]!=rt1[i]);
4558   rt=get_reg(branch_regs[i].regmap,rt1[i]);
4559   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4560   assert(rt>=0);
4561   return_address=start+i*4+8;
4562   #ifdef REG_PREFETCH
4563   if(temp>=0)
4564   {
4565     if(i_regmap[temp]!=PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4566   }
4567   #endif
4568   emit_movimm(return_address,rt); // PC into link register
4569   #ifdef IMM_PREFETCH
4570   emit_prefetch(hash_table_get(return_address));
4571   #endif
4572 }
4573
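// Assemble a register jump (JR/JALR).  If the delay slot overwrites the
// source register, the jump address is first copied to RTEMP; the jump then
// goes through jump_vaddr_reg, or through the mini hash table for jr $ra when
// USE_MINI_HT is enabled.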
4574 void rjump_assemble(int i,struct regstat *i_regs)
4575 {
4576   int temp;
4577   int rs,cc;
4578   int ra_done=0;
4579   rs=get_reg(branch_regs[i].regmap,rs1[i]);
4580   assert(rs>=0);
4581   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4582     // Delay slot abuse, make a copy of the branch address register
4583     temp=get_reg(branch_regs[i].regmap,RTEMP);
4584     assert(temp>=0);
4585     assert(regs[i].regmap[temp]==RTEMP);
4586     emit_mov(rs,temp);
4587     rs=temp;
4588   }
4589   address_generation(i+1,i_regs,regs[i].regmap_entry);
4590   #ifdef REG_PREFETCH
4591   if(rt1[i]==31)
4592   {
4593     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
4594       signed char *i_regmap=i_regs->regmap;
4595       int return_address=start+i*4+8;
4596       if(i_regmap[temp]==PTEMP) emit_movimm((uintptr_t)hash_table_get(return_address),temp);
4597     }
4598   }
4599   #endif
4600   #ifdef USE_MINI_HT
4601   if(rs1[i]==31) {
4602     int rh=get_reg(regs[i].regmap,RHASH);
4603     if(rh>=0) do_preload_rhash(rh);
4604   }
4605   #endif
4606   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4607     rjump_assemble_write_ra(i);
4608     ra_done=1;
4609   }
4610   ds_assemble(i+1,i_regs);
4611   uint64_t bc_unneeded=branch_regs[i].u;
4612   bc_unneeded|=1|(1LL<<rt1[i]);
4613   bc_unneeded&=~(1LL<<rs1[i]);
4614   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
4615   load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],CCREG);
4616   if(!ra_done&&rt1[i]!=0)
4617     rjump_assemble_write_ra(i);
4618   cc=get_reg(branch_regs[i].regmap,CCREG);
4619   assert(cc==HOST_CCREG);
4620   (void)cc;
4621   #ifdef USE_MINI_HT
4622   int rh=get_reg(branch_regs[i].regmap,RHASH);
4623   int ht=get_reg(branch_regs[i].regmap,RHTBL);
4624   if(rs1[i]==31) {
4625     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
4626     do_preload_rhtbl(ht);
4627     do_rhash(rs,rh);
4628   }
4629   #endif
4630   store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
4631   #ifdef DESTRUCTIVE_WRITEBACK
4632   if((branch_regs[i].dirty>>rs)&1) {
4633     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
4634       emit_loadreg(rs1[i],rs);
4635     }
4636   }
4637   #endif
4638   #ifdef REG_PREFETCH
4639   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4640   #endif
4641   #ifdef USE_MINI_HT
4642   if(rs1[i]==31) {
4643     do_miniht_load(ht,rh);
4644   }
4645   #endif
4646   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
4647   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
4648   //assert(adj==0);
4649   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4650   add_stub(CC_STUB,out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
4651   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
4652     // special case for RFE
4653     emit_jmp(0);
4654   else
4655     emit_jns(0);
4656   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,-1);
4657   #ifdef USE_MINI_HT
4658   if(rs1[i]==31) {
4659     do_miniht_jump(rs,rh,ht);
4660   }
4661   else
4662   #endif
4663   {
4664     emit_jmp(jump_vaddr_reg[rs]);
4665   }
4666   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4667   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
4668   #endif
4669 }
4670
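// Assemble a conditional branch on one or two registers (BEQ/BNE/BLEZ/BGTZ
// and the "likely" forms).  Two schedules are handled: out-of-order (delay
// slot first, with an optionally inverted test) and in-order (test first,
// with the delay slot assembled on each path and skipped on the not-taken
// path for likely branches).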
4671 void cjump_assemble(int i,struct regstat *i_regs)
4672 {
4673   signed char *i_regmap=i_regs->regmap;
4674   int cc;
4675   int match;
4676   match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4677   assem_debug("match=%d\n",match);
4678   int s1l,s2l;
4679   int unconditional=0,nop=0;
4680   int invert=0;
4681   int internal=internal_branch(ba[i]);
4682   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4683   if(!match) invert=1;
4684   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4685   if(i>(ba[i]-start)>>2) invert=1;
4686   #endif
4687
4688   if(ooo[i]) {
4689     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4690     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4691   }
4692   else {
4693     s1l=get_reg(i_regmap,rs1[i]);
4694     s2l=get_reg(i_regmap,rs2[i]);
4695   }
4696   if(rs1[i]==0&&rs2[i]==0)
4697   {
4698     if(opcode[i]&1) nop=1;
4699     else unconditional=1;
4700     //assert(opcode[i]!=5);
4701     //assert(opcode[i]!=7);
4702     //assert(opcode[i]!=0x15);
4703     //assert(opcode[i]!=0x17);
4704   }
4705   else if(rs1[i]==0)
4706   {
4707     s1l=s2l;
4708     s2l=-1;
4709   }
4710   else if(rs2[i]==0)
4711   {
4712     s2l=-1;
4713   }
4714
4715   if(ooo[i]) {
4716     // Out of order execution (delay slot first)
4717     //printf("OOOE\n");
4718     address_generation(i+1,i_regs,regs[i].regmap_entry);
4719     ds_assemble(i+1,i_regs);
4720     int adj;
4721     uint64_t bc_unneeded=branch_regs[i].u;
4722     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
4723     bc_unneeded|=1;
4724     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
4725     load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],rs2[i]);
4726     load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
4727     cc=get_reg(branch_regs[i].regmap,CCREG);
4728     assert(cc==HOST_CCREG);
4729     if(unconditional)
4730       store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4731     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
4732     //assem_debug("cycle count (adj)\n");
4733     if(unconditional) {
4734       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4735       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
4736         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4737         load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4738         if(internal)
4739           assem_debug("branch: internal\n");
4740         else
4741           assem_debug("branch: external\n");
4742         if(internal&&is_ds[(ba[i]-start)>>2]) {
4743           ds_assemble_entry(i);
4744         }
4745         else {
4746           add_to_linker(out,ba[i],internal);
4747           emit_jmp(0);
4748         }
4749         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4750         if(((u_int)out)&7) emit_addnop(0);
4751         #endif
4752       }
4753     }
4754     else if(nop) {
4755       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
4756       void *jaddr=out;
4757       emit_jns(0);
4758       add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
4759     }
4760     else {
4761       void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
4762       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
4763       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4764
4765       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4766       assert(s1l>=0);
4767       if(opcode[i]==4) // BEQ
4768       {
4769         if(s2l>=0) emit_cmp(s1l,s2l);
4770         else emit_test(s1l,s1l);
4771         if(invert){
4772           nottaken=out;
4773           emit_jne((void *)1l);
4774         }else{
4775           add_to_linker(out,ba[i],internal);
4776           emit_jeq(0);
4777         }
4778       }
4779       if(opcode[i]==5) // BNE
4780       {
4781         if(s2l>=0) emit_cmp(s1l,s2l);
4782         else emit_test(s1l,s1l);
4783         if(invert){
4784           nottaken=out;
4785           emit_jeq(1);
4786         }else{
4787           add_to_linker(out,ba[i],internal);
4788           emit_jne(0);
4789         }
4790       }
4791       if(opcode[i]==6) // BLEZ
4792       {
4793         emit_cmpimm(s1l,1);
4794         if(invert){
4795           nottaken=out;
4796           emit_jge(1);
4797         }else{
4798           add_to_linker(out,ba[i],internal);
4799           emit_jl(0);
4800         }
4801       }
4802       if(opcode[i]==7) // BGTZ
4803       {
4804         emit_cmpimm(s1l,1);
4805         if(invert){
4806           nottaken=out;
4807           emit_jl(1);
4808         }else{
4809           add_to_linker(out,ba[i],internal);
4810           emit_jge(0);
4811         }
4812       }
4813       if(invert) {
4814         if(taken) set_jump_target(taken, out);
4815         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4816         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
4817           if(adj) {
4818             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
4819             add_to_linker(out,ba[i],internal);
4820           }else{
4821             emit_addnop(13);
4822             add_to_linker(out,ba[i],internal*2);
4823           }
4824           emit_jmp(0);
4825         }else
4826         #endif
4827         {
4828           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
4829           store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4830           load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4831           if(internal)
4832             assem_debug("branch: internal\n");
4833           else
4834             assem_debug("branch: external\n");
4835           if(internal&&is_ds[(ba[i]-start)>>2]) {
4836             ds_assemble_entry(i);
4837           }
4838           else {
4839             add_to_linker(out,ba[i],internal);
4840             emit_jmp(0);
4841           }
4842         }
4843         set_jump_target(nottaken, out);
4844       }
4845
4846       if(nottaken1) set_jump_target(nottaken1, out);
4847       if(adj) {
4848         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
4849       }
4850     } // (!unconditional)
4851   } // if(ooo)
4852   else
4853   {
4854     // In-order execution (branch first)
4855     //if(likely[i]) printf("IOL\n");
4856     //else
4857     //printf("IOE\n");
4858     void *taken = NULL, *nottaken = NULL, *nottaken1 = NULL;
4859     if(!unconditional&&!nop) {
4860       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4861       assert(s1l>=0);
4862       if((opcode[i]&0x2f)==4) // BEQ
4863       {
4864         if(s2l>=0) emit_cmp(s1l,s2l);
4865         else emit_test(s1l,s1l);
4866         nottaken=out;
4867         emit_jne((void *)2l);
4868       }
4869       if((opcode[i]&0x2f)==5) // BNE
4870       {
4871         if(s2l>=0) emit_cmp(s1l,s2l);
4872         else emit_test(s1l,s1l);
4873         nottaken=out;
4874         emit_jeq(2);
4875       }
4876       if((opcode[i]&0x2f)==6) // BLEZ
4877       {
4878         emit_cmpimm(s1l,1);
4879         nottaken=out;
4880         emit_jge(2);
4881       }
4882       if((opcode[i]&0x2f)==7) // BGTZ
4883       {
4884         emit_cmpimm(s1l,1);
4885         nottaken=out;
4886         emit_jl(2);
4887       }
4888     } // if(!unconditional)
4889     int adj;
4890     uint64_t ds_unneeded=branch_regs[i].u;
4891     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
4892     ds_unneeded|=1;
4893     // branch taken
4894     if(!nop) {
4895       if(taken) set_jump_target(taken, out);
4896       assem_debug("1:\n");
4897       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
4898       // load regs
4899       load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
4900       address_generation(i+1,&branch_regs[i],0);
4901       load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
4902       ds_assemble(i+1,&branch_regs[i]);
4903       cc=get_reg(branch_regs[i].regmap,CCREG);
4904       if(cc==-1) {
4905         emit_loadreg(CCREG,cc=HOST_CCREG);
4906         // CHECK: Is the following instruction (fall thru) allocated ok?
4907       }
4908       assert(cc==HOST_CCREG);
4909       store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4910       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
4911       assem_debug("cycle count (adj)\n");
4912       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4913       load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4914       if(internal)
4915         assem_debug("branch: internal\n");
4916       else
4917         assem_debug("branch: external\n");
4918       if(internal&&is_ds[(ba[i]-start)>>2]) {
4919         ds_assemble_entry(i);
4920       }
4921       else {
4922         add_to_linker(out,ba[i],internal);
4923         emit_jmp(0);
4924       }
4925     }
4926     // branch not taken
4927     if(!unconditional) {
4928       if(nottaken1) set_jump_target(nottaken1, out);
4929       set_jump_target(nottaken, out);
4930       assem_debug("2:\n");
4931       if(!likely[i]) {
4932         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
4933         load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
4934         address_generation(i+1,&branch_regs[i],0);
4935         load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
4936         ds_assemble(i+1,&branch_regs[i]);
4937       }
4938       cc=get_reg(branch_regs[i].regmap,CCREG);
4939       if(cc==-1&&!likely[i]) {
4940         // Cycle count isn't in a register, temporarily load it then write it out
4941         emit_loadreg(CCREG,HOST_CCREG);
4942         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4943         void *jaddr=out;
4944         emit_jns(0);
4945         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
4946         emit_storereg(CCREG,HOST_CCREG);
4947       }
4948       else{
4949         cc=get_reg(i_regmap,CCREG);
4950         assert(cc==HOST_CCREG);
4951         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
4952         void *jaddr=out;
4953         emit_jns(0);
4954         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
4955       }
4956     }
4957   }
4958 }
4959
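// Assemble a regimm branch (BLTZ/BGEZ and their AL/likely forms): branch on
// the sign of rs1, optionally writing the return address to $ra.  The
// structure mirrors cjump_assemble, with out-of-order and in-order variants.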
4960 void sjump_assemble(int i,struct regstat *i_regs)
4961 {
4962   signed char *i_regmap=i_regs->regmap;
4963   int cc;
4964   int match;
4965   match=match_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
4966   assem_debug("smatch=%d\n",match);
4967   int s1l;
4968   int unconditional=0,nevertaken=0;
4969   int invert=0;
4970   int internal=internal_branch(ba[i]);
4971   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4972   if(!match) invert=1;
4973   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4974   if(i>(ba[i]-start)>>2) invert=1;
4975   #endif
4976
4977   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
4978   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
4979
4980   if(ooo[i]) {
4981     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4982   }
4983   else {
4984     s1l=get_reg(i_regmap,rs1[i]);
4985   }
4986   if(rs1[i]==0)
4987   {
4988     if(opcode2[i]&1) unconditional=1;
4989     else nevertaken=1;
4990     // These are never taken (r0 is never less than zero)
4991     //assert(opcode2[i]!=0);
4992     //assert(opcode2[i]!=2);
4993     //assert(opcode2[i]!=0x10);
4994     //assert(opcode2[i]!=0x12);
4995   }
4996
4997   if(ooo[i]) {
4998     // Out of order execution (delay slot first)
4999     //printf("OOOE\n");
5000     address_generation(i+1,i_regs,regs[i].regmap_entry);
5001     ds_assemble(i+1,i_regs);
5002     int adj;
5003     uint64_t bc_unneeded=branch_regs[i].u;
5004     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5005     bc_unneeded|=1;
5006     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,bc_unneeded);
5007     load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i],rs1[i]);
5008     load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
5009     if(rt1[i]==31) {
5010       int rt,return_address;
5011       rt=get_reg(branch_regs[i].regmap,31);
5012       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5013       if(rt>=0) {
5014         // Save the PC even if the branch is not taken
5015         return_address=start+i*4+8;
5016         emit_movimm(return_address,rt); // PC into link register
5017         #ifdef IMM_PREFETCH
5018         if(!nevertaken) emit_prefetch(hash_table_get(return_address));
5019         #endif
5020       }
5021     }
5022     cc=get_reg(branch_regs[i].regmap,CCREG);
5023     assert(cc==HOST_CCREG);
5024     if(unconditional)
5025       store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5026     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5027     assem_debug("cycle count (adj)\n");
5028     if(unconditional) {
5029       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5030       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5031         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5032         load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5033         if(internal)
5034           assem_debug("branch: internal\n");
5035         else
5036           assem_debug("branch: external\n");
5037         if(internal&&is_ds[(ba[i]-start)>>2]) {
5038           ds_assemble_entry(i);
5039         }
5040         else {
5041           add_to_linker(out,ba[i],internal);
5042           emit_jmp(0);
5043         }
5044         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5045         if(((u_int)out)&7) emit_addnop(0);
5046         #endif
5047       }
5048     }
5049     else if(nevertaken) {
5050       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5051       void *jaddr=out;
5052       emit_jns(0);
5053       add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5054     }
5055     else {
5056       void *nottaken = NULL;
5057       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5058       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5059       {
5060         assert(s1l>=0);
5061         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5062         {
5063           emit_test(s1l,s1l);
5064           if(invert){
5065             nottaken=out;
5066             emit_jns(1);
5067           }else{
5068             add_to_linker(out,ba[i],internal);
5069             emit_js(0);
5070           }
5071         }
5072         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5073         {
5074           emit_test(s1l,s1l);
5075           if(invert){
5076             nottaken=out;
5077             emit_js(1);
5078           }else{
5079             add_to_linker(out,ba[i],internal);
5080             emit_jns(0);
5081           }
5082         }
5083       }
5084
5085       if(invert) {
5086         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5087         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5088           if(adj) {
5089             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5090             add_to_linker(out,ba[i],internal);
5091           }else{
5092             emit_addnop(13);
5093             add_to_linker(out,ba[i],internal*2);
5094           }
5095           emit_jmp(0);
5096         }else
5097         #endif
5098         {
5099           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5100           store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5101           load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5102           if(internal)
5103             assem_debug("branch: internal\n");
5104           else
5105             assem_debug("branch: external\n");
5106           if(internal&&is_ds[(ba[i]-start)>>2]) {
5107             ds_assemble_entry(i);
5108           }
5109           else {
5110             add_to_linker(out,ba[i],internal);
5111             emit_jmp(0);
5112           }
5113         }
5114         set_jump_target(nottaken, out);
5115       }
5116
5117       if(adj) {
5118         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5119       }
5120     } // (!unconditional)
5121   } // if(ooo)
5122   else
5123   {
5124     // In-order execution (branch first)
5125     //printf("IOE\n");
5126     void *nottaken = NULL;
5127     if(rt1[i]==31) {
5128       int rt,return_address;
5129       rt=get_reg(branch_regs[i].regmap,31);
5130       if(rt>=0) {
5131         // Save the PC even if the branch is not taken
5132         return_address=start+i*4+8;
5133         emit_movimm(return_address,rt); // PC into link register
5134         #ifdef IMM_PREFETCH
5135         emit_prefetch(hash_table_get(return_address));
5136         #endif
5137       }
5138     }
5139     if(!unconditional) {
5140       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5141         assert(s1l>=0);
5142         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5143         {
5144           emit_test(s1l,s1l);
5145           nottaken=out;
5146           emit_jns(1);
5147         }
5148         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5149         {
5150           emit_test(s1l,s1l);
5151           nottaken=out;
5152           emit_js(1);
5153         }
5154     } // if(!unconditional)
5155     int adj;
5156     uint64_t ds_unneeded=branch_regs[i].u;
5157     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5158     ds_unneeded|=1;
5159     // branch taken
5160     if(!nevertaken) {
5161       //assem_debug("1:\n");
5162       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5163       // load regs
5164       load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
5165       address_generation(i+1,&branch_regs[i],0);
5166       load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,INVCP);
5167       ds_assemble(i+1,&branch_regs[i]);
5168       cc=get_reg(branch_regs[i].regmap,CCREG);
5169       if(cc==-1) {
5170         emit_loadreg(CCREG,cc=HOST_CCREG);
5171         // CHECK: Is the following instruction (fall thru) allocated ok?
5172       }
5173       assert(cc==HOST_CCREG);
5174       store_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5175       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5176       assem_debug("cycle count (adj)\n");
5177       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5178       load_regs_bt(branch_regs[i].regmap,branch_regs[i].dirty,ba[i]);
5179       if(internal)
5180         assem_debug("branch: internal\n");
5181       else
5182         assem_debug("branch: external\n");
5183       if(internal&&is_ds[(ba[i]-start)>>2]) {
5184         ds_assemble_entry(i);
5185       }
5186       else {
5187         add_to_linker(out,ba[i],internal);
5188         emit_jmp(0);
5189       }
5190     }
5191     // branch not taken
5192     if(!unconditional) {
5193       set_jump_target(nottaken, out);
5194       assem_debug("1:\n");
5195       if(!likely[i]) {
5196         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,ds_unneeded);
5197         load_regs(regs[i].regmap,branch_regs[i].regmap,rs1[i+1],rs2[i+1]);
5198         address_generation(i+1,&branch_regs[i],0);
5199         load_regs(regs[i].regmap,branch_regs[i].regmap,CCREG,CCREG);
5200         ds_assemble(i+1,&branch_regs[i]);
5201       }
5202       cc=get_reg(branch_regs[i].regmap,CCREG);
5203       if(cc==-1&&!likely[i]) {
5204         // Cycle count isn't in a register, temporarily load it then write it out
5205         emit_loadreg(CCREG,HOST_CCREG);
5206         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5207         void *jaddr=out;
5208         emit_jns(0);
5209         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,NOTTAKEN,0);
5210         emit_storereg(CCREG,HOST_CCREG);
5211       }
5212       else{
5213         cc=get_reg(i_regmap,CCREG);
5214         assert(cc==HOST_CCREG);
5215         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5216         void *jaddr=out;
5217         emit_jns(0);
5218         add_stub(CC_STUB,jaddr,out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5219       }
5220     }
5221   }
5222 }
5223
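// Assemble a branch whose delay slot lies on the next page, so the slot
// cannot be emitted here.  The branch target is computed into HOST_BTREG
// (spilled to branch_target if needed) and control jumps to the delay-slot
// entry of the next block, assembled by pagespan_ds() below.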
5224 static void pagespan_assemble(int i,struct regstat *i_regs)
5225 {
5226   int s1l=get_reg(i_regs->regmap,rs1[i]);
5227   int s2l=get_reg(i_regs->regmap,rs2[i]);
5228   void *taken = NULL;
5229   void *nottaken = NULL;
5230   int unconditional=0;
5231   if(rs1[i]==0)
5232   {
5233     s1l=s2l;
5234     s2l=-1;
5235   }
5236   else if(rs2[i]==0)
5237   {
5238     s2l=-1;
5239   }
5240   int hr=0;
5241   int addr=-1,alt=-1,ntaddr=-1;
5242   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
5243   else {
5244     while(hr<HOST_REGS)
5245     {
5246       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5247          (i_regs->regmap[hr]&63)!=rs1[i] &&
5248          (i_regs->regmap[hr]&63)!=rs2[i] )
5249       {
5250         addr=hr++;break;
5251       }
5252       hr++;
5253     }
5254   }
5255   while(hr<HOST_REGS)
5256   {
5257     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5258        (i_regs->regmap[hr]&63)!=rs1[i] &&
5259        (i_regs->regmap[hr]&63)!=rs2[i] )
5260     {
5261       alt=hr++;break;
5262     }
5263     hr++;
5264   }
5265   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5266   {
5267     while(hr<HOST_REGS)
5268     {
5269       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5270          (i_regs->regmap[hr]&63)!=rs1[i] &&
5271          (i_regs->regmap[hr]&63)!=rs2[i] )
5272       {
5273         ntaddr=hr;break;
5274       }
5275       hr++;
5276     }
5277   }
5278   assert(hr<HOST_REGS);
5279   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
5280     load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
5281   }
5282   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5283   if(opcode[i]==2) // J
5284   {
5285     unconditional=1;
5286   }
5287   if(opcode[i]==3) // JAL
5288   {
5289     // TODO: mini_ht
5290     int rt=get_reg(i_regs->regmap,31);
5291     emit_movimm(start+i*4+8,rt);
5292     unconditional=1;
5293   }
5294   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
5295   {
5296     emit_mov(s1l,addr);
5297     if(opcode2[i]==9) // JALR
5298     {
5299       int rt=get_reg(i_regs->regmap,rt1[i]);
5300       emit_movimm(start+i*4+8,rt);
5301     }
5302   }
5303   if((opcode[i]&0x3f)==4) // BEQ
5304   {
5305     if(rs1[i]==rs2[i])
5306     {
5307       unconditional=1;
5308     }
5309     else
5310     #ifdef HAVE_CMOV_IMM
5311     if(1) {
5312       if(s2l>=0) emit_cmp(s1l,s2l);
5313       else emit_test(s1l,s1l);
5314       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5315     }
5316     else
5317     #endif
5318     {
5319       assert(s1l>=0);
5320       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5321       if(s2l>=0) emit_cmp(s1l,s2l);
5322       else emit_test(s1l,s1l);
5323       emit_cmovne_reg(alt,addr);
5324     }
5325   }
5326   if((opcode[i]&0x3f)==5) // BNE
5327   {
5328     #ifdef HAVE_CMOV_IMM
5329     if(s2l>=0) emit_cmp(s1l,s2l);
5330     else emit_test(s1l,s1l);
5331     emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5332     #else
5333     assert(s1l>=0);
5334     emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5335     if(s2l>=0) emit_cmp(s1l,s2l);
5336     else emit_test(s1l,s1l);
5337     emit_cmovne_reg(alt,addr);
5338     #endif
5339   }
5340   if((opcode[i]&0x3f)==0x14) // BEQL
5341   {
5342     if(s2l>=0) emit_cmp(s1l,s2l);
5343     else emit_test(s1l,s1l);
5344     if(nottaken) set_jump_target(nottaken, out);
5345     nottaken=out;
5346     emit_jne(0);
5347   }
5348   if((opcode[i]&0x3f)==0x15) // BNEL
5349   {
5350     if(s2l>=0) emit_cmp(s1l,s2l);
5351     else emit_test(s1l,s1l);
5352     nottaken=out;
5353     emit_jeq(0);
5354     if(taken) set_jump_target(taken, out);
5355   }
5356   if((opcode[i]&0x3f)==6) // BLEZ
5357   {
5358     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5359     emit_cmpimm(s1l,1);
5360     emit_cmovl_reg(alt,addr);
5361   }
5362   if((opcode[i]&0x3f)==7) // BGTZ
5363   {
5364     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5365     emit_cmpimm(s1l,1);
5366     emit_cmovl_reg(ntaddr,addr);
5367   }
5368   if((opcode[i]&0x3f)==0x16) // BLEZL
5369   {
5370     assert((opcode[i]&0x3f)!=0x16);
5371   }
5372   if((opcode[i]&0x3f)==0x17) // BGTZL
5373   {
5374     assert((opcode[i]&0x3f)!=0x17);
5375   }
5376   assert(opcode[i]!=1); // BLTZ/BGEZ
5377
5378   //FIXME: Check CSREG
5379   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5380     if((source[i]&0x30000)==0) // BC1F
5381     {
5382       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5383       emit_testimm(s1l,0x800000);
5384       emit_cmovne_reg(alt,addr);
5385     }
5386     if((source[i]&0x30000)==0x10000) // BC1T
5387     {
5388       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5389       emit_testimm(s1l,0x800000);
5390       emit_cmovne_reg(alt,addr);
5391     }
5392     if((source[i]&0x30000)==0x20000) // BC1FL
5393     {
5394       emit_testimm(s1l,0x800000);
5395       nottaken=out;
5396       emit_jne(0);
5397     }
5398     if((source[i]&0x30000)==0x30000) // BC1TL
5399     {
5400       emit_testimm(s1l,0x800000);
5401       nottaken=out;
5402       emit_jeq(0);
5403     }
5404   }
5405
5406   assert(i_regs->regmap[HOST_CCREG]==CCREG);
5407   wb_dirtys(regs[i].regmap,regs[i].dirty);
5408   if(likely[i]||unconditional)
5409   {
5410     emit_movimm(ba[i],HOST_BTREG);
5411   }
5412   else if(addr!=HOST_BTREG)
5413   {
5414     emit_mov(addr,HOST_BTREG);
5415   }
5416   void *branch_addr=out;
5417   emit_jmp(0);
5418   int target_addr=start+i*4+5;
5419   void *stub=out;
5420   void *compiled_target_addr=check_addr(target_addr);
5421   emit_extjump_ds(branch_addr, target_addr);
5422   if(compiled_target_addr) {
5423     set_jump_target(branch_addr, compiled_target_addr);
5424     add_link(target_addr,stub);
5425   }
5426   else set_jump_target(branch_addr, stub);
5427   if(likely[i]) {
5428     // Not-taken path
5429     set_jump_target(nottaken, out);
5430     wb_dirtys(regs[i].regmap,regs[i].dirty);
5431     void *branch_addr=out;
5432     emit_jmp(0);
5433     int target_addr=start+i*4+8;
5434     void *stub=out;
5435     void *compiled_target_addr=check_addr(target_addr);
5436     emit_extjump_ds(branch_addr, target_addr);
5437     if(compiled_target_addr) {
5438       set_jump_target(branch_addr, compiled_target_addr);
5439       add_link(target_addr,stub);
5440     }
5441     else set_jump_target(branch_addr, stub);
5442   }
5443 }
5444
5445 // Assemble the delay slot for the above
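// It executes the first instruction of the new block as the pending delay
// slot, then either falls through to start+4 or jumps indirectly to the
// address left in BTREG/branch_target by the page-spanning branch.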
5446 static void pagespan_ds()
5447 {
5448   assem_debug("initial delay slot:\n");
5449   u_int vaddr=start+1;
5450   u_int page=get_page(vaddr);
5451   u_int vpage=get_vpage(vaddr);
5452   ll_add(jump_dirty+vpage,vaddr,(void *)out);
5453   do_dirty_stub_ds();
5454   ll_add(jump_in+page,vaddr,(void *)out);
5455   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
5456   if(regs[0].regmap[HOST_CCREG]!=CCREG)
5457     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty);
5458   if(regs[0].regmap[HOST_BTREG]!=BTREG)
5459     emit_writeword(HOST_BTREG,&branch_target);
5460   load_regs(regs[0].regmap_entry,regs[0].regmap,rs1[0],rs2[0]);
5461   address_generation(0,&regs[0],regs[0].regmap_entry);
5462   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
5463     load_regs(regs[0].regmap_entry,regs[0].regmap,INVCP,INVCP);
5464   is_delayslot=0;
5465   switch(itype[0]) {
5466     case ALU:
5467       alu_assemble(0,&regs[0]);break;
5468     case IMM16:
5469       imm16_assemble(0,&regs[0]);break;
5470     case SHIFT:
5471       shift_assemble(0,&regs[0]);break;
5472     case SHIFTIMM:
5473       shiftimm_assemble(0,&regs[0]);break;
5474     case LOAD:
5475       load_assemble(0,&regs[0]);break;
5476     case LOADLR:
5477       loadlr_assemble(0,&regs[0]);break;
5478     case STORE:
5479       store_assemble(0,&regs[0]);break;
5480     case STORELR:
5481       storelr_assemble(0,&regs[0]);break;
5482     case COP0:
5483       cop0_assemble(0,&regs[0]);break;
5484     case COP1:
5485       cop1_assemble(0,&regs[0]);break;
5486     case C1LS:
5487       c1ls_assemble(0,&regs[0]);break;
5488     case COP2:
5489       cop2_assemble(0,&regs[0]);break;
5490     case C2LS:
5491       c2ls_assemble(0,&regs[0]);break;
5492     case C2OP:
5493       c2op_assemble(0,&regs[0]);break;
5494     case MULTDIV:
5495       multdiv_assemble(0,&regs[0]);break;
5496     case MOV:
5497       mov_assemble(0,&regs[0]);break;
5498     case SYSCALL:
5499     case HLECALL:
5500     case INTCALL:
5501     case SPAN:
5502     case UJUMP:
5503     case RJUMP:
5504     case CJUMP:
5505     case SJUMP:
5506       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
5507   }
5508   int btaddr=get_reg(regs[0].regmap,BTREG);
5509   if(btaddr<0) {
5510     btaddr=get_reg(regs[0].regmap,-1);
5511     emit_readword(&branch_target,btaddr);
5512   }
5513   assert(btaddr!=HOST_CCREG);
5514   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
5515 #ifdef HOST_IMM8
5516   emit_movimm(start+4,HOST_TEMPREG);
5517   emit_cmp(btaddr,HOST_TEMPREG);
5518 #else
5519   emit_cmpimm(btaddr,start+4);
5520 #endif
5521   void *branch = out;
5522   emit_jeq(0);
5523   store_regs_bt(regs[0].regmap,regs[0].dirty,-1);
5524   emit_jmp(jump_vaddr_reg[btaddr]);
5525   set_jump_target(branch, out);
5526   store_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
5527   load_regs_bt(regs[0].regmap,regs[0].dirty,start+4);
5528 }
5529
5530 // Basic liveness analysis for MIPS registers
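// Works backwards over [istart,iend] computing, per instruction, a bitmask of
// MIPS registers whose values will be overwritten before being read again
// (so they never need to be written back or reloaded).  Results are stored in
// unneeded_reg[]/gte_unneeded[]; bit 0 (r0) is always set.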
5531 void unneeded_registers(int istart,int iend,int r)
5532 {
5533   int i;
5534   uint64_t u,gte_u,b,gte_b;
5535   uint64_t temp_u,temp_gte_u=0;
5536   uint64_t gte_u_unknown=0;
5537   if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
5538     gte_u_unknown=~0ll;
5539   if(iend==slen-1) {
5540     u=1;
5541     gte_u=gte_u_unknown;
5542   }else{
5543     //u=unneeded_reg[iend+1];
5544     u=1;
5545     gte_u=gte_unneeded[iend+1];
5546   }
5547
5548   for (i=iend;i>=istart;i--)
5549   {
5550     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
5551     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
5552     {
5553       // If subroutine call, flag return address as a possible branch target
5554       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
5555
5556       if(ba[i]<start || ba[i]>=(start+slen*4))
5557       {
5558         // Branch out of this block, flush all regs
5559         u=1;
5560         gte_u=gte_u_unknown;
5561         branch_unneeded_reg[i]=u;
5562         // Merge in delay slot
5563         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5564         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5565         u|=1;
5566         gte_u|=gte_rt[i+1];
5567         gte_u&=~gte_rs[i+1];
5568         // If branch is "likely" (and conditional)
5569         // then we skip the delay slot on the fall-thru path
5570         if(likely[i]) {
5571           if(i<slen-1) {
5572             u&=unneeded_reg[i+2];
5573             gte_u&=gte_unneeded[i+2];
5574           }
5575           else
5576           {
5577             u=1;
5578             gte_u=gte_u_unknown;
5579           }
5580         }
5581       }
5582       else
5583       {
5584         // Internal branch, flag target
5585         bt[(ba[i]-start)>>2]=1;
5586         if(ba[i]<=start+i*4) {
5587           // Backward branch
5588           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
5589           {
5590             // Unconditional branch
5591             temp_u=1;
5592             temp_gte_u=0;
5593           } else {
5594             // Conditional branch (not taken case)
5595             temp_u=unneeded_reg[i+2];
5596             temp_gte_u&=gte_unneeded[i+2];
5597           }
5598           // Merge in delay slot
5599           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5600           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5601           temp_u|=1;
5602           temp_gte_u|=gte_rt[i+1];
5603           temp_gte_u&=~gte_rs[i+1];
5604           // If branch is "likely" (and conditional)
5605           // then we skip the delay slot on the fall-thru path
5606           if(likely[i]) {
5607             if(i<slen-1) {
5608               temp_u&=unneeded_reg[i+2];
5609               temp_gte_u&=gte_unneeded[i+2];
5610             }
5611             else
5612             {
5613               temp_u=1;
5614               temp_gte_u=gte_u_unknown;
5615             }
5616           }
5617           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
5618           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5619           temp_u|=1;
5620           temp_gte_u|=gte_rt[i];
5621           temp_gte_u&=~gte_rs[i];
5622           unneeded_reg[i]=temp_u;
5623           gte_unneeded[i]=temp_gte_u;
5624           // Only go three levels deep.  This recursion can take an
5625           // excessive amount of time if there are a lot of nested loops.
5626           if(r<2) {
5627             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
5628           }else{
5629             unneeded_reg[(ba[i]-start)>>2]=1;
5630             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
5631           }
5632         } /*else*/ if(1) {
5633           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
5634           {
5635             // Unconditional branch
5636             u=unneeded_reg[(ba[i]-start)>>2];
5637             gte_u=gte_unneeded[(ba[i]-start)>>2];
5638             branch_unneeded_reg[i]=u;
5639             // Merge in delay slot
5640             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5641             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5642             u|=1;
5643             gte_u|=gte_rt[i+1];
5644             gte_u&=~gte_rs[i+1];
5645           } else {
5646             // Conditional branch
5647             b=unneeded_reg[(ba[i]-start)>>2];
5648             gte_b=gte_unneeded[(ba[i]-start)>>2];
5649             branch_unneeded_reg[i]=b;
5650             // Branch delay slot
5651             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
5652             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5653             b|=1;
5654             gte_b|=gte_rt[i+1];
5655             gte_b&=~gte_rs[i+1];
5656             // If branch is "likely" then we skip the
5657             // delay slot on the fall-thru path
5658             if(likely[i]) {
5659               u=b;
5660               gte_u=gte_b;
5661               if(i<slen-1) {
5662                 u&=unneeded_reg[i+2];
5663                 gte_u&=gte_unneeded[i+2];
5664               }
5665             } else {
5666               u&=b;
5667               gte_u&=gte_b;
5668             }
5669             if(i<slen-1) {
5670               branch_unneeded_reg[i]&=unneeded_reg[i+2];
5671             } else {
5672               branch_unneeded_reg[i]=1;
5673             }
5674           }
5675         }
5676       }
5677     }
5678     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
5679     {
5680       // SYSCALL instruction (software interrupt)
5681       u=1;
5682     }
5683     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
5684     {
5685       // ERET instruction (return from interrupt)
5686       u=1;
5687     }
5688     //u=1; // DEBUG
5689     // Written registers are unneeded
5690     u|=1LL<<rt1[i];
5691     u|=1LL<<rt2[i];
5692     gte_u|=gte_rt[i];
5693     // Accessed registers are needed
5694     u&=~(1LL<<rs1[i]);
5695     u&=~(1LL<<rs2[i]);
5696     gte_u&=~gte_rs[i];
5697     if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
5698       gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
5699     // Source-target dependencies
5700     // R0 is always unneeded
5701     u|=1;
5702     // Save it
5703     unneeded_reg[i]=u;
5704     gte_unneeded[i]=gte_u;
5705     /*
5706     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
5707     printf("U:");
5708     int r;
5709     for(r=1;r<=CCREG;r++) {
5710       if((unneeded_reg[i]>>r)&1) {
5711         if(r==HIREG) printf(" HI");
5712         else if(r==LOREG) printf(" LO");
5713         else printf(" r%d",r);
5714       }
5715     }
5716     printf("\n");
5717     */
5718   }
5719 }
5720
5721 // Write back dirty registers as soon as we will no longer modify them,
5722 // so that we don't end up with lots of writes at the branches.
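// Backward pass over [istart,iend]: will_dirty_i/wont_dirty_i track, per host
// register, writeback state across branches and delay slots.  The results are
// stored in will_dirty[]/wont_dirty[] and, when wr is set, folded into the
// per-instruction dirty masks.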
5723 void clean_registers(int istart,int iend,int wr)
5724 {
5725   int i;
5726   int r;
5727   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
5728   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
5729   if(iend==slen-1) {
5730     will_dirty_i=will_dirty_next=0;
5731     wont_dirty_i=wont_dirty_next=0;
5732   }else{
5733     will_dirty_i=will_dirty_next=will_dirty[iend+1];
5734     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
5735   }
5736   for (i=iend;i>=istart;i--)
5737   {
5738     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
5739     {
5740       if(ba[i]<start || ba[i]>=(start+slen*4))
5741       {
5742         // Branch out of this block, flush all regs
5743         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
5744         {
5745           // Unconditional branch
5746           will_dirty_i=0;
5747           wont_dirty_i=0;
5748           // Merge in delay slot (will dirty)
5749           for(r=0;r<HOST_REGS;r++) {
5750             if(r!=EXCLUDE_REG) {
5751               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5752               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5753               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5754               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5755               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5756               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
5757               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5758               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5759               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5760               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5761               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5762               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5763               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
5764               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5765             }
5766           }
5767         }
5768         else
5769         {
5770           // Conditional branch
5771           will_dirty_i=0;
5772           wont_dirty_i=wont_dirty_next;
5773           // Merge in delay slot (will dirty)
5774           for(r=0;r<HOST_REGS;r++) {
5775             if(r!=EXCLUDE_REG) {
5776               if(!likely[i]) {
5777                 // Might not dirty if likely branch is not taken
5778                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5779                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5780                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5781                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5782                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5783                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
5784                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5785                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5786                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5787                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5788                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5789                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5790                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
5791                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5792               }
5793             }
5794           }
5795         }
5796         // Merge in delay slot (won't dirty)
5797         for(r=0;r<HOST_REGS;r++) {
5798           if(r!=EXCLUDE_REG) {
5799             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
5800             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
5801             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
5802             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
5803             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
5804             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
5805             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
5806             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
5807             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
5808             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
5809           }
5810         }
5811         if(wr) {
5812           #ifndef DESTRUCTIVE_WRITEBACK
5813           branch_regs[i].dirty&=wont_dirty_i;
5814           #endif
5815           branch_regs[i].dirty|=will_dirty_i;
5816         }
5817       }
5818       else
5819       {
5820         // Internal branch
5821         if(ba[i]<=start+i*4) {
5822           // Backward branch
5823           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
5824           {
5825             // Unconditional branch
5826             temp_will_dirty=0;
5827             temp_wont_dirty=0;
5828             // Merge in delay slot (will dirty)
5829             for(r=0;r<HOST_REGS;r++) {
5830               if(r!=EXCLUDE_REG) {
5831                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
5832                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
5833                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
5834                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
5835                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
5836                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
5837                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
5838                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
5839                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
5840                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
5841                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
5842                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
5843                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
5844                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
5845               }
5846             }
5847           } else {
5848             // Conditional branch (not taken case)
5849             temp_will_dirty=will_dirty_next;
5850             temp_wont_dirty=wont_dirty_next;
5851             // Merge in delay slot (will dirty)
5852             for(r=0;r<HOST_REGS;r++) {
5853               if(r!=EXCLUDE_REG) {
5854                 if(!likely[i]) {
5855                   // Will not dirty if likely branch is not taken
5856                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
5857                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
5858                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
5859                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
5860                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
5861                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
5862                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
5863                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
5864                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
5865                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
5866                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
5867                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
5868                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
5869                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
5870                 }
5871               }
5872             }
5873           }
5874           // Merge in delay slot (won't dirty)
5875           for(r=0;r<HOST_REGS;r++) {
5876             if(r!=EXCLUDE_REG) {
5877               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
5878               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
5879               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
5880               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
5881               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
5882               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
5883               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
5884               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
5885               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
5886               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
5887             }
5888           }
5889           // Deal with changed mappings
5890           if(i<iend) {
5891             for(r=0;r<HOST_REGS;r++) {
5892               if(r!=EXCLUDE_REG) {
5893                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
5894                   temp_will_dirty&=~(1<<r);
5895                   temp_wont_dirty&=~(1<<r);
5896                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
5897                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
5898                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
5899                   } else {
5900                     temp_will_dirty|=1<<r;
5901                     temp_wont_dirty|=1<<r;
5902                   }
5903                 }
5904               }
5905             }
5906           }
5907           if(wr) {
5908             will_dirty[i]=temp_will_dirty;
5909             wont_dirty[i]=temp_wont_dirty;
5910             clean_registers((ba[i]-start)>>2,i-1,0);
5911           }else{
5912             // Limit recursion.  It can take an excessive amount
5913             // of time if there are a lot of nested loops.
5914             will_dirty[(ba[i]-start)>>2]=0;
5915             wont_dirty[(ba[i]-start)>>2]=-1;
5916           }
5917         }
5918         /*else*/ if(1)
5919         {
5920           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
5921           {
5922             // Unconditional branch
5923             will_dirty_i=0;
5924             wont_dirty_i=0;
5925           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
5926             for(r=0;r<HOST_REGS;r++) {
5927               if(r!=EXCLUDE_REG) {
5928                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
5929                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
5930                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
5931                 }
5932                 if(branch_regs[i].regmap[r]>=0) {
5933                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
5934                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
5935                 }
5936               }
5937             }
5938           //}
5939             // Merge in delay slot
5940             for(r=0;r<HOST_REGS;r++) {
5941               if(r!=EXCLUDE_REG) {
5942                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5943                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5944                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5945                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5946                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5947                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
5948                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5949                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5950                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5951                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5952                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5953                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5954                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
5955                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5956               }
5957             }
5958           } else {
5959             // Conditional branch
5960             will_dirty_i=will_dirty_next;
5961             wont_dirty_i=wont_dirty_next;
5962           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
5963             for(r=0;r<HOST_REGS;r++) {
5964               if(r!=EXCLUDE_REG) {
5965                 signed char target_reg=branch_regs[i].regmap[r];
5966                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
5967                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
5968                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
5969                 }
5970                 else if(target_reg>=0) {
5971                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
5972                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
5973                 }
5974                 // Treat delay slot as part of branch too
5975                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
5976                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
5977                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
5978                 }
5979                 else
5980                 {
5981                   will_dirty[i+1]&=~(1<<r);
5982                 }*/
5983               }
5984             }
5985           //}
5986             // Merge in delay slot
5987             for(r=0;r<HOST_REGS;r++) {
5988               if(r!=EXCLUDE_REG) {
5989                 if(!likely[i]) {
5990                   // Might not dirty if likely branch is not taken
5991                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5992                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
5993                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
5994                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
5995                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
5996                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
5997                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
5998                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
5999                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6000                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6001                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6002                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6003                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6004                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6005                 }
6006               }
6007             }
6008           }
6009           // Merge in delay slot (won't dirty)
6010           for(r=0;r<HOST_REGS;r++) {
6011             if(r!=EXCLUDE_REG) {
6012               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6013               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6014               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6015               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6016               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6017               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6018               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6019               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6020               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6021               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6022             }
6023           }
6024           if(wr) {
6025             #ifndef DESTRUCTIVE_WRITEBACK
6026             branch_regs[i].dirty&=wont_dirty_i;
6027             #endif
6028             branch_regs[i].dirty|=will_dirty_i;
6029           }
6030         }
6031       }
6032     }
6033     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6034     {
6035       // SYSCALL instruction (software interrupt)
6036       will_dirty_i=0;
6037       wont_dirty_i=0;
6038     }
6039     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6040     {
6041       // ERET instruction (return from interrupt)
6042       will_dirty_i=0;
6043       wont_dirty_i=0;
6044     }
6045     will_dirty_next=will_dirty_i;
6046     wont_dirty_next=wont_dirty_i;
6047     for(r=0;r<HOST_REGS;r++) {
6048       if(r!=EXCLUDE_REG) {
6049         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6050         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6051         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6052         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6053         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6054         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6055         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6056         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6057         if(i>istart) {
6058           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP)
6059           {
6060             // Don't store a register immediately after writing it;
6061             // doing so may prevent dual-issue.
6062             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
6063             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
6064           }
6065         }
6066       }
6067     }
6068     // Save it
6069     will_dirty[i]=will_dirty_i;
6070     wont_dirty[i]=wont_dirty_i;
6071     // Mark registers that won't be dirtied as not dirty
6072     if(wr) {
6073       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
6074       for(r=0;r<HOST_REGS;r++) {
6075         if((will_dirty_i>>r)&1) {
6076           printf(" r%d",r);
6077         }
6078       }
6079       printf("\n");*/
6080
6081       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP)) {
6082         regs[i].dirty|=will_dirty_i;
6083         #ifndef DESTRUCTIVE_WRITEBACK
6084         regs[i].dirty&=wont_dirty_i;
6085         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
6086         {
6087           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
6088             for(r=0;r<HOST_REGS;r++) {
6089               if(r!=EXCLUDE_REG) {
6090                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
6091                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
6092                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6093               }
6094             }
6095           }
6096         }
6097         else
6098         {
6099           if(i<iend) {
6100             for(r=0;r<HOST_REGS;r++) {
6101               if(r!=EXCLUDE_REG) {
6102                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
6103                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
6104                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6105               }
6106             }
6107           }
6108         }
6109         #endif
6110       //}
6111     }
6112     // Deal with changed mappings
6113     temp_will_dirty=will_dirty_i;
6114     temp_wont_dirty=wont_dirty_i;
6115     for(r=0;r<HOST_REGS;r++) {
6116       if(r!=EXCLUDE_REG) {
6117         int nr;
6118         if(regs[i].regmap[r]==regmap_pre[i][r]) {
6119           if(wr) {
6120             #ifndef DESTRUCTIVE_WRITEBACK
6121             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6122             #endif
6123             regs[i].wasdirty|=will_dirty_i&(1<<r);
6124           }
6125         }
6126         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
6127           // Register moved to a different register
6128           will_dirty_i&=~(1<<r);
6129           wont_dirty_i&=~(1<<r);
6130           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
6131           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
6132           if(wr) {
6133             #ifndef DESTRUCTIVE_WRITEBACK
6134             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6135             #endif
6136             regs[i].wasdirty|=will_dirty_i&(1<<r);
6137           }
6138         }
6139         else {
6140           will_dirty_i&=~(1<<r);
6141           wont_dirty_i&=~(1<<r);
6142           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6143             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6144             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6145           } else {
6146             wont_dirty_i|=1<<r;
6147             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
6148           }
6149         }
6150       }
6151     }
6152   }
6153 }
6154
6155 #ifdef DISASM
6156   /* disassembly */
6157 void disassemble_inst(int i)
6158 {
6159     if (bt[i]) printf("*"); else printf(" ");
6160     switch(itype[i]) {
6161       case UJUMP:
6162         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6163       case CJUMP:
6164         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
6165       case SJUMP:
6166         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6167       case RJUMP:
6168         if (opcode[i]==0x9&&rt1[i]!=31)
6169           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
6170         else
6171           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6172         break;
6173       case SPAN:
6174         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
6175       case IMM16:
6176         if(opcode[i]==0xf) //LUI
6177           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
6178         else
6179           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6180         break;
6181       case LOAD:
6182       case LOADLR:
6183         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6184         break;
6185       case STORE:
6186       case STORELR:
6187         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
6188         break;
6189       case ALU:
6190       case SHIFT:
6191         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
6192         break;
6193       case MULTDIV:
6194         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
6195         break;
6196       case SHIFTIMM:
6197         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6198         break;
6199       case MOV:
6200         if((opcode2[i]&0x1d)==0x10)
6201           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
6202         else if((opcode2[i]&0x1d)==0x11)
6203           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6204         else
6205           printf (" %x: %s\n",start+i*4,insn[i]);
6206         break;
6207       case COP0:
6208         if(opcode2[i]==0)
6209           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
6210         else if(opcode2[i]==4)
6211           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
6212         else printf (" %x: %s\n",start+i*4,insn[i]);
6213         break;
6214       case COP1:
6215         if(opcode2[i]<3)
6216           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
6217         else if(opcode2[i]>3)
6218           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
6219         else printf (" %x: %s\n",start+i*4,insn[i]);
6220         break;
6221       case COP2:
6222         if(opcode2[i]<3)
6223           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
6224         else if(opcode2[i]>3)
6225           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
6226         else printf (" %x: %s\n",start+i*4,insn[i]);
6227         break;
6228       case C1LS:
6229         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6230         break;
6231       case C2LS:
6232         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6233         break;
6234       case INTCALL:
6235         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6236         break;
6237       default:
6238         //printf (" %s %8x\n",insn[i],source[i]);
6239         printf (" %x: %s\n",start+i*4,insn[i]);
6240     }
6241 }
6242 #else
6243 static void disassemble_inst(int i) {}
6244 #endif // DISASM
6245
6246 #define DRC_TEST_VAL 0x74657374
6247
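// Sanity check at init time: emit a trivial function into the translation
// cache, execute it twice and verify the return values, which catches broken
// cache mapping/flushing before any real code is generated.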
6248 static void new_dynarec_test(void)
6249 {
6250   int (*testfunc)(void);
6251   void *beginning;
6252   int ret[2];
6253   size_t i;
6254
6255   // check structure linkage
6256   if ((void *)reg != (void *)&psxRegs
6257       || (u_char *)rcnts - (u_char *)reg != sizeof(psxRegs))
6258   {
6259     SysPrintf("linkage_arm miscompilation/breakage detected.\n");
6260   }
6261
6262   SysPrintf("testing if we can run recompiled code...\n");
6263   ((volatile u_int *)out)[0]++; // make cache dirty
6264
6265   for (i = 0; i < ARRAY_SIZE(ret); i++) {
6266     out = translation_cache;
6267     beginning = start_block();
6268     emit_movimm(DRC_TEST_VAL + i, 0); // test
6269     emit_ret();
6270     literal_pool(0);
6271     end_block(beginning);
6272     testfunc = beginning;
6273     ret[i] = testfunc();
6274   }
6275
6276   if (ret[0] == DRC_TEST_VAL && ret[1] == DRC_TEST_VAL + 1)
6277     SysPrintf("test passed.\n");
6278   else
6279     SysPrintf("test failed, will likely crash soon (r=%08x %08x)\n", ret[0], ret[1]);
6280   out = translation_cache;
6281 }
6282
6283 // clear the state completely, instead of just marking
6284 // things invalid like invalidate_all_pages() does
6285 void new_dynarec_clear_full()
6286 {
6287   int n;
6288   out = translation_cache;
6289   memset(invalid_code,1,sizeof(invalid_code));
6290   memset(hash_table,0xff,sizeof(hash_table));
6291   memset(mini_ht,-1,sizeof(mini_ht));
6292   memset(restore_candidate,0,sizeof(restore_candidate));
6293   memset(shadow,0,sizeof(shadow));
6294   copy=shadow;
6295   expirep=16384; // Expiry pointer, +2 blocks
6296   pending_exception=0;
6297   literalcount=0;
6298   stop_after_jal=0;
6299   inv_code_start=inv_code_end=~0;
6300   // clear the jump tables
6301   for(n=0;n<4096;n++) ll_clear(jump_in+n);
6302   for(n=0;n<4096;n++) ll_clear(jump_out+n);
6303   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
6304 }
6305
6306 void new_dynarec_init()
6307 {
6308   SysPrintf("Init new dynarec\n");
6309
6310   // allocate/prepare a buffer for translation cache
6311   // see assem_arm.h for some explanation
6312 #if   defined(BASE_ADDR_FIXED)
6313   if (mmap(translation_cache, 1 << TARGET_SIZE_2,
6314             PROT_READ | PROT_WRITE | PROT_EXEC,
6315             MAP_PRIVATE | MAP_ANONYMOUS,
6316             -1, 0) != translation_cache) {
6317     SysPrintf("mmap() failed: %s\n", strerror(errno));
6318     SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
6319     abort();
6320   }
6321 #elif defined(BASE_ADDR_DYNAMIC)
6322   #ifdef VITA
6323   sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
6324   if (sceBlock < 0)
6325     SysPrintf("sceKernelAllocMemBlockForVM failed\n");
6326   int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
6327   if (ret < 0)
6328     SysPrintf("sceKernelGetMemBlockBase failed\n");
6329   #else
6330   translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
6331             PROT_READ | PROT_WRITE | PROT_EXEC,
6332             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
6333   if (translation_cache == MAP_FAILED) {
6334     SysPrintf("mmap() failed: %s\n", strerror(errno));
6335     abort();
6336   }
6337   #endif
6338 #else
6339   #ifndef NO_WRITE_EXEC
6340   // not all systems allow execute in data segment by default
6341   if (mprotect(translation_cache, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
6342     SysPrintf("mprotect() failed: %s\n", strerror(errno));
6343   #endif
6344 #endif
6345   out = translation_cache;
6346   cycle_multiplier=200;
6347   new_dynarec_clear_full();
6348 #ifdef HOST_IMM8
6349   // Copy this into local area so we don't have to put it in every literal pool
6350   invc_ptr=invalid_code;
6351 #endif
6352   arch_init();
6353   new_dynarec_test();
6354 #ifndef RAM_FIXED
6355   ram_offset=(uintptr_t)rdram-0x80000000;
6356 #endif
6357   if (ram_offset!=0)
6358     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
6359 }
6360
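// Undo new_dynarec_init(): release the translation cache mapping and clear
// the jump tables.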
6361 void new_dynarec_cleanup()
6362 {
6363   int n;
6364 #if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
6365   #ifdef VITA
6366   sceKernelFreeMemBlock(sceBlock);
6367   sceBlock = -1;
6368   #else
6369   if (munmap(translation_cache, 1<<TARGET_SIZE_2) < 0)
6370     SysPrintf("munmap() failed\n");
6371   #endif
6372 #endif
6373   for(n=0;n<4096;n++) ll_clear(jump_in+n);
6374   for(n=0;n<4096;n++) ll_clear(jump_out+n);
6375   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
6376   #ifdef ROM_COPY
6377   if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
6378   #endif
6379 }
6380
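// Translate a PSX code address into a host pointer (RAM, its mirrors, or the
// BIOS) and report in *limit the first address past the contiguous region.
// Returns NULL for addresses that can't contain executable code.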
6381 static u_int *get_source_start(u_int addr, u_int *limit)
6382 {
6383   if (addr < 0x00200000 ||
6384     (0xa0000000 <= addr && addr < 0xa0200000)) {
6385     // used for BIOS calls mostly?
6386     *limit = (addr&0xa0000000)|0x00200000;
6387     return (u_int *)(rdram + (addr&0x1fffff));
6388   }
6389   else if (!Config.HLE && (
6390     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
6391     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
6392     // BIOS
6393     *limit = (addr & 0xfff00000) | 0x80000;
6394     return (u_int *)((u_char *)psxR + (addr&0x7ffff));
6395   }
6396   else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
6397     *limit = (addr & 0x80600000) + 0x00200000;
6398     return (u_int *)(rdram + (addr&0x1fffff));
6399   }
6400   return NULL;
6401 }
6402
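// Scan forward from addr (at most 0x1000 bytes) for a "jr $ra" and return the
// address just past its delay slot; used below to estimate where a block ends.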
6403 static u_int scan_for_ret(u_int addr)
6404 {
6405   u_int limit = 0;
6406   u_int *mem;
6407
6408   mem = get_source_start(addr, &limit);
6409   if (mem == NULL)
6410     return addr;
6411
6412   if (limit > addr + 0x1000)
6413     limit = addr + 0x1000;
6414   for (; addr < limit; addr += 4, mem++) {
6415     if (*mem == 0x03e00008) // jr $ra
6416       return addr + 8;
6417   }
6418   return addr;
6419 }
6420
6421 struct savestate_block {
6422   uint32_t addr;
6423   uint32_t regflags;
6424 };
6425
6426 static int addr_cmp(const void *p1_, const void *p2_)
6427 {
6428   const struct savestate_block *p1 = p1_, *p2 = p2_;
6429   return p1->addr - p2->addr;
6430 }
6431
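// Record the entry points of compiled blocks (address plus register
// speculation flags) into the savestate buffer, deduplicating entries that
// fall inside an already-recorded block.  Returns the number of bytes used.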
6432 int new_dynarec_save_blocks(void *save, int size)
6433 {
6434   struct savestate_block *blocks = save;
6435   int maxcount = size / sizeof(blocks[0]);
6436   struct savestate_block tmp_blocks[1024];
6437   struct ll_entry *head;
6438   int p, s, d, o, bcnt;
6439   u_int addr;
6440
6441   o = 0;
6442   for (p = 0; p < ARRAY_SIZE(jump_in); p++) {
6443     bcnt = 0;
6444     for (head = jump_in[p]; head != NULL; head = head->next) {
6445       tmp_blocks[bcnt].addr = head->vaddr;
6446       tmp_blocks[bcnt].regflags = head->reg_sv_flags;
6447       bcnt++;
6448     }
6449     if (bcnt < 1)
6450       continue;
6451     qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
6452
6453     addr = tmp_blocks[0].addr;
6454     for (s = d = 0; s < bcnt; s++) {
6455       if (tmp_blocks[s].addr < addr)
6456         continue;
6457       if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
6458         tmp_blocks[d++] = tmp_blocks[s];
6459       addr = scan_for_ret(tmp_blocks[s].addr);
6460     }
6461
6462     if (o + d > maxcount)
6463       d = maxcount - o;
6464     memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
6465     o += d;
6466   }
6467
6468   return o * sizeof(blocks[0]);
6469 }
6470
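// Recompile the blocks recorded by new_dynarec_save_blocks() after a
// savestate load, temporarily faking GPR values so that register speculation
// sees values similar to those present when the blocks were first compiled.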
6471 void new_dynarec_load_blocks(const void *save, int size)
6472 {
6473   const struct savestate_block *blocks = save;
6474   int count = size / sizeof(blocks[0]);
6475   u_int regs_save[32];
6476   uint32_t f;
6477   int i, b;
6478
6479   get_addr(psxRegs.pc);
6480
6481   // change GPRs so that speculation at least partially works
6482   memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
6483   for (i = 1; i < 32; i++)
6484     psxRegs.GPR.r[i] = 0x80000000;
6485
6486   for (b = 0; b < count; b++) {
6487     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
6488       if (f & 1)
6489         psxRegs.GPR.r[i] = 0x1f800000;
6490     }
6491
6492     get_addr(blocks[b].addr);
6493
6494     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
6495       if (f & 1)
6496         psxRegs.GPR.r[i] = 0x80000000;
6497     }
6498   }
6499
6500   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
6501 }
6502
6503 int new_recompile_block(int addr)
6504 {
6505   u_int pagelimit = 0;
6506   u_int state_rflags = 0;
6507   int i;
6508
6509   assem_debug("NOTCOMPILED: addr = %x -> %p\n", addr, out);
6510   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
6511   //if(debug)
6512   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
6513
6514   // this is just for speculation
6515   for (i = 1; i < 32; i++) {
6516     if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
6517       state_rflags |= 1 << i;
6518   }
6519
6520   start = (u_int)addr&~3;
6521   //assert(((u_int)addr&1)==0);
6522   new_dynarec_did_compile=1;
6523   if (Config.HLE && start == 0x80001000) // hlecall
6524   {
6525     // XXX: is this enough? Maybe check hleSoftCall?
6526     void *beginning=start_block();
6527     u_int page=get_page(start);
6528
6529     invalid_code[start>>12]=0;
6530     emit_movimm(start,0);
6531     emit_writeword(0,&pcaddr);
6532     emit_jmp(new_dyna_leave);
6533     literal_pool(0);
6534     end_block(beginning);
6535     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
6536     return 0;
6537   }
6538
6539   source = get_source_start(start, &pagelimit);
6540   if (source == NULL) {
6541     SysPrintf("Compile at bogus memory address: %08x\n", addr);
6542     exit(1);
6543   }
6544
6545   /* Pass 1: disassemble */
6546   /* Pass 2: register dependencies, branch targets */
6547   /* Pass 3: register allocation */
6548   /* Pass 4: branch dependencies */
6549   /* Pass 5: pre-alloc */
6550   /* Pass 6: optimize clean/dirty state */
6551   /* Pass 7: flag 32-bit registers */
6552   /* Pass 8: assembly */
6553   /* Pass 9: linker */
6554   /* Pass 10: garbage collection / free memory */
6555
6556   int j;
6557   int done=0;
6558   unsigned int type,op,op2;
6559
6560   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
6561
6562   /* Pass 1 disassembly */
6563
6564   for(i=0;!done;i++) {
6565     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
6566     minimum_free_regs[i]=0;
6567     opcode[i]=op=source[i]>>26;
6568     switch(op)
6569     {
6570       case 0x00: strcpy(insn[i],"special"); type=NI;
6571         op2=source[i]&0x3f;
6572         switch(op2)
6573         {
6574           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
6575           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
6576           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
6577           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
6578           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
6579           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
6580           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
6581           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
6582           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
6583           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
6584           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
6585           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
6586           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
6587           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
6588           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
6589           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
6590           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
6591           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
6592           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
6593           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
6594           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
6595           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
6596           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
6597           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
6598           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
6599           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
6600           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
6601           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
6602           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
6603           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
6604           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
6605           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
6606           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
6607           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
6608           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
6609 #if 0
6610           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
6611           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
6612           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
6613           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
6614           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
6615           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
6616           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
6617           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
6618           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
6619           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
6620           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
6621           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
6622           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
6623           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
6624           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
6625           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
6626           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
6627 #endif
6628         }
6629         break;
6630       case 0x01: strcpy(insn[i],"regimm"); type=NI;
6631         op2=(source[i]>>16)&0x1f;
6632         switch(op2)
6633         {
6634           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
6635           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
6636           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
6637           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
6638           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
6639           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
6640           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
6641           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
6642           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
6643           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
6644           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
6645           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
6646           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
6647           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
6648         }
6649         break;
6650       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
6651       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
6652       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
6653       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
6654       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
6655       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
6656       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
6657       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
6658       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
6659       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
6660       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
6661       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
6662       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
6663       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
6664       case 0x10: strcpy(insn[i],"cop0"); type=NI;
6665         op2=(source[i]>>21)&0x1f;
6666         switch(op2)
6667         {
6668           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
6669           case 0x02: strcpy(insn[i],"CFC0"); type=COP0; break;
6670           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
6671           case 0x06: strcpy(insn[i],"CTC0"); type=COP0; break;
6672           case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
6673         }
6674         break;
6675       case 0x11: strcpy(insn[i],"cop1"); type=COP1;
6676         op2=(source[i]>>21)&0x1f;
6677         break;
6678 #if 0
6679       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
6680       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
6681       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
6682       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
6683       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
6684       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
6685       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
6686       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
6687 #endif
6688       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
6689       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
6690       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
6691       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
6692       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
6693       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
6694       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
6695 #if 0
6696       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
6697 #endif
6698       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
6699       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
6700       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
6701       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
6702 #if 0
6703       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
6704       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
6705 #endif
6706       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
6707       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
6708       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
6709       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
6710 #if 0
6711       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
6712       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
6713       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
6714 #endif
6715       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
6716       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
6717 #if 0
6718       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
6719       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
6720       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
6721 #endif
6722       case 0x12: strcpy(insn[i],"COP2"); type=NI;
6723         op2=(source[i]>>21)&0x1f;
6724         //if (op2 & 0x10)
6725         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
6726           if (gte_handlers[source[i]&0x3f]!=NULL) {
6727             if (gte_regnames[source[i]&0x3f]!=NULL)
6728               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
6729             else
6730               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
6731             type=C2OP;
6732           }
6733         }
6734         else switch(op2)
6735         {
6736           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
6737           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
6738           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
6739           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
6740         }
6741         break;
6742       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
6743       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
6744       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
6745       default: strcpy(insn[i],"???"); type=NI;
6746         SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
6747         break;
6748     }
6749     itype[i]=type;
6750     opcode2[i]=op2;
6751     /* Get registers/immediates */
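         // For each instruction this fills in: rs1/rs2 = guest registers read,
         // rt1/rt2 = guest registers written (rt2 also carries LO for MULTDIV
         // and the CSREG/status pseudo-register for MTC0), imm = sign- or
         // zero-extended immediate.  CCREG is the virtual cycle-count register
         // threaded through branches, syscalls and COP0 below.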
6752     lt1[i]=0;
6753     us1[i]=0;
6754     us2[i]=0;
6755     dep1[i]=0;
6756     dep2[i]=0;
6757     gte_rs[i]=gte_rt[i]=0;
6758     switch(type) {
6759       case LOAD:
6760         rs1[i]=(source[i]>>21)&0x1f;
6761         rs2[i]=0;
6762         rt1[i]=(source[i]>>16)&0x1f;
6763         rt2[i]=0;
6764         imm[i]=(short)source[i];
6765         break;
6766       case STORE:
6767       case STORELR:
6768         rs1[i]=(source[i]>>21)&0x1f;
6769         rs2[i]=(source[i]>>16)&0x1f;
6770         rt1[i]=0;
6771         rt2[i]=0;
6772         imm[i]=(short)source[i];
6773         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
6774         break;
6775       case LOADLR:
6776         // LWL/LWR only load part of the register,
6777         // therefore the target register must be treated as a source too
6778         rs1[i]=(source[i]>>21)&0x1f;
6779         rs2[i]=(source[i]>>16)&0x1f;
6780         rt1[i]=(source[i]>>16)&0x1f;
6781         rt2[i]=0;
6782         imm[i]=(short)source[i];
6783         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
6784         if(op==0x26) dep1[i]=rt1[i]; // LWR
6785         break;
6786       case IMM16:
6787         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
6788         else rs1[i]=(source[i]>>21)&0x1f;
6789         rs2[i]=0;
6790         rt1[i]=(source[i]>>16)&0x1f;
6791         rt2[i]=0;
6792         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
6793           imm[i]=(unsigned short)source[i];
6794         }else{
6795           imm[i]=(short)source[i];
6796         }
6797         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
6798         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
6799         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
6800         break;
6801       case UJUMP:
6802         rs1[i]=0;
6803         rs2[i]=0;
6804         rt1[i]=0;
6805         rt2[i]=0;
6806         // The JAL instruction writes to r31.
6807         if (op&1) {
6808           rt1[i]=31;
6809         }
6810         rs2[i]=CCREG;
6811         break;
6812       case RJUMP:
6813         rs1[i]=(source[i]>>21)&0x1f;
6814         rs2[i]=0;
6815         rt1[i]=0;
6816         rt2[i]=0;
6817         // The JALR instruction writes to rd.
6818         if (op2&1) {
6819           rt1[i]=(source[i]>>11)&0x1f;
6820         }
6821         rs2[i]=CCREG;
6822         break;
6823       case CJUMP:
6824         rs1[i]=(source[i]>>21)&0x1f;
6825         rs2[i]=(source[i]>>16)&0x1f;
6826         rt1[i]=0;
6827         rt2[i]=0;
6828         if(op&2) { // BGTZ/BLEZ
6829           rs2[i]=0;
6830         }
6831         us1[i]=rs1[i];
6832         us2[i]=rs2[i];
6833         likely[i]=op>>4;
6834         break;
6835       case SJUMP:
6836         rs1[i]=(source[i]>>21)&0x1f;
6837         rs2[i]=CCREG;
6838         rt1[i]=0;
6839         rt2[i]=0;
6840         us1[i]=rs1[i];
6841         if(op2&0x10) { // BxxAL
6842           rt1[i]=31;
6843           // NOTE: If the branch is not taken, r31 is still overwritten
6844         }
6845         likely[i]=(op2&2)>>1;
6846         break;
6847       case ALU:
6848         rs1[i]=(source[i]>>21)&0x1f; // source
6849         rs2[i]=(source[i]>>16)&0x1f; // second operand (e.g. subtract amount)
6850         rt1[i]=(source[i]>>11)&0x1f; // destination
6851         rt2[i]=0;
6852         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
6853           us1[i]=rs1[i];us2[i]=rs2[i];
6854         }
6855         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
6856           dep1[i]=rs1[i];dep2[i]=rs2[i];
6857         }
6858         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
6859           dep1[i]=rs1[i];dep2[i]=rs2[i];
6860         }
6861         break;
6862       case MULTDIV:
6863         rs1[i]=(source[i]>>21)&0x1f; // source
6864         rs2[i]=(source[i]>>16)&0x1f; // second operand (multiplier/divisor)
6865         rt1[i]=HIREG;
6866         rt2[i]=LOREG;
6867         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
6868           us1[i]=rs1[i];us2[i]=rs2[i];
6869         }
6870         break;
6871       case MOV:
6872         rs1[i]=0;
6873         rs2[i]=0;
6874         rt1[i]=0;
6875         rt2[i]=0;
6876         if(op2==0x10) rs1[i]=HIREG; // MFHI
6877         if(op2==0x11) rt1[i]=HIREG; // MTHI
6878         if(op2==0x12) rs1[i]=LOREG; // MFLO
6879         if(op2==0x13) rt1[i]=LOREG; // MTLO
6880         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
6881         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
6882         dep1[i]=rs1[i];
6883         break;
6884       case SHIFT:
6885         rs1[i]=(source[i]>>16)&0x1f; // value to be shifted
6886         rs2[i]=(source[i]>>21)&0x1f; // shift amount
6887         rt1[i]=(source[i]>>11)&0x1f; // destination
6888         rt2[i]=0;
6889         // DSLLV/DSRLV/DSRAV are 64-bit
6890         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
6891         break;
6892       case SHIFTIMM:
6893         rs1[i]=(source[i]>>16)&0x1f;
6894         rs2[i]=0;
6895         rt1[i]=(source[i]>>11)&0x1f;
6896         rt2[i]=0;
6897         imm[i]=(source[i]>>6)&0x1f;
6898         // DSxx32 instructions
6899         if(op2>=0x3c) imm[i]|=0x20;
6900         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
6901         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
6902         break;
6903       case COP0:
6904         rs1[i]=0;
6905         rs2[i]=0;
6906         rt1[i]=0;
6907         rt2[i]=0;
6908         if(op2==0||op2==2) rt1[i]=(source[i]>>16)&0x1F; // MFC0/CFC0
6909         if(op2==4||op2==6) rs1[i]=(source[i]>>16)&0x1F; // MTC0/CTC0
6910         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
6911         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
6912         break;
6913       case COP1:
6914         rs1[i]=0;
6915         rs2[i]=0;
6916         rt1[i]=0;
6917         rt2[i]=0;
6918         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
6919         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
6920         if(op2==5) us1[i]=rs1[i]; // DMTC1
6921         rs2[i]=CSREG;
6922         break;
6923       case COP2:
6924         rs1[i]=0;
6925         rs2[i]=0;
6926         rt1[i]=0;
6927         rt2[i]=0;
6928         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
6929         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
6930         rs2[i]=CSREG;
6931         int gr=(source[i]>>11)&0x1F;
6932         switch(op2)
6933         {
6934           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
6935           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
6936           case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
6937           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
6938         }
6939         break;
6940       case C1LS:
6941         rs1[i]=(source[i]>>21)&0x1F;
6942         rs2[i]=CSREG;
6943         rt1[i]=0;
6944         rt2[i]=0;
6945         imm[i]=(short)source[i];
6946         break;
6947       case C2LS:
6948         rs1[i]=(source[i]>>21)&0x1F;
6949         rs2[i]=0;
6950         rt1[i]=0;
6951         rt2[i]=0;
6952         imm[i]=(short)source[i];
6953         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
6954         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
6955         break;
6956       case C2OP:
6957         rs1[i]=0;
6958         rs2[i]=0;
6959         rt1[i]=0;
6960         rt2[i]=0;
6961         gte_rs[i]=gte_reg_reads[source[i]&0x3f];
6962         gte_rt[i]=gte_reg_writes[source[i]&0x3f];
6963         gte_rt[i]|=1ll<<63; // every op changes flags
6964         if((source[i]&0x3f)==GTE_MVMVA) {
6965           int v = (source[i] >> 15) & 3;
6966           gte_rs[i]&=~0xe3fll;
6967           if(v==3) gte_rs[i]|=0xe00ll;
6968           else gte_rs[i]|=3ll<<(v*2);
6969         }
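             // gte_rs/gte_rt are 64-bit masks over GTE registers: bits 0-31
             // for data regs, bits 32-63 for control regs (bit 63 is the flag
             // register, written by every op).  For MVMVA the vector field
             // (bits 16..15 of the opcode) selects which data regs are read,
             // hence the mask fixup above: 0xe00 = IR1-IR3, 3<<(v*2) = the
             // selected V0/V1/V2 pair.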
6970         break;
6971       case SYSCALL:
6972       case HLECALL:
6973       case INTCALL:
6974         rs1[i]=CCREG;
6975         rs2[i]=0;
6976         rt1[i]=0;
6977         rt2[i]=0;
6978         break;
6979       default:
6980         rs1[i]=0;
6981         rs2[i]=0;
6982         rt1[i]=0;
6983         rt2[i]=0;
6984     }
6985     /* Calculate branch target addresses */
6986     if(type==UJUMP)
6987       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
6988     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
6989       ba[i]=start+i*4+8; // Ignore never taken branch
6990     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
6991       ba[i]=start+i*4+8; // Ignore never taken branch
6992     else if(type==CJUMP||type==SJUMP)
6993       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
6994     else ba[i]=-1;
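         // For illustration: a J/JAL target is the 26-bit index scaled by 4,
         // pasted into the top nibble of the delay-slot PC ("(word<<6)>>4" ==
         // (word&0x03ffffff)<<2), e.g. J from 0x80010000 with index 0x40000
         // gives 0x80000000|0x100000 = 0x80100000.  Conditional branches add
         // the sign-extended 16-bit offset times 4 to PC+4.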
6995     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP)) {
6996       int do_in_intrp=0;
6997       // branch in delay slot?
6998       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP) {
6999         // not handled here; turn the first branch into an interpreter call so it's executed there if hit
7000         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
7001         do_in_intrp=1;
7002       }
7003       // basic load delay detection
7004       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
7005         int t=(ba[i-1]-start)/4;
7006         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
7007           // jump target wants DS result - potential load delay effect
7008           SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
7009           do_in_intrp=1;
7010           bt[t+1]=1; // expected return from interpreter
7011         }
7012         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
7013               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
7014           // v0 overwrite like this is a sign of trouble, bail out
7015           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
7016           do_in_intrp=1;
7017         }
7018       }
7019       if(do_in_intrp) {
7020         rs1[i-1]=CCREG;
7021         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
7022         ba[i-1]=-1;
7023         itype[i-1]=INTCALL;
7024         done=2;
7025         i--; // don't compile the DS
7026       }
7027     }
7028     /* Is this the end of the block? */
7029     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
7030       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
7031         done=2;
7032       }
7033       else {
7034         if(stop_after_jal) done=1;
7035         // Stop on BREAK
7036         if((source[i+1]&0xfc00003f)==0x0d) done=1;
7037       }
7038       // Don't recompile stuff that's already compiled
7039       if(check_addr(start+i*4+4)) done=1;
7040       // Don't get too close to the limit
7041       if(i>MAXBLOCK/2) done=1;
7042     }
7043     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
7044     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
7045     if(done==2) {
7046       // Does the block continue due to a branch?
7047       for(j=i-1;j>=0;j--)
7048       {
7049         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
7050         if(ba[j]==start+i*4+4) done=j=0;
7051         if(ba[j]==start+i*4+8) done=j=0;
7052       }
7053     }
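         // done==2 is a soft stop: keep compiling if an earlier branch in this
         // block targets this instruction or the two following it, so that
         // those branch targets land inside the compiled block.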
7054     //assert(i<MAXBLOCK-1);
7055     if(start+i*4==pagelimit-4) done=1;
7056     assert(start+i*4<pagelimit);
7057     if (i==MAXBLOCK-1) done=1;
7058     // Stop if we're compiling junk
7059     if(itype[i]==NI&&opcode[i]==0x11) {
7060       done=stop_after_jal=1;
7061       SysPrintf("Disabled speculative precompilation\n");
7062     }
7063   }
7064   slen=i;
7065   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP) {
7066     if(start+i*4==pagelimit) {
7067       itype[i-1]=SPAN;
7068     }
7069   }
7070   assert(slen>0);
7071
7072   /* Pass 2 - Register dependencies and branch targets */
7073
7074   unneeded_registers(0,slen-1,0);
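       // Roughly: a backward scan that fills unneeded_reg[i], a bitmask of
       // guest registers that are dead at instruction i (overwritten before
       // any further read), plus branch_unneeded_reg[] used around branches
       // and their delay slots.  Dead values need no host register and no
       // writeback in the allocation pass below.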
7075
7076   /* Pass 3 - Register allocation */
7077
7078   struct regstat current; // Current register allocations/status
7079   current.dirty=0;
7080   current.u=unneeded_reg[0];
7081   clear_all_regs(current.regmap);
7082   alloc_reg(&current,0,CCREG);
7083   dirty_reg(&current,CCREG);
7084   current.isconst=0;
7085   current.wasconst=0;
7086   current.waswritten=0;
7087   int ds=0;
7088   int cc=0;
7089   int hr=-1;
7090
7091   if((u_int)addr&1) {
7092     // First instruction is delay slot
7093     cc=-1;
7094     bt[1]=1;
7095     ds=1;
7096     unneeded_reg[0]=1;
7097     current.regmap[HOST_BTREG]=BTREG;
7098   }
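       // Note: an odd start address marks a block whose first instruction is a
       // branch delay slot; it is compiled as a one-instruction prologue with
       // the branch target carried in BTREG.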
7099
7100   for(i=0;i<slen;i++)
7101   {
7102     if(bt[i])
7103     {
7104       int hr;
7105       for(hr=0;hr<HOST_REGS;hr++)
7106       {
7107         // Is this really necessary?
7108         if(current.regmap[hr]==0) current.regmap[hr]=-1;
7109       }
7110       current.isconst=0;
7111       current.waswritten=0;
7112     }
7113     if(i>1)
7114     {
7115       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
7116       {
7117         if(rs1[i-2]==0||rs2[i-2]==0)
7118         {
7119           if(rs1[i-2]) {
7120             int hr=get_reg(current.regmap,rs1[i-2]|64);
7121             if(hr>=0) current.regmap[hr]=-1;
7122           }
7123           if(rs2[i-2]) {
7124             int hr=get_reg(current.regmap,rs2[i-2]|64);
7125             if(hr>=0) current.regmap[hr]=-1;
7126           }
7127         }
7128       }
7129     }
7130
7131     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
7132     regs[i].wasconst=current.isconst;
7133     regs[i].wasdirty=current.dirty;
7134     regs[i].loadedconst=0;
7135     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP) {
7136       if(i+1<slen) {
7137         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7138         current.u|=1;
7139       } else {
7140         current.u=1;
7141       }
7142     } else {
7143       if(i+1<slen) {
7144         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
7145         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
7146         current.u|=1;
7147       } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
7148     }
7149     is_ds[i]=ds;
7150     if(ds) {
7151       ds=0; // Skip delay slot, already allocated as part of branch
7152       // ...but we need to alloc it in case something jumps here
7153       if(i+1<slen) {
7154         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
7155       }else{
7156         current.u=branch_unneeded_reg[i-1];
7157       }
7158       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
7159       current.u|=1;
7160       struct regstat temp;
7161       memcpy(&temp,&current,sizeof(current));
7162       temp.wasdirty=temp.dirty;
7163       // TODO: Take into account unconditional branches, as below
7164       delayslot_alloc(&temp,i);
7165       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
7166       regs[i].wasdirty=temp.wasdirty;
7167       regs[i].dirty=temp.dirty;
7168       regs[i].isconst=0;
7169       regs[i].wasconst=0;
7170       current.isconst=0;
7171       // Create entry (branch target) regmap
7172       for(hr=0;hr<HOST_REGS;hr++)
7173       {
7174         int r=temp.regmap[hr];
7175         if(r>=0) {
7176           if(r!=regmap_pre[i][hr]) {
7177             regs[i].regmap_entry[hr]=-1;
7178           }
7179           else
7180           {
7181             if(r<64){
7182               if((current.u>>r)&1) {
7183                 regs[i].regmap_entry[hr]=-1;
7184                 regs[i].regmap[hr]=-1;
7185                 //Don't clear regs in the delay slot as the branch might need them
7186                 //current.regmap[hr]=-1;
7187               }else
7188                 regs[i].regmap_entry[hr]=r;
7189             }
7190             else {
7191               assert(0);
7192             }
7193           }
7194         } else {
7195           // First instruction expects CCREG to be allocated
7196           if(i==0&&hr==HOST_CCREG)
7197             regs[i].regmap_entry[hr]=CCREG;
7198           else
7199             regs[i].regmap_entry[hr]=-1;
7200         }
7201       }
7202     }
7203     else { // Not delay slot
7204       switch(itype[i]) {
7205         case UJUMP:
7206           //current.isconst=0; // DEBUG
7207           //current.wasconst=0; // DEBUG
7208           //regs[i].wasconst=0; // DEBUG
7209           clear_const(&current,rt1[i]);
7210           alloc_cc(&current,i);
7211           dirty_reg(&current,CCREG);
7212           if (rt1[i]==31) {
7213             alloc_reg(&current,i,31);
7214             dirty_reg(&current,31);
7215             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
7216             //assert(rt1[i+1]!=rt1[i]);
7217             #ifdef REG_PREFETCH
7218             alloc_reg(&current,i,PTEMP);
7219             #endif
7220           }
7221           ooo[i]=1;
7222           delayslot_alloc(&current,i+1);
7223           //current.isconst=0; // DEBUG
7224           ds=1;
7225           //printf("i=%d, isconst=%x\n",i,current.isconst);
7226           break;
7227         case RJUMP:
7228           //current.isconst=0;
7229           //current.wasconst=0;
7230           //regs[i].wasconst=0;
7231           clear_const(&current,rs1[i]);
7232           clear_const(&current,rt1[i]);
7233           alloc_cc(&current,i);
7234           dirty_reg(&current,CCREG);
7235           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
7236             alloc_reg(&current,i,rs1[i]);
7237             if (rt1[i]!=0) {
7238               alloc_reg(&current,i,rt1[i]);
7239               dirty_reg(&current,rt1[i]);
7240               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
7241               assert(rt1[i+1]!=rt1[i]);
7242               #ifdef REG_PREFETCH
7243               alloc_reg(&current,i,PTEMP);
7244               #endif
7245             }
7246             #ifdef USE_MINI_HT
7247             if(rs1[i]==31) { // JALR
7248               alloc_reg(&current,i,RHASH);
7249               alloc_reg(&current,i,RHTBL);
7250             }
7251             #endif
7252             delayslot_alloc(&current,i+1);
7253           } else {
7254             // The delay slot overwrites our source register,
7255             // allocate a temporary register to hold the old value.
7256             current.isconst=0;
7257             current.wasconst=0;
7258             regs[i].wasconst=0;
7259             delayslot_alloc(&current,i+1);
7260             current.isconst=0;
7261             alloc_reg(&current,i,RTEMP);
7262           }
7263           //current.isconst=0; // DEBUG
7264           ooo[i]=1;
7265           ds=1;
7266           break;
7267         case CJUMP:
7268           //current.isconst=0;
7269           //current.wasconst=0;
7270           //regs[i].wasconst=0;
7271           clear_const(&current,rs1[i]);
7272           clear_const(&current,rs2[i]);
7273           if((opcode[i]&0x3E)==4) // BEQ/BNE
7274           {
7275             alloc_cc(&current,i);
7276             dirty_reg(&current,CCREG);
7277             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
7278             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
7279             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
7280                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
7281               // The delay slot overwrites one of our conditions.
7282               // Allocate the branch condition registers instead.
7283               current.isconst=0;
7284               current.wasconst=0;
7285               regs[i].wasconst=0;
7286               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
7287               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
7288             }
7289             else
7290             {
7291               ooo[i]=1;
7292               delayslot_alloc(&current,i+1);
7293             }
7294           }
7295           else
7296           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
7297           {
7298             alloc_cc(&current,i);
7299             dirty_reg(&current,CCREG);
7300             alloc_reg(&current,i,rs1[i]);
7301             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
7302               // The delay slot overwrites one of our conditions.
7303               // Allocate the branch condition registers instead.
7304               current.isconst=0;
7305               current.wasconst=0;
7306               regs[i].wasconst=0;
7307               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
7308             }
7309             else
7310             {
7311               ooo[i]=1;
7312               delayslot_alloc(&current,i+1);
7313             }
7314           }
7315           else
7316           // Don't alloc the delay slot yet because we might not execute it
7317           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
7318           {
7319             current.isconst=0;
7320             current.wasconst=0;
7321             regs[i].wasconst=0;
7322             alloc_cc(&current,i);
7323             dirty_reg(&current,CCREG);
7324             alloc_reg(&current,i,rs1[i]);
7325             alloc_reg(&current,i,rs2[i]);
7326           }
7327           else
7328           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
7329           {
7330             current.isconst=0;
7331             current.wasconst=0;
7332             regs[i].wasconst=0;
7333             alloc_cc(&current,i);
7334             dirty_reg(&current,CCREG);
7335             alloc_reg(&current,i,rs1[i]);
7336           }
7337           ds=1;
7338           //current.isconst=0;
7339           break;
7340         case SJUMP:
7341           //current.isconst=0;
7342           //current.wasconst=0;
7343           //regs[i].wasconst=0;
7344           clear_const(&current,rs1[i]);
7345           clear_const(&current,rt1[i]);
7346           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
7347           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
7348           {
7349             alloc_cc(&current,i);
7350             dirty_reg(&current,CCREG);
7351             alloc_reg(&current,i,rs1[i]);
7352             if (rt1[i]==31) { // BLTZAL/BGEZAL
7353               alloc_reg(&current,i,31);
7354               dirty_reg(&current,31);
7355               //#ifdef REG_PREFETCH
7356               //alloc_reg(&current,i,PTEMP);
7357               //#endif
7358             }
7359             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
7360                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
7361               // Allocate the branch condition registers instead.
7362               current.isconst=0;
7363               current.wasconst=0;
7364               regs[i].wasconst=0;
7365               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
7366             }
7367             else
7368             {
7369               ooo[i]=1;
7370               delayslot_alloc(&current,i+1);
7371             }
7372           }
7373           else
7374           // Don't alloc the delay slot yet because we might not execute it
7375           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
7376           {
7377             current.isconst=0;
7378             current.wasconst=0;
7379             regs[i].wasconst=0;
7380             alloc_cc(&current,i);
7381             dirty_reg(&current,CCREG);
7382             alloc_reg(&current,i,rs1[i]);
7383           }
7384           ds=1;
7385           //current.isconst=0;
7386           break;
7387         case IMM16:
7388           imm16_alloc(&current,i);
7389           break;
7390         case LOAD:
7391         case LOADLR:
7392           load_alloc(&current,i);
7393           break;
7394         case STORE:
7395         case STORELR:
7396           store_alloc(&current,i);
7397           break;
7398         case ALU:
7399           alu_alloc(&current,i);
7400           break;
7401         case SHIFT:
7402           shift_alloc(&current,i);
7403           break;
7404         case MULTDIV:
7405           multdiv_alloc(&current,i);
7406           break;
7407         case SHIFTIMM:
7408           shiftimm_alloc(&current,i);
7409           break;
7410         case MOV:
7411           mov_alloc(&current,i);
7412           break;
7413         case COP0:
7414           cop0_alloc(&current,i);
7415           break;
7416         case COP1:
7417         case COP2:
7418           cop12_alloc(&current,i);
7419           break;
7420         case C1LS:
7421           c1ls_alloc(&current,i);
7422           break;
7423         case C2LS:
7424           c2ls_alloc(&current,i);
7425           break;
7426         case C2OP:
7427           c2op_alloc(&current,i);
7428           break;
7429         case SYSCALL:
7430         case HLECALL:
7431         case INTCALL:
7432           syscall_alloc(&current,i);
7433           break;
7434         case SPAN:
7435           pagespan_alloc(&current,i);
7436           break;
7437       }
7438
7439       // Create entry (branch target) regmap
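           // regmap_entry[hr] is what must already be in host register hr when
           // control arrives here from elsewhere (a branch target);
           // regmap[hr] is the mapping after this instruction's own allocation.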
7440       for(hr=0;hr<HOST_REGS;hr++)
7441       {
7442         int r,or;
7443         r=current.regmap[hr];
7444         if(r>=0) {
7445           if(r!=regmap_pre[i][hr]) {
7446             // TODO: delay slot (?)
7447             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
7448             if(or<0||(r&63)>=TEMPREG){
7449               regs[i].regmap_entry[hr]=-1;
7450             }
7451             else
7452             {
7453               // Just move it to a different register
7454               regs[i].regmap_entry[hr]=r;
7455               // If it was dirty before, it's still dirty
7456               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
7457             }
7458           }
7459           else
7460           {
7461             // Unneeded
7462             if(r==0){
7463               regs[i].regmap_entry[hr]=0;
7464             }
7465             else
7466             if(r<64){
7467               if((current.u>>r)&1) {
7468                 regs[i].regmap_entry[hr]=-1;
7469                 //regs[i].regmap[hr]=-1;
7470                 current.regmap[hr]=-1;
7471               }else
7472                 regs[i].regmap_entry[hr]=r;
7473             }
7474             else {
7475               assert(0);
7476             }
7477           }
7478         } else {
7479           // Branches expect CCREG to be allocated at the target
7480           if(regmap_pre[i][hr]==CCREG)
7481             regs[i].regmap_entry[hr]=CCREG;
7482           else
7483             regs[i].regmap_entry[hr]=-1;
7484         }
7485       }
7486       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
7487     }
7488
7489     if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
7490       current.waswritten|=1<<rs1[i-1];
7491     current.waswritten&=~(1<<rt1[i]);
7492     current.waswritten&=~(1<<rt2[i]);
7493     if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
7494       current.waswritten&=~(1<<rs1[i]);
7495
7496     /* Branch post-alloc */
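         // branch_regs[i-1] is the register state used when the branch itself
         // is assembled, i.e. after its delay slot has run; it differs from
         // regs[i-1] when the slot clobbers a condition register, in which
         // case the condition is evaluated before the slot (see the
         // "delay slot overwrote" cases below).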
7497     if(i>0)
7498     {
7499       current.wasdirty=current.dirty;
7500       switch(itype[i-1]) {
7501         case UJUMP:
7502           memcpy(&branch_regs[i-1],&current,sizeof(current));
7503           branch_regs[i-1].isconst=0;
7504           branch_regs[i-1].wasconst=0;
7505           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
7506           alloc_cc(&branch_regs[i-1],i-1);
7507           dirty_reg(&branch_regs[i-1],CCREG);
7508           if(rt1[i-1]==31) { // JAL
7509             alloc_reg(&branch_regs[i-1],i-1,31);
7510             dirty_reg(&branch_regs[i-1],31);
7511           }
7512           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7513           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
7514           break;
7515         case RJUMP:
7516           memcpy(&branch_regs[i-1],&current,sizeof(current));
7517           branch_regs[i-1].isconst=0;
7518           branch_regs[i-1].wasconst=0;
7519           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
7520           alloc_cc(&branch_regs[i-1],i-1);
7521           dirty_reg(&branch_regs[i-1],CCREG);
7522           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
7523           if(rt1[i-1]!=0) { // JALR
7524             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
7525             dirty_reg(&branch_regs[i-1],rt1[i-1]);
7526           }
7527           #ifdef USE_MINI_HT
7528           if(rs1[i-1]==31) { // JALR
7529             alloc_reg(&branch_regs[i-1],i-1,RHASH);
7530             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
7531           }
7532           #endif
7533           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7534           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
7535           break;
7536         case CJUMP:
7537           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
7538           {
7539             alloc_cc(&current,i-1);
7540             dirty_reg(&current,CCREG);
7541             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
7542                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
7543               // The delay slot overwrote one of our conditions
7544               // Delay slot goes after the test (in order)
7545               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7546               current.u|=1;
7547               delayslot_alloc(&current,i);
7548               current.isconst=0;
7549             }
7550             else
7551             {
7552               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
7553               // Alloc the branch condition registers
7554               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
7555               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
7556             }
7557             memcpy(&branch_regs[i-1],&current,sizeof(current));
7558             branch_regs[i-1].isconst=0;
7559             branch_regs[i-1].wasconst=0;
7560             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7561             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
7562           }
7563           else
7564           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
7565           {
7566             alloc_cc(&current,i-1);
7567             dirty_reg(&current,CCREG);
7568             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
7569               // The delay slot overwrote the branch condition
7570               // Delay slot goes after the test (in order)
7571               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7572               current.u|=1;
7573               delayslot_alloc(&current,i);
7574               current.isconst=0;
7575             }
7576             else
7577             {
7578               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
7579               // Alloc the branch condition register
7580               alloc_reg(&current,i-1,rs1[i-1]);
7581             }
7582             memcpy(&branch_regs[i-1],&current,sizeof(current));
7583             branch_regs[i-1].isconst=0;
7584             branch_regs[i-1].wasconst=0;
7585             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7586             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
7587           }
7588           else
7589           // Alloc the delay slot in case the branch is taken
7590           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
7591           {
7592             memcpy(&branch_regs[i-1],&current,sizeof(current));
7593             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
7594             alloc_cc(&branch_regs[i-1],i);
7595             dirty_reg(&branch_regs[i-1],CCREG);
7596             delayslot_alloc(&branch_regs[i-1],i);
7597             branch_regs[i-1].isconst=0;
7598             alloc_reg(&current,i,CCREG); // Not taken path
7599             dirty_reg(&current,CCREG);
7600             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7601           }
7602           else
7603           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
7604           {
7605             memcpy(&branch_regs[i-1],&current,sizeof(current));
7606             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
7607             alloc_cc(&branch_regs[i-1],i);
7608             dirty_reg(&branch_regs[i-1],CCREG);
7609             delayslot_alloc(&branch_regs[i-1],i);
7610             branch_regs[i-1].isconst=0;
7611             alloc_reg(&current,i,CCREG); // Not taken path
7612             dirty_reg(&current,CCREG);
7613             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7614           }
7615           break;
7616         case SJUMP:
7617           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
7618           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
7619           {
7620             alloc_cc(&current,i-1);
7621             dirty_reg(&current,CCREG);
7622             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
7623               // The delay slot overwrote the branch condition
7624               // Delay slot goes after the test (in order)
7625               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
7626               current.u|=1;
7627               delayslot_alloc(&current,i);
7628               current.isconst=0;
7629             }
7630             else
7631             {
7632               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
7633               // Alloc the branch condition register
7634               alloc_reg(&current,i-1,rs1[i-1]);
7635             }
7636             memcpy(&branch_regs[i-1],&current,sizeof(current));
7637             branch_regs[i-1].isconst=0;
7638             branch_regs[i-1].wasconst=0;
7639             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
7640             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
7641           }
7642           else
7643           // Alloc the delay slot in case the branch is taken
7644           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
7645           {
7646             memcpy(&branch_regs[i-1],&current,sizeof(current));
7647             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
7648             alloc_cc(&branch_regs[i-1],i);
7649             dirty_reg(&branch_regs[i-1],CCREG);
7650             delayslot_alloc(&branch_regs[i-1],i);
7651             branch_regs[i-1].isconst=0;
7652             alloc_reg(&current,i,CCREG); // Not taken path
7653             dirty_reg(&current,CCREG);
7654             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
7655           }
7656           // FIXME: BLTZAL/BGEZAL
7657           if(opcode2[i-1]&0x10) { // BxxZAL
7658             alloc_reg(&branch_regs[i-1],i-1,31);
7659             dirty_reg(&branch_regs[i-1],31);
7660           }
7661           break;
7662       }
7663
7664       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7665       {
7666         if(rt1[i-1]==31) // JAL/JALR
7667         {
7668           // Subroutine call will return here, don't alloc any registers
7669           current.dirty=0;
7670           clear_all_regs(current.regmap);
7671           alloc_reg(&current,i,CCREG);
7672           dirty_reg(&current,CCREG);
7673         }
7674         else if(i+1<slen)
7675         {
7676           // Internal branch will jump here, match registers to caller
7677           current.dirty=0;
7678           clear_all_regs(current.regmap);
7679           alloc_reg(&current,i,CCREG);
7680           dirty_reg(&current,CCREG);
7681           for(j=i-1;j>=0;j--)
7682           {
7683             if(ba[j]==start+i*4+4) {
7684               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
7685               current.dirty=branch_regs[j].dirty;
7686               break;
7687             }
7688           }
7689           while(j>=0) {
7690             if(ba[j]==start+i*4+4) {
7691               for(hr=0;hr<HOST_REGS;hr++) {
7692                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
7693                   current.regmap[hr]=-1;
7694                 }
7695                 current.dirty&=branch_regs[j].dirty;
7696               }
7697             }
7698             j--;
7699           }
7700         }
7701       }
7702     }
7703
7704     // Count cycles in between branches
7705     ccadj[i]=cc;
7706     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
7707     {
7708       cc=0;
7709     }
7710 #if !defined(DRC_DBG)
7711     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
7712     {
7713       // GTE runs in parallel until accessed, divide by 2 for a rough guess
7714       cc+=gte_cycletab[source[i]&0x3f]/2;
7715     }
7716     else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // loads/stores cause weird timing issues
7717     {
7718       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
7719     }
7720     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
7721     {
7722       cc+=4;
7723     }
7724     else if(itype[i]==C2LS)
7725     {
7726       cc+=4;
7727     }
7728 #endif
7729     else
7730     {
7731       cc++;
7732     }
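         // ccadj[i] is the cycle count accumulated since the last branch or
         // syscall; the weights above (GTE tables halved, C1LS, back-to-back
         // stores, C2LS) are rough penalties, not exact PSX memory timings.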
7733
7734     if(!is_ds[i]) {
7735       regs[i].dirty=current.dirty;
7736       regs[i].isconst=current.isconst;
7737       memcpy(constmap[i],current_constmap,sizeof(current_constmap));
7738     }
7739     for(hr=0;hr<HOST_REGS;hr++) {
7740       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
7741         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
7742           regs[i].wasconst&=~(1<<hr);
7743         }
7744       }
7745     }
7746     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
7747     regs[i].waswritten=current.waswritten;
7748   }
7749
7750   /* Pass 4 - Cull unused host registers */
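       // Backward liveness over host registers: nr accumulates the set of host
       // regs whose current contents are still needed (sources of upcoming
       // instructions, branch-target entries, INVCP for stores, CCREG at
       // branch targets).  Anything not in needed_reg[i] gets deallocated
       // below so later passes don't bother preserving it.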
7751
7752   uint64_t nr=0;
7753
7754   for (i=slen-1;i>=0;i--)
7755   {
7756     int hr;
7757     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
7758     {
7759       if(ba[i]<start || ba[i]>=(start+slen*4))
7760       {
7761         // Branch out of this block, don't need anything
7762         nr=0;
7763       }
7764       else
7765       {
7766         // Internal branch
7767         // Need whatever matches the target
7768         nr=0;
7769         int t=(ba[i]-start)>>2;
7770         for(hr=0;hr<HOST_REGS;hr++)
7771         {
7772           if(regs[i].regmap_entry[hr]>=0) {
7773             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
7774           }
7775         }
7776       }
7777       // Conditional branch may need registers for following instructions
7778       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7779       {
7780         if(i<slen-2) {
7781           nr|=needed_reg[i+2];
7782           for(hr=0;hr<HOST_REGS;hr++)
7783           {
7784             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
7785             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
7786           }
7787         }
7788       }
7789       // Don't need stuff which is overwritten
7790       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7791       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7792       // Merge in delay slot
7793       for(hr=0;hr<HOST_REGS;hr++)
7794       {
7795         if(!likely[i]) {
7796           // These are overwritten unless the branch is "likely"
7797           // and the delay slot is nullified if not taken
7798           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
7799           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
7800         }
7801         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
7802         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
7803         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
7804         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
7805         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
7806         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
7807         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
7808         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
7809         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
7810           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
7811           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
7812         }
7813       }
7814     }
7815     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7816     {
7817       // SYSCALL instruction (software interrupt)
7818       nr=0;
7819     }
7820     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7821     {
7822       // ERET instruction (return from interrupt)
7823       nr=0;
7824     }
7825     else // Non-branch
7826     {
7827       if(i<slen-1) {
7828         for(hr=0;hr<HOST_REGS;hr++) {
7829           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
7830           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
7831           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
7832           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
7833         }
7834       }
7835     }
7836     for(hr=0;hr<HOST_REGS;hr++)
7837     {
7838       // Overwritten registers are not needed
7839       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
7840       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
7841       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
7842       // Source registers are needed
7843       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
7844       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
7845       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
7846       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
7847       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
7848       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
7849       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
7850       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
7851       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
7852         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
7853         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
7854       }
7855       // Don't store a register immediately after writing it,
7856       // as that may prevent dual-issue.
7857       // But do so if this is a branch target, otherwise we
7858       // might have to load the register before the branch.
7859       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
7860         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1))) {
7861           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
7862           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
7863         }
7864         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1))) {
7865           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
7866           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
7867         }
7868       }
7869     }
7870     // Cycle count is needed at branches.  Assume it is needed at the target too.
7871     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==SPAN) {
7872       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7873       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
7874     }
7875     // Save it
7876     needed_reg[i]=nr;
7877
7878     // Deallocate unneeded registers
7879     for(hr=0;hr<HOST_REGS;hr++)
7880     {
7881       if(!((nr>>hr)&1)) {
7882         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
7883         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
7884            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
7885            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
7886         {
7887           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7888           {
7889             if(likely[i]) {
7890               regs[i].regmap[hr]=-1;
7891               regs[i].isconst&=~(1<<hr);
7892               if(i<slen-2) {
7893                 regmap_pre[i+2][hr]=-1;
7894                 regs[i+2].wasconst&=~(1<<hr);
7895               }
7896             }
7897           }
7898         }
7899         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
7900         {
7901           int d1=0,d2=0,map=0,temp=0;
7902           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
7903           {
7904             d1=dep1[i+1];
7905             d2=dep2[i+1];
7906           }
7907           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
7908              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
7909             map=INVCP;
7910           }
7911           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
7912              itype[i+1]==C1LS || itype[i+1]==C2LS)
7913             temp=FTEMP;
7914           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
7915              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
7916              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
7917              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
7918              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
7919              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
7920              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
7921              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
7922              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
7923              regs[i].regmap[hr]!=map )
7924           {
7925             regs[i].regmap[hr]=-1;
7926             regs[i].isconst&=~(1<<hr);
7927             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
7928                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
7929                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
7930                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
7931                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
7932                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
7933                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
7934                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
7935                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
7936                branch_regs[i].regmap[hr]!=map)
7937             {
7938               branch_regs[i].regmap[hr]=-1;
7939               branch_regs[i].regmap_entry[hr]=-1;
7940               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7941               {
7942                 if(!likely[i]&&i<slen-2) {
7943                   regmap_pre[i+2][hr]=-1;
7944                   regs[i+2].wasconst&=~(1<<hr);
7945                 }
7946               }
7947             }
7948           }
7949         }
7950         else
7951         {
7952           // Non-branch
7953           if(i>0)
7954           {
7955             int d1=0,d2=0,map=-1,temp=-1;
7956             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
7957             {
7958               d1=dep1[i];
7959               d2=dep2[i];
7960             }
7961             if(itype[i]==STORE || itype[i]==STORELR ||
7962                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
7963               map=INVCP;
7964             }
7965             if(itype[i]==LOADLR || itype[i]==STORELR ||
7966                itype[i]==C1LS || itype[i]==C2LS)
7967               temp=FTEMP;
7968             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
7969                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
7970                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
7971                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
7972                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
7973                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
7974             {
7975               if(i<slen-1&&!is_ds[i]) {
7976                 assert(regs[i].regmap[hr]<64);
7977                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
7978                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
7979                 {
7980                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
7981                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
7982                 }
7983                 regmap_pre[i+1][hr]=-1;
7984                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
7985                 regs[i+1].wasconst&=~(1<<hr);
7986               }
7987               regs[i].regmap[hr]=-1;
7988               regs[i].isconst&=~(1<<hr);
7989             }
7990           }
7991         }
7992       }
7993     }
7994   }
7995
7996   /* Pass 5 - Pre-allocate registers */
7997
7998   // If a register is allocated during a loop, try to allocate it for the
7999   // entire loop, if possible.  This avoids loading/storing registers
8000   // inside of the loop.
8001
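  // Rough shape of the pass, as implemented below: f_regmap[] records, per host
  // register, the guest register we would like to keep resident.  When a
  // backwards branch whose target lies inside this block is seen, the current
  // mapping is extended over [target, branch] wherever that host register is
  // otherwise unallocated, so the value stays live for the whole loop.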
8002   signed char f_regmap[HOST_REGS];
8003   clear_all_regs(f_regmap);
8004   for(i=0;i<slen-1;i++)
8005   {
8006     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
8007     {
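      // ba[i] below the current address means a backwards branch, i.e. a loop
      // within this block; only in that case is pre-allocation attempted.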
8008       if(ba[i]>=start && ba[i]<(start+i*4))
8009       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
8010       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
8011       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
8012       ||itype[i+1]==SHIFT||itype[i+1]==COP1
8013       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
8014       {
8015         int t=(ba[i]-start)>>2;
8016         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP)) // loop_preload can't handle jumps into delay slots
8017         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
8018         for(hr=0;hr<HOST_REGS;hr++)
8019         {
8020           if(regs[i].regmap[hr]>64) {
8021             if(!((regs[i].dirty>>hr)&1))
8022               f_regmap[hr]=regs[i].regmap[hr];
8023             else f_regmap[hr]=-1;
8024           }
8025           else if(regs[i].regmap[hr]>=0) {
8026             if(f_regmap[hr]!=regs[i].regmap[hr]) {
8027               // dealloc old register
8028               int n;
8029               for(n=0;n<HOST_REGS;n++)
8030               {
8031                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8032               }
8033               // and alloc new one
8034               f_regmap[hr]=regs[i].regmap[hr];
8035             }
8036           }
8037           if(branch_regs[i].regmap[hr]>64) {
8038             if(!((branch_regs[i].dirty>>hr)&1))
8039               f_regmap[hr]=branch_regs[i].regmap[hr];
8040             else f_regmap[hr]=-1;
8041           }
8042           else if(branch_regs[i].regmap[hr]>=0) {
8043             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
8044               // dealloc old register
8045               int n;
8046               for(n=0;n<HOST_REGS;n++)
8047               {
8048                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
8049               }
8050               // and alloc new one
8051               f_regmap[hr]=branch_regs[i].regmap[hr];
8052             }
8053           }
8054           if(ooo[i]) {
8055             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
8056               f_regmap[hr]=branch_regs[i].regmap[hr];
8057           }else{
8058             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
8059               f_regmap[hr]=branch_regs[i].regmap[hr];
8060           }
8061           // Avoid dirty->clean transition
8062           #ifdef DESTRUCTIVE_WRITEBACK
8063           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
8064           #endif
8065           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
8066           // case above; however, it's always a good idea.  We can't hoist the
8067           // load if the register was already allocated, so there's no point
8068           // wasting time analyzing most of these cases.  It only "succeeds"
8069           // when the mapping was different and the load can be replaced with
8070           // a mov, which is of negligible benefit.  So such cases are
8071           // skipped below.
8072           if(f_regmap[hr]>0) {
8073             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
8074               int r=f_regmap[hr];
8075               for(j=t;j<=i;j++)
8076               {
8077                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
8078                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
8079                 assert(r < 64);
8080                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
8081                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
8082                   int k;
8083                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
8084                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
8085                     if(r>63) {
8086                       if(get_reg(regs[i].regmap,r&63)<0) break;
8087                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
8088                     }
8089                     k=i;
8090                     while(k>1&&regs[k-1].regmap[hr]==-1) {
8091                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
8092                         //printf("no free regs for store %x\n",start+(k-1)*4);
8093                         break;
8094                       }
8095                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
8096                         //printf("no-match due to different register\n");
8097                         break;
8098                       }
8099                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP) {
8100                         //printf("no-match due to branch\n");
8101                         break;
8102                       }
8103                       // call/ret fast path assumes no registers allocated
8104                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
8105                         break;
8106                       }
8107                       assert(r < 64);
8108                       k--;
8109                     }
8110                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
8111                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
8112                       while(k<i) {
8113                         regs[k].regmap_entry[hr]=f_regmap[hr];
8114                         regs[k].regmap[hr]=f_regmap[hr];
8115                         regmap_pre[k+1][hr]=f_regmap[hr];
8116                         regs[k].wasdirty&=~(1<<hr);
8117                         regs[k].dirty&=~(1<<hr);
8118                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
8119                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
8120                         regs[k].wasconst&=~(1<<hr);
8121                         regs[k].isconst&=~(1<<hr);
8122                         k++;
8123                       }
8124                     }
8125                     else {
8126                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
8127                       break;
8128                     }
8129                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
8130                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
8131                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
8132                       regs[i].regmap_entry[hr]=f_regmap[hr];
8133                       regs[i].regmap[hr]=f_regmap[hr];
8134                       regs[i].wasdirty&=~(1<<hr);
8135                       regs[i].dirty&=~(1<<hr);
8136                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
8137                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
8138                       regs[i].wasconst&=~(1<<hr);
8139                       regs[i].isconst&=~(1<<hr);
8140                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
8141                       branch_regs[i].wasdirty&=~(1<<hr);
8142                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
8143                       branch_regs[i].regmap[hr]=f_regmap[hr];
8144                       branch_regs[i].dirty&=~(1<<hr);
8145                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
8146                       branch_regs[i].wasconst&=~(1<<hr);
8147                       branch_regs[i].isconst&=~(1<<hr);
8148                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
8149                         regmap_pre[i+2][hr]=f_regmap[hr];
8150                         regs[i+2].wasdirty&=~(1<<hr);
8151                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
8152                       }
8153                     }
8154                   }
8155                   for(k=t;k<j;k++) {
8156                     // Alloc register clean at beginning of loop;
8157                     // pass 6 may dirty it later
8158                     regs[k].regmap_entry[hr]=f_regmap[hr];
8159                     regs[k].regmap[hr]=f_regmap[hr];
8160                     regs[k].dirty&=~(1<<hr);
8161                     regs[k].wasconst&=~(1<<hr);
8162                     regs[k].isconst&=~(1<<hr);
8163                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP) {
8164                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
8165                       branch_regs[k].regmap[hr]=f_regmap[hr];
8166                       branch_regs[k].dirty&=~(1<<hr);
8167                       branch_regs[k].wasconst&=~(1<<hr);
8168                       branch_regs[k].isconst&=~(1<<hr);
8169                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
8170                         regmap_pre[k+2][hr]=f_regmap[hr];
8171                         regs[k+2].wasdirty&=~(1<<hr);
8172                       }
8173                     }
8174                     else
8175                     {
8176                       regmap_pre[k+1][hr]=f_regmap[hr];
8177                       regs[k+1].wasdirty&=~(1<<hr);
8178                     }
8179                   }
8180                   if(regs[j].regmap[hr]==f_regmap[hr])
8181                     regs[j].regmap_entry[hr]=f_regmap[hr];
8182                   break;
8183                 }
8184                 if(j==i) break;
8185                 if(regs[j].regmap[hr]>=0)
8186                   break;
8187                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
8188                   //printf("no-match due to different register\n");
8189                   break;
8190                 }
8191                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
8192                 {
8193                   // Stop on unconditional branch
8194                   break;
8195                 }
8196                 if(itype[j]==CJUMP||itype[j]==SJUMP)
8197                 {
8198                   if(ooo[j]) {
8199                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
8200                       break;
8201                   }else{
8202                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
8203                       break;
8204                   }
8205                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
8206                     //printf("no-match due to different register (branch)\n");
8207                     break;
8208                   }
8209                 }
8210                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
8211                   //printf("No free regs for store %x\n",start+j*4);
8212                   break;
8213                 }
8214                 assert(f_regmap[hr]<64);
8215               }
8216             }
8217           }
8218         }
8219       }
8220     }else{
8221       // Non branch or undetermined branch target
8222       for(hr=0;hr<HOST_REGS;hr++)
8223       {
8224         if(hr!=EXCLUDE_REG) {
8225           if(regs[i].regmap[hr]>64) {
8226             if(!((regs[i].dirty>>hr)&1))
8227               f_regmap[hr]=regs[i].regmap[hr];
8228           }
8229           else if(regs[i].regmap[hr]>=0) {
8230             if(f_regmap[hr]!=regs[i].regmap[hr]) {
8231               // dealloc old register
8232               int n;
8233               for(n=0;n<HOST_REGS;n++)
8234               {
8235                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
8236               }
8237               // and alloc new one
8238               f_regmap[hr]=regs[i].regmap[hr];
8239             }
8240           }
8241         }
8242       }
8243       // Try to restore cycle count at branch targets
8244       if(bt[i]) {
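        // Scan forward to the next point where the cycle counter is mapped; if
        // it is held in CCREG there, keep it allocated from this branch target
        // onward (and, further below, extend backwards too) so the cycle count
        // does not need to be reloaded.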
8245         for(j=i;j<slen-1;j++) {
8246           if(regs[j].regmap[HOST_CCREG]!=-1) break;
8247           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
8248             //printf("no free regs for store %x\n",start+j*4);
8249             break;
8250           }
8251         }
8252         if(regs[j].regmap[HOST_CCREG]==CCREG) {
8253           int k=i;
8254           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
8255           while(k<j) {
8256             regs[k].regmap_entry[HOST_CCREG]=CCREG;
8257             regs[k].regmap[HOST_CCREG]=CCREG;
8258             regmap_pre[k+1][HOST_CCREG]=CCREG;
8259             regs[k+1].wasdirty|=1<<HOST_CCREG;
8260             regs[k].dirty|=1<<HOST_CCREG;
8261             regs[k].wasconst&=~(1<<HOST_CCREG);
8262             regs[k].isconst&=~(1<<HOST_CCREG);
8263             k++;
8264           }
8265           regs[j].regmap_entry[HOST_CCREG]=CCREG;
8266         }
8267         // Work backwards from the branch target
8268         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
8269         {
8270           //printf("Extend backwards\n");
8271           int k;
8272           k=i;
8273           while(regs[k-1].regmap[HOST_CCREG]==-1) {
8274             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
8275               //printf("no free regs for store %x\n",start+(k-1)*4);
8276               break;
8277             }
8278             k--;
8279           }
8280           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
8281             //printf("Extend CC, %x ->\n",start+k*4);
8282             while(k<=i) {
8283               regs[k].regmap_entry[HOST_CCREG]=CCREG;
8284               regs[k].regmap[HOST_CCREG]=CCREG;
8285               regmap_pre[k+1][HOST_CCREG]=CCREG;
8286               regs[k+1].wasdirty|=1<<HOST_CCREG;
8287               regs[k].dirty|=1<<HOST_CCREG;
8288               regs[k].wasconst&=~(1<<HOST_CCREG);
8289               regs[k].isconst&=~(1<<HOST_CCREG);
8290               k++;
8291             }
8292           }
8293           else {
8294             //printf("Fail Extend CC, %x ->\n",start+k*4);
8295           }
8296         }
8297       }
8298       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
8299          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
8300          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1)
8301       {
8302         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
8303       }
8304     }
8305   }
8306
8307   // This allocates registers (if possible) one instruction prior
8308   // to use, which can avoid a load-use penalty on certain CPUs.
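  // Roughly: if the next instruction reads a register that is not loaded yet,
  // and the host register it will use is still free in the current
  // instruction, pull the mapping back by one so the load is issued earlier.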
8309   for(i=0;i<slen-1;i++)
8310   {
8311     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP))
8312     {
8313       if(!bt[i+1])
8314       {
8315         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
8316            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
8317         {
8318           if(rs1[i+1]) {
8319             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
8320             {
8321               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8322               {
8323                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8324                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8325                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8326                 regs[i].isconst&=~(1<<hr);
8327                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8328                 constmap[i][hr]=constmap[i+1][hr];
8329                 regs[i+1].wasdirty&=~(1<<hr);
8330                 regs[i].dirty&=~(1<<hr);
8331               }
8332             }
8333           }
8334           if(rs2[i+1]) {
8335             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
8336             {
8337               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8338               {
8339                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
8340                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
8341                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
8342                 regs[i].isconst&=~(1<<hr);
8343                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8344                 constmap[i][hr]=constmap[i+1][hr];
8345                 regs[i+1].wasdirty&=~(1<<hr);
8346                 regs[i].dirty&=~(1<<hr);
8347               }
8348             }
8349           }
8350           // Preload target address for load instruction (non-constant)
8351           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8352             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
8353             {
8354               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8355               {
8356                 regs[i].regmap[hr]=rs1[i+1];
8357                 regmap_pre[i+1][hr]=rs1[i+1];
8358                 regs[i+1].regmap_entry[hr]=rs1[i+1];
8359                 regs[i].isconst&=~(1<<hr);
8360                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8361                 constmap[i][hr]=constmap[i+1][hr];
8362                 regs[i+1].wasdirty&=~(1<<hr);
8363                 regs[i].dirty&=~(1<<hr);
8364               }
8365             }
8366           }
8367           // Load source into target register
8368           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8369             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
8370             {
8371               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8372               {
8373                 regs[i].regmap[hr]=rs1[i+1];
8374                 regmap_pre[i+1][hr]=rs1[i+1];
8375                 regs[i+1].regmap_entry[hr]=rs1[i+1];
8376                 regs[i].isconst&=~(1<<hr);
8377                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8378                 constmap[i][hr]=constmap[i+1][hr];
8379                 regs[i+1].wasdirty&=~(1<<hr);
8380                 regs[i].dirty&=~(1<<hr);
8381               }
8382             }
8383           }
8384           // Address for store instruction (non-constant)
8385           if(itype[i+1]==STORE||itype[i+1]==STORELR
8386              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
8387             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8388               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
8389               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
8390               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
8391               assert(hr>=0);
8392               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8393               {
8394                 regs[i].regmap[hr]=rs1[i+1];
8395                 regmap_pre[i+1][hr]=rs1[i+1];
8396                 regs[i+1].regmap_entry[hr]=rs1[i+1];
8397                 regs[i].isconst&=~(1<<hr);
8398                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8399                 constmap[i][hr]=constmap[i+1][hr];
8400                 regs[i+1].wasdirty&=~(1<<hr);
8401                 regs[i].dirty&=~(1<<hr);
8402               }
8403             }
8404           }
8405           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
8406             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
8407               int nr;
8408               hr=get_reg(regs[i+1].regmap,FTEMP);
8409               assert(hr>=0);
8410               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
8411               {
8412                 regs[i].regmap[hr]=rs1[i+1];
8413                 regmap_pre[i+1][hr]=rs1[i+1];
8414                 regs[i+1].regmap_entry[hr]=rs1[i+1];
8415                 regs[i].isconst&=~(1<<hr);
8416                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
8417                 constmap[i][hr]=constmap[i+1][hr];
8418                 regs[i+1].wasdirty&=~(1<<hr);
8419                 regs[i].dirty&=~(1<<hr);
8420               }
8421               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
8422               {
8423                 // move it to another register
8424                 regs[i+1].regmap[hr]=-1;
8425                 regmap_pre[i+2][hr]=-1;
8426                 regs[i+1].regmap[nr]=FTEMP;
8427                 regmap_pre[i+2][nr]=FTEMP;
8428                 regs[i].regmap[nr]=rs1[i+1];
8429                 regmap_pre[i+1][nr]=rs1[i+1];
8430                 regs[i+1].regmap_entry[nr]=rs1[i+1];
8431                 regs[i].isconst&=~(1<<nr);
8432                 regs[i+1].isconst&=~(1<<nr);
8433                 regs[i].dirty&=~(1<<nr);
8434                 regs[i+1].wasdirty&=~(1<<nr);
8435                 regs[i+1].dirty&=~(1<<nr);
8436                 regs[i+2].wasdirty&=~(1<<nr);
8437               }
8438             }
8439           }
8440           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
8441             if(itype[i+1]==LOAD)
8442               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
8443             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
8444               hr=get_reg(regs[i+1].regmap,FTEMP);
8445             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
8446               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
8447               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
8448             }
8449             if(hr>=0&&regs[i].regmap[hr]<0) {
8450               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
8451               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
8452                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
8453                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
8454                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
8455                 regs[i].isconst&=~(1<<hr);
8456                 regs[i+1].wasdirty&=~(1<<hr);
8457                 regs[i].dirty&=~(1<<hr);
8458               }
8459             }
8460           }
8461         }
8462       }
8463     }
8464   }
8465
8466   /* Pass 6 - Optimize clean/dirty state */
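  // The details are in clean_registers(); broadly, it works out where dirty
  // values must be written back so entry points see a consistent state
  // (best-effort description, the heavy lifting is in that helper).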
8467   clean_registers(0,slen-1,1);
8468
8469   /* Pass 7 - Identify 32-bit registers */
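  // Note: only the branch-target marking below remains of this pass here; the
  // 32-bit/64-bit register analysis is not needed for the 32-bit PSX CPU.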
8470   for (i=slen-1;i>=0;i--)
8471   {
8472     if(itype[i]==CJUMP||itype[i]==SJUMP)
8473     {
8474       // Conditional branch
8475       if((source[i]>>16)!=0x1000&&i<slen-2) {
8476         // Mark this address as a branch target since it may be called
8477         // upon return from interrupt
8478         bt[i+2]=1;
8479       }
8480     }
8481   }
8482
8483   if(itype[slen-1]==SPAN) {
8484     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
8485   }
8486
8487 #ifdef DISASM
8488   /* Debug/disassembly */
8489   for(i=0;i<slen;i++)
8490   {
8491     printf("U:");
8492     int r;
8493     for(r=1;r<=CCREG;r++) {
8494       if((unneeded_reg[i]>>r)&1) {
8495         if(r==HIREG) printf(" HI");
8496         else if(r==LOREG) printf(" LO");
8497         else printf(" r%d",r);
8498       }
8499     }
8500     printf("\n");
8501     #if defined(__i386__) || defined(__x86_64__)
8502     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
8503     #endif
8504     #ifdef __arm__
8505     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
8506     #endif
8507     printf("needs: ");
8508     if(needed_reg[i]&1) printf("eax ");
8509     if((needed_reg[i]>>1)&1) printf("ecx ");
8510     if((needed_reg[i]>>2)&1) printf("edx ");
8511     if((needed_reg[i]>>3)&1) printf("ebx ");
8512     if((needed_reg[i]>>5)&1) printf("ebp ");
8513     if((needed_reg[i]>>6)&1) printf("esi ");
8514     if((needed_reg[i]>>7)&1) printf("edi ");
8515     printf("\n");
8516     #if defined(__i386__) || defined(__x86_64__)
8517     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
8518     printf("dirty: ");
8519     if(regs[i].wasdirty&1) printf("eax ");
8520     if((regs[i].wasdirty>>1)&1) printf("ecx ");
8521     if((regs[i].wasdirty>>2)&1) printf("edx ");
8522     if((regs[i].wasdirty>>3)&1) printf("ebx ");
8523     if((regs[i].wasdirty>>5)&1) printf("ebp ");
8524     if((regs[i].wasdirty>>6)&1) printf("esi ");
8525     if((regs[i].wasdirty>>7)&1) printf("edi ");
8526     #endif
8527     #ifdef __arm__
8528     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
8529     printf("dirty: ");
8530     if(regs[i].wasdirty&1) printf("r0 ");
8531     if((regs[i].wasdirty>>1)&1) printf("r1 ");
8532     if((regs[i].wasdirty>>2)&1) printf("r2 ");
8533     if((regs[i].wasdirty>>3)&1) printf("r3 ");
8534     if((regs[i].wasdirty>>4)&1) printf("r4 ");
8535     if((regs[i].wasdirty>>5)&1) printf("r5 ");
8536     if((regs[i].wasdirty>>6)&1) printf("r6 ");
8537     if((regs[i].wasdirty>>7)&1) printf("r7 ");
8538     if((regs[i].wasdirty>>8)&1) printf("r8 ");
8539     if((regs[i].wasdirty>>9)&1) printf("r9 ");
8540     if((regs[i].wasdirty>>10)&1) printf("r10 ");
8541     if((regs[i].wasdirty>>12)&1) printf("r12 ");
8542     #endif
8543     printf("\n");
8544     disassemble_inst(i);
8545     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
8546     #if defined(__i386__) || defined(__x86_64__)
8547     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
8548     if(regs[i].dirty&1) printf("eax ");
8549     if((regs[i].dirty>>1)&1) printf("ecx ");
8550     if((regs[i].dirty>>2)&1) printf("edx ");
8551     if((regs[i].dirty>>3)&1) printf("ebx ");
8552     if((regs[i].dirty>>5)&1) printf("ebp ");
8553     if((regs[i].dirty>>6)&1) printf("esi ");
8554     if((regs[i].dirty>>7)&1) printf("edi ");
8555     #endif
8556     #ifdef __arm__
8557     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
8558     if(regs[i].dirty&1) printf("r0 ");
8559     if((regs[i].dirty>>1)&1) printf("r1 ");
8560     if((regs[i].dirty>>2)&1) printf("r2 ");
8561     if((regs[i].dirty>>3)&1) printf("r3 ");
8562     if((regs[i].dirty>>4)&1) printf("r4 ");
8563     if((regs[i].dirty>>5)&1) printf("r5 ");
8564     if((regs[i].dirty>>6)&1) printf("r6 ");
8565     if((regs[i].dirty>>7)&1) printf("r7 ");
8566     if((regs[i].dirty>>8)&1) printf("r8 ");
8567     if((regs[i].dirty>>9)&1) printf("r9 ");
8568     if((regs[i].dirty>>10)&1) printf("r10 ");
8569     if((regs[i].dirty>>12)&1) printf("r12 ");
8570     #endif
8571     printf("\n");
8572     if(regs[i].isconst) {
8573       printf("constants: ");
8574       #if defined(__i386__) || defined(__x86_64__)
8575       if(regs[i].isconst&1) printf("eax=%x ",(u_int)constmap[i][0]);
8576       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(u_int)constmap[i][1]);
8577       if((regs[i].isconst>>2)&1) printf("edx=%x ",(u_int)constmap[i][2]);
8578       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(u_int)constmap[i][3]);
8579       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(u_int)constmap[i][5]);
8580       if((regs[i].isconst>>6)&1) printf("esi=%x ",(u_int)constmap[i][6]);
8581       if((regs[i].isconst>>7)&1) printf("edi=%x ",(u_int)constmap[i][7]);
8582       #endif
8583       #ifdef __arm__
8584       int r;
8585       for (r = 0; r < ARRAY_SIZE(constmap[i]); r++)
8586         if ((regs[i].isconst >> r) & 1)
8587           printf(" r%d=%x", r, (u_int)constmap[i][r]);
8588       #endif
8589       printf("\n");
8590     }
8591     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
8592       #if defined(__i386__) || defined(__x86_64__)
8593       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
8594       if(branch_regs[i].dirty&1) printf("eax ");
8595       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
8596       if((branch_regs[i].dirty>>2)&1) printf("edx ");
8597       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
8598       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
8599       if((branch_regs[i].dirty>>6)&1) printf("esi ");
8600       if((branch_regs[i].dirty>>7)&1) printf("edi ");
8601       #endif
8602       #ifdef __arm__
8603       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
8604       if(branch_regs[i].dirty&1) printf("r0 ");
8605       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
8606       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
8607       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
8608       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
8609       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
8610       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
8611       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
8612       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
8613       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
8614       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
8615       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
8616       #endif
8617     }
8618   }
8619 #endif // DISASM
8620
8621   /* Pass 8 - Assembly */
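  // For each instruction: write back / reload registers as needed at entry
  // points, emit the body via the *_assemble() helpers, and record entry
  // addresses in instr_addr[] for the linker pass below.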
8622   linkcount=0;stubcount=0;
8623   ds=0;is_delayslot=0;
8624   u_int dirty_pre=0;
8625   void *beginning=start_block();
8626   if((u_int)addr&1) {
8627     ds=1;
8628     pagespan_ds();
8629   }
8630   void *instr_addr0_override = NULL;
8631
8632   if (start == 0x80030000) {
8633     // nasty hack for fastbios thing
8634     // override block entry to this code
8635     instr_addr0_override = out;
8636     emit_movimm(start,0);
8637     // abuse io address var as a flag that we
8638     // have already returned here once
8639     emit_readword(&address,1);
8640     emit_writeword(0,&pcaddr);
8641     emit_writeword(0,&address);
8642     emit_cmp(0,1);
8643     emit_jne(new_dyna_leave);
8644   }
8645   for(i=0;i<slen;i++)
8646   {
8647     //if(ds) printf("ds: ");
8648     disassemble_inst(i);
8649     if(ds) {
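      // Delay-slot instructions are emitted together with their branch, so the
      // slot itself gets no separate entry point here.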
8650       ds=0; // Skip delay slot
8651       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
8652       instr_addr[i] = NULL;
8653     } else {
8654       speculate_register_values(i);
8655       #ifndef DESTRUCTIVE_WRITEBACK
8656       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
8657       {
8658         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,unneeded_reg[i]);
8659       }
8660       if((itype[i]==CJUMP||itype[i]==SJUMP)&&!likely[i]) {
8661         dirty_pre=branch_regs[i].dirty;
8662       }else{
8663         dirty_pre=regs[i].dirty;
8664       }
8665       #endif
8666       // write back
8667       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
8668       {
8669         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,unneeded_reg[i]);
8670         loop_preload(regmap_pre[i],regs[i].regmap_entry);
8671       }
8672       // branch target entry point
8673       instr_addr[i] = out;
8674       assem_debug("<->\n");
8675       drc_dbg_emit_do_cmp(i);
8676
8677       // load regs
8678       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
8679         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty);
8680       load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i],rs2[i]);
8681       address_generation(i,&regs[i],regs[i].regmap_entry);
8682       load_consts(regmap_pre[i],regs[i].regmap,i);
8683       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP)
8684       {
8685         // Load the delay slot registers if necessary
8686         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
8687           load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
8688         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
8689           load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
8690         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
8691           load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
8692       }
8693       else if(i+1<slen)
8694       {
8695         // Preload registers for following instruction
8696         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
8697           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
8698             load_regs(regs[i].regmap_entry,regs[i].regmap,rs1[i+1],rs1[i+1]);
8699         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
8700           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
8701             load_regs(regs[i].regmap_entry,regs[i].regmap,rs2[i+1],rs2[i+1]);
8702       }
8703       // TODO: if(is_ooo(i)) address_generation(i+1);
8704       if(itype[i]==CJUMP)
8705         load_regs(regs[i].regmap_entry,regs[i].regmap,CCREG,CCREG);
8706       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
8707         load_regs(regs[i].regmap_entry,regs[i].regmap,INVCP,INVCP);
8708       // assemble
8709       switch(itype[i]) {
8710         case ALU:
8711           alu_assemble(i,&regs[i]);break;
8712         case IMM16:
8713           imm16_assemble(i,&regs[i]);break;
8714         case SHIFT:
8715           shift_assemble(i,&regs[i]);break;
8716         case SHIFTIMM:
8717           shiftimm_assemble(i,&regs[i]);break;
8718         case LOAD:
8719           load_assemble(i,&regs[i]);break;
8720         case LOADLR:
8721           loadlr_assemble(i,&regs[i]);break;
8722         case STORE:
8723           store_assemble(i,&regs[i]);break;
8724         case STORELR:
8725           storelr_assemble(i,&regs[i]);break;
8726         case COP0:
8727           cop0_assemble(i,&regs[i]);break;
8728         case COP1:
8729           cop1_assemble(i,&regs[i]);break;
8730         case C1LS:
8731           c1ls_assemble(i,&regs[i]);break;
8732         case COP2:
8733           cop2_assemble(i,&regs[i]);break;
8734         case C2LS:
8735           c2ls_assemble(i,&regs[i]);break;
8736         case C2OP:
8737           c2op_assemble(i,&regs[i]);break;
8738         case MULTDIV:
8739           multdiv_assemble(i,&regs[i]);break;
8740         case MOV:
8741           mov_assemble(i,&regs[i]);break;
8742         case SYSCALL:
8743           syscall_assemble(i,&regs[i]);break;
8744         case HLECALL:
8745           hlecall_assemble(i,&regs[i]);break;
8746         case INTCALL:
8747           intcall_assemble(i,&regs[i]);break;
8748         case UJUMP:
8749           ujump_assemble(i,&regs[i]);ds=1;break;
8750         case RJUMP:
8751           rjump_assemble(i,&regs[i]);ds=1;break;
8752         case CJUMP:
8753           cjump_assemble(i,&regs[i]);ds=1;break;
8754         case SJUMP:
8755           sjump_assemble(i,&regs[i]);ds=1;break;
8756         case SPAN:
8757           pagespan_assemble(i,&regs[i]);break;
8758       }
8759       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
8760         literal_pool(1024);
8761       else
8762         literal_pool_jumpover(256);
8763     }
8764   }
8765   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
8766   // If the block did not end with an unconditional branch,
8767   // add a jump to the next instruction.
8768   if(i>1) {
8769     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
8770       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
8771       assert(i==slen);
8772       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP) {
8773         store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
8774         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
8775           emit_loadreg(CCREG,HOST_CCREG);
8776         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
8777       }
8778       else if(!likely[i-2])
8779       {
8780         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].dirty,start+i*4);
8781         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
8782       }
8783       else
8784       {
8785         store_regs_bt(regs[i-2].regmap,regs[i-2].dirty,start+i*4);
8786         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
8787       }
8788       add_to_linker(out,start+i*4,0);
8789       emit_jmp(0);
8790     }
8791   }
8792   else
8793   {
8794     assert(i>0);
8795     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP);
8796     store_regs_bt(regs[i-1].regmap,regs[i-1].dirty,start+i*4);
8797     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
8798       emit_loadreg(CCREG,HOST_CCREG);
8799     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
8800     add_to_linker(out,start+i*4,0);
8801     emit_jmp(0);
8802   }
8803
8804   // TODO: delay slot stubs?
8805   // Stubs
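  // Out-of-line slow paths collected during assembly: memory accesses that miss
  // the fast path, cycle-count checks, code-invalidation checks, and so on.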
8806   for(i=0;i<stubcount;i++)
8807   {
8808     switch(stubs[i].type)
8809     {
8810       case LOADB_STUB:
8811       case LOADH_STUB:
8812       case LOADW_STUB:
8813       case LOADD_STUB:
8814       case LOADBU_STUB:
8815       case LOADHU_STUB:
8816         do_readstub(i);break;
8817       case STOREB_STUB:
8818       case STOREH_STUB:
8819       case STOREW_STUB:
8820       case STORED_STUB:
8821         do_writestub(i);break;
8822       case CC_STUB:
8823         do_ccstub(i);break;
8824       case INVCODE_STUB:
8825         do_invstub(i);break;
8826       case FP_STUB:
8827         do_cop1stub(i);break;
8828       case STORELR_STUB:
8829         do_unalignedwritestub(i);break;
8830     }
8831   }
8832
8833   if (instr_addr0_override)
8834     instr_addr[0] = instr_addr0_override;
8835
8836   /* Pass 9 - Linker */
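  // Resolve the branches recorded in link_addr[]: internal targets are patched
  // straight to instr_addr[], external targets get an extjump trampoline plus,
  // when the target block is already compiled, a direct patch to it.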
8837   for(i=0;i<linkcount;i++)
8838   {
8839     assem_debug("%p -> %8x\n",link_addr[i].addr,link_addr[i].target);
8840     literal_pool(64);
8841     if (!link_addr[i].ext)
8842     {
8843       void *stub = out;
8844       void *addr = check_addr(link_addr[i].target);
8845       emit_extjump(link_addr[i].addr, link_addr[i].target);
8846       if (addr) {
8847         set_jump_target(link_addr[i].addr, addr);
8848         add_link(link_addr[i].target,stub);
8849       }
8850       else
8851         set_jump_target(link_addr[i].addr, stub);
8852     }
8853     else
8854     {
8855       // Internal branch
8856       int target=(link_addr[i].target-start)>>2;
8857       assert(target>=0&&target<slen);
8858       assert(instr_addr[target]);
8859       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
8860       //set_jump_target_fillslot(link_addr[i].addr,instr_addr[target],link_addr[i].ext>>1);
8861       //#else
8862       set_jump_target(link_addr[i].addr, instr_addr[target]);
8863       //#endif
8864     }
8865   }
8866   // External Branch Targets (jump_in)
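  // For every branch target, emit a dirty-check stub and register it in the
  // jump_dirty/jump_in lists; an existing hash table entry for the address is
  // redirected to the new code.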
8867   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
8868   for(i=0;i<slen;i++)
8869   {
8870     if(bt[i]||i==0)
8871     {
8872       if(instr_addr[i]) // TODO - delay slots (=null)
8873       {
8874         u_int vaddr=start+i*4;
8875         u_int page=get_page(vaddr);
8876         u_int vpage=get_vpage(vaddr);
8877         literal_pool(256);
8878         {
8879           assem_debug("%p (%d) <- %8x\n",instr_addr[i],i,start+i*4);
8880           assem_debug("jump_in: %x\n",start+i*4);
8881           ll_add(jump_dirty+vpage,vaddr,out);
8882           void *entry_point = do_dirty_stub(i);
8883           ll_add_flags(jump_in+page,vaddr,state_rflags,entry_point);
8884           // If there was an existing entry in the hash table,
8885           // replace it with the new address.
8886           // Don't add new entries.  We'll insert the
8887           // ones that actually get used in check_addr().
8888           struct ht_entry *ht_bin = hash_table_get(vaddr);
8889           if (ht_bin->vaddr[0] == vaddr)
8890             ht_bin->tcaddr[0] = entry_point;
8891           if (ht_bin->vaddr[1] == vaddr)
8892             ht_bin->tcaddr[1] = entry_point;
8893         }
8894       }
8895     }
8896   }
8897   // Write out the literal pool if necessary
8898   literal_pool(0);
8899   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
8900   // Align code
8901   if(((u_int)out)&7) emit_addnop(13);
8902   #endif
8903   assert(out - (u_char *)beginning < MAX_OUTPUT_BLOCK_SIZE);
8904   //printf("shadow buffer: %p-%p\n",copy,(u_char *)copy+slen*4);
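  // Keep a copy of the source MIPS code (the shadow buffer) so the dirty stubs
  // can later check whether the original code was modified.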
8905   memcpy(copy,source,slen*4);
8906   copy+=slen*4;
8907
8908   end_block(beginning);
8909
8910   // If we're within 256K of the end of the buffer,
8911   // start over from the beginning. (Is 256K enough?)
8912   if (out > translation_cache+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE)
8913     out = translation_cache;
8914
8915   // Trap writes to any of the pages we compiled
8916   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
8917     invalid_code[i]=0;
8918   }
8919   inv_code_start=inv_code_end=~0;
8920
8921   // for PCSX we need to mark all mirrors too
8922   if(get_page(start)<(RAM_SIZE>>12))
8923     for(i=start>>12;i<=(start+slen*4)>>12;i++)
8924       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
8925       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
8926       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
8927
8928   /* Pass 10 - Free memory by expiring oldest blocks */
8929
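  // expirep sweeps 0..65535: bits 0-10 index the page lists, bits 11-12 pick
  // which structure to clear (jump_in/jump_dirty, pointers into jump_out, the
  // hash table, jump_out entries) and bits 13-15 pick which eighth of the
  // translation cache to expire next.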
8930   int end=(((out-translation_cache)>>(TARGET_SIZE_2-16))+16384)&65535;
8931   while(expirep!=end)
8932   {
8933     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
8934     uintptr_t base=(uintptr_t)translation_cache+((expirep>>13)<<shift); // Base address of this block
8935     inv_debug("EXP: Phase %d\n",expirep);
8936     switch((expirep>>11)&3)
8937     {
8938       case 0:
8939         // Clear jump_in and jump_dirty
8940         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
8941         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
8942         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
8943         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
8944         break;
8945       case 1:
8946         // Clear pointers
8947         ll_kill_pointers(jump_out[expirep&2047],base,shift);
8948         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
8949         break;
8950       case 2:
8951         // Clear hash table
8952         for(i=0;i<32;i++) {
8953           struct ht_entry *ht_bin = &hash_table[((expirep&2047)<<5)+i];
8954           if (((uintptr_t)ht_bin->tcaddr[1]>>shift) == (base>>shift) ||
8955              (((uintptr_t)ht_bin->tcaddr[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
8956             inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[1],ht_bin->tcaddr[1]);
8957             ht_bin->vaddr[1] = -1;
8958             ht_bin->tcaddr[1] = NULL;
8959           }
8960           if (((uintptr_t)ht_bin->tcaddr[0]>>shift) == (base>>shift) ||
8961              (((uintptr_t)ht_bin->tcaddr[0]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
8962             inv_debug("EXP: Remove hash %x -> %p\n",ht_bin->vaddr[0],ht_bin->tcaddr[0]);
8963             ht_bin->vaddr[0] = ht_bin->vaddr[1];
8964             ht_bin->tcaddr[0] = ht_bin->tcaddr[1];
8965             ht_bin->vaddr[1] = -1;
8966             ht_bin->tcaddr[1] = NULL;
8967           }
8968         }
8969         break;
8970       case 3:
8971         // Clear jump_out
8972         #if defined(__arm__) || defined(__aarch64__)
8973         if((expirep&2047)==0)
8974           do_clear_cache();
8975         #endif
8976         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
8977         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
8978         break;
8979     }
8980     expirep=(expirep+1)&65535;
8981   }
8982   return 0;
8983 }
8984
8985 // vim:shiftwidth=2:expandtab