Rearrange files for new_dynarec
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <errno.h>
25 #include <sys/mman.h>
26 #ifdef __MACH__
27 #include <libkern/OSCacheControl.h>
28 #endif
29 #ifdef _3DS
30 #include <3ds_utils.h>
31 #endif
32 #ifdef VITA
33 #include <psp2/kernel/sysmem.h>
34 static int sceBlock;
35 int getVMBlock();
36 #endif
37
38 #include "new_dynarec_config.h"
39 #include "backends/psx/emu_if.h" //emulator interface
40
41 //#define DISASM
42 //#define assem_debug printf
43 //#define inv_debug printf
44 #define assem_debug(...)
45 #define inv_debug(...)
46
47 #ifdef __i386__
48 #include "assem_x86.h"
49 #endif
50 #ifdef __x86_64__
51 #include "assem_x64.h"
52 #endif
53 #ifdef __arm__
54 #include "arm/assem_arm.h"
55 #endif
56
57 #ifdef VITA
58 int _newlib_vm_size_user = 1 << TARGET_SIZE_2;
59 #endif
60
61 #define MAXBLOCK 4096 // max MIPS instructions per compiled block
62 #define MAX_OUTPUT_BLOCK_SIZE 262144 // max bytes of native code emitted per block
63
64 struct regstat
65 {
66   signed char regmap_entry[HOST_REGS];
67   signed char regmap[HOST_REGS];
68   uint64_t was32;                // MIPS regs holding sign-extended 32-bit values on entry
69   uint64_t is32;                 // same, after this instruction
70   uint64_t wasdirty;             // host regs with values not yet written back, on entry
71   uint64_t dirty;                // same, after this instruction
72   uint64_t u;                    // MIPS regs whose value is unneeded (dead) from here on
73   uint64_t uu;                   // same, for the upper halves of 64-bit regs
74   u_int wasconst;
75   u_int isconst;
76   u_int loadedconst;             // host regs that have constants loaded
77   u_int waswritten;              // MIPS regs that were used as store base before
78 };
79
80 // note: asm depends on this layout
81 struct ll_entry
82 {
83   u_int vaddr;
84   u_int reg_sv_flags;
85   void *addr;
86   struct ll_entry *next;
87 };
88
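  // Lookup structures for compiled code:
  //  hash_table - each bin caches two vaddr/native-code pairs ([0]/[1] most recent, [2]/[3] previous)
  //  jump_in    - per-page lists of block entry points that are known to be clean
  //  jump_dirty - per-page lists of blocks whose source may have changed (re-checked with verify_dirty)
  //  jump_out   - per-page lists of external jumps into a page, unlinked when the page is invalidated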
89   // used by asm:
90   u_char *out;
91   u_int hash_table[65536][4]  __attribute__((aligned(16)));
92   struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
93   struct ll_entry *jump_dirty[4096];
94
95   static struct ll_entry *jump_out[4096];
96   static u_int start;
97   static u_int *source;
98   static char insn[MAXBLOCK][10];
99   static u_char itype[MAXBLOCK];
100   static u_char opcode[MAXBLOCK];
101   static u_char opcode2[MAXBLOCK];
102   static u_char bt[MAXBLOCK];
103   static u_char rs1[MAXBLOCK];
104   static u_char rs2[MAXBLOCK];
105   static u_char rt1[MAXBLOCK];
106   static u_char rt2[MAXBLOCK];
107   static u_char us1[MAXBLOCK];
108   static u_char us2[MAXBLOCK];
109   static u_char dep1[MAXBLOCK];
110   static u_char dep2[MAXBLOCK];
111   static u_char lt1[MAXBLOCK];
112   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
113   static uint64_t gte_rt[MAXBLOCK];
114   static uint64_t gte_unneeded[MAXBLOCK];
115   static u_int smrv[32]; // speculated MIPS register values
116   static u_int smrv_strong; // mask of regs that are likely to have correct values
117   static u_int smrv_weak; // same, but somewhat less likely
118   static u_int smrv_strong_next; // same, but after current insn executes
119   static u_int smrv_weak_next;
120   static int imm[MAXBLOCK];
121   static u_int ba[MAXBLOCK];
122   static char likely[MAXBLOCK];
123   static char is_ds[MAXBLOCK];
124   static char ooo[MAXBLOCK];
125   static uint64_t unneeded_reg[MAXBLOCK];
126   static uint64_t unneeded_reg_upper[MAXBLOCK];
127   static uint64_t branch_unneeded_reg[MAXBLOCK];
128   static uint64_t branch_unneeded_reg_upper[MAXBLOCK];
129   static signed char regmap_pre[MAXBLOCK][HOST_REGS];
130   static uint64_t current_constmap[HOST_REGS];
131   static uint64_t constmap[MAXBLOCK][HOST_REGS];
132   static struct regstat regs[MAXBLOCK];
133   static struct regstat branch_regs[MAXBLOCK];
134   static signed char minimum_free_regs[MAXBLOCK];
135   static u_int needed_reg[MAXBLOCK];
136   static u_int wont_dirty[MAXBLOCK];
137   static u_int will_dirty[MAXBLOCK];
138   static int ccadj[MAXBLOCK];
139   static int slen;
140   static u_int instr_addr[MAXBLOCK];
141   static u_int link_addr[MAXBLOCK][3];
142   static int linkcount;
143   static u_int stubs[MAXBLOCK*3][8];
144   static int stubcount;
145   static u_int literals[1024][2];
146   static int literalcount;
147   static int is_delayslot;
148   static int cop1_usable;
149   static char shadow[1048576]  __attribute__((aligned(16)));
150   static void *copy;
151   static int expirep;
152   static u_int stop_after_jal;
153 #ifndef RAM_FIXED
154   static u_int ram_offset;
155 #else
156   static const u_int ram_offset=0;
157 #endif
158
159   int new_dynarec_hacks;
160   int new_dynarec_did_compile;
161   extern u_char restore_candidate[512];
162   extern int cycle_count;
163
164   /* registers that may be allocated */
165   /* 1-31 gpr */
166 #define HIREG 32 // hi
167 #define LOREG 33 // lo
168 #define FSREG 34 // FPU status (FCSR)
169 #define CSREG 35 // Coprocessor status
170 #define CCREG 36 // Cycle count
171 #define INVCP 37 // Pointer to invalid_code
172 //#define MMREG 38 // Pointer to memory_map
173 #define ROREG 39 // ram offset (if rdram!=0x80000000)
174 #define TEMPREG 40
175 #define FTEMP 40 // FPU temporary register
176 #define PTEMP 41 // Prefetch temporary register
177 //#define TLREG 42 // TLB mapping offset
178 #define RHASH 43 // Return address hash
179 #define RHTBL 44 // Return address hash table address
180 #define RTEMP 45 // JR/JALR address register
181 #define MAXREG 45
182 #define AGEN1 46 // Address generation temporary register
183 //#define AGEN2 47 // Address generation temporary register
184 //#define MGEN1 48 // Maptable address generation temporary register
185 //#define MGEN2 49 // Maptable address generation temporary register
186 #define BTREG 50 // Branch target temporary register
187
188   /* instruction types */
189 #define NOP 0     // No operation
190 #define LOAD 1    // Load
191 #define STORE 2   // Store
192 #define LOADLR 3  // Unaligned load
193 #define STORELR 4 // Unaligned store
194 #define MOV 5     // Move
195 #define ALU 6     // Arithmetic/logic
196 #define MULTDIV 7 // Multiply/divide
197 #define SHIFT 8   // Shift by register
198 #define SHIFTIMM 9 // Shift by immediate
199 #define IMM16 10  // 16-bit immediate
200 #define RJUMP 11  // Unconditional jump to register
201 #define UJUMP 12  // Unconditional jump
202 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
203 #define SJUMP 14  // Conditional branch (regimm format)
204 #define COP0 15   // Coprocessor 0
205 #define COP1 16   // Coprocessor 1
206 #define C1LS 17   // Coprocessor 1 load/store
207 #define FJUMP 18  // Conditional branch (floating point)
208 #define FLOAT 19  // Floating point unit
209 #define FCONV 20  // Convert integer to float
210 #define FCOMP 21  // Floating point compare (sets FSREG)
211 #define SYSCALL 22 // SYSCALL
212 #define OTHER 23  // Other
213 #define SPAN 24   // Branch/delay slot spans 2 pages
214 #define NI 25     // Not implemented
215 #define HLECALL 26 // PCSX fake opcodes for HLE
216 #define COP2 27   // Coprocessor 2 move
217 #define C2LS 28   // Coprocessor 2 load/store
218 #define C2OP 29   // Coprocessor 2 operation
219 #define INTCALL 30 // Call interpreter to handle rare corner cases
220
221   /* stubs */
222 #define CC_STUB 1
223 #define FP_STUB 2
224 #define LOADB_STUB 3
225 #define LOADH_STUB 4
226 #define LOADW_STUB 5
227 #define LOADD_STUB 6
228 #define LOADBU_STUB 7
229 #define LOADHU_STUB 8
230 #define STOREB_STUB 9
231 #define STOREH_STUB 10
232 #define STOREW_STUB 11
233 #define STORED_STUB 12
234 #define STORELR_STUB 13
235 #define INVCODE_STUB 14
236
237   /* branch codes */
238 #define TAKEN 1
239 #define NOTTAKEN 2
240 #define NULLDS 3
241
242 // asm linkage
243 int new_recompile_block(int addr);
244 void *get_addr_ht(u_int vaddr);
245 void invalidate_block(u_int block);
246 void invalidate_addr(u_int addr);
247 void remove_hash(int vaddr);
248 void dyna_linker();
249 void dyna_linker_ds();
250 void verify_code();
251 void verify_code_vm();
252 void verify_code_ds();
253 void cc_interrupt();
254 void fp_exception();
255 void fp_exception_ds();
256 void jump_syscall_hle();
257 void jump_hlecall();
258 void jump_intcall();
259 void new_dyna_leave();
260
261 // Needed by assembler
262 static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
263 static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
264 static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
265 static void load_all_regs(signed char i_regmap[]);
266 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
267 static void load_regs_entry(int t);
268 static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
269
270 static int verify_dirty(u_int *ptr);
271 static int get_final_value(int hr, int i, int *value);
272 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e);
273 static void add_to_linker(int addr,int target,int ext);
274
275 static int tracedebug=0;
276
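// Flip the translation cache between writable (is_x=0) and executable (is_x=1) on
// platforms that disallow mappings which are both writable and executable (NO_WRITE_EXEC).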
277 static void mprotect_w_x(void *start, void *end, int is_x)
278 {
279 #ifdef NO_WRITE_EXEC
280   #if defined(VITA)
281   // *Open* enables write on all memory that was
282   // allocated by sceKernelAllocMemBlockForVM()?
283   if (is_x)
284     sceKernelCloseVMDomain();
285   else
286     sceKernelOpenVMDomain();
287   #else
288   u_long mstart = (u_long)start & ~4095ul;
289   u_long mend = (u_long)end;
290   if (mprotect((void *)mstart, mend - mstart,
291                PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
292     SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
293   #endif
294 #endif
295 }
296
297 static void start_tcache_write(void *start, void *end)
298 {
299   mprotect_w_x(start, end, 0);
300 }
301
302 static void end_tcache_write(void *start, void *end)
303 {
304 #ifdef __arm__
305   size_t len = (char *)end - (char *)start;
306   #if   defined(__BLACKBERRY_QNX__)
307   msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
308   #elif defined(__MACH__)
309   sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
310   #elif defined(VITA)
311   sceKernelSyncVMDomain(sceBlock, start, len);
312   #elif defined(_3DS)
313   ctr_flush_invalidate_cache();
314   #else
315   __clear_cache(start, end);
316   #endif
317   (void)len;
318 #endif
319
320   mprotect_w_x(start, end, 1);
321 }
322
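// Make the next region of the translation cache writable before emitting code; the
// window is capped at MAX_OUTPUT_BLOCK_SIZE or the end of the cache, whichever is closer.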
323 static void *start_block(void)
324 {
325   u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
326   if (end > (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2))
327     end = (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2);
328   start_tcache_write(out, end);
329   return out;
330 }
331
332 static void end_block(void *start)
333 {
334   end_tcache_write(start, out);
335 }
336
337 //#define DEBUG_CYCLE_COUNT 1
338
339 #define NO_CYCLE_PENALTY_THR 12
340
341 int cycle_multiplier; // 100 for 1.0
342
343 static int CLOCK_ADJUST(int x)
344 {
345   int s=(x>>31)|1;
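  // s is +1 for x>=0 and -1 for x<0, so s*50 rounds to the nearest integer:
  // e.g. with cycle_multiplier=150, CLOCK_ADJUST(2) = (2*150 + 50)/100 = 3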
346   return (x * cycle_multiplier + s * 50) / 100;
347 }
348
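// Map a virtual address to a page index (0-4095): segment bits are stripped and the
// RAM mirrors are folded into pages 0-511, while everything else (BIOS, I/O) is hashed
// into pages 2048-4095, e.g. 0xbfc00123 (BIOS) -> 2048+1024 = 3072.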
349 static u_int get_page(u_int vaddr)
350 {
351   u_int page=vaddr&~0xe0000000;
352   if (page < 0x1000000)
353     page &= ~0x0e00000; // RAM mirrors
354   page>>=12;
355   if(page>2048) page=2048+(page&2047);
356   return page;
357 }
358
359 // no virtual mem in PCSX
360 static u_int get_vpage(u_int vaddr)
361 {
362   return get_page(vaddr);
363 }
364
365 // Get address from virtual address
366 // This is called from the recompiled JR/JALR instructions
367 void *get_addr(u_int vaddr)
368 {
369   u_int page=get_page(vaddr);
370   u_int vpage=get_vpage(vaddr);
371   struct ll_entry *head;
372   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
373   head=jump_in[page];
374   while(head!=NULL) {
375     if(head->vaddr==vaddr) {
376   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
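      // Promote this block to the most-recent slot of its hash bin (slots 0/1),
      // demoting the previous occupant to slots 2/3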
377       u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
378       ht_bin[3]=ht_bin[1];
379       ht_bin[2]=ht_bin[0];
380       ht_bin[1]=(u_int)head->addr;
381       ht_bin[0]=vaddr;
382       return head->addr;
383     }
384     head=head->next;
385   }
386   head=jump_dirty[vpage];
387   while(head!=NULL) {
388     if(head->vaddr==vaddr) {
389       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
390       // Don't restore blocks which are about to expire from the cache
391       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
392       if(verify_dirty(head->addr)) {
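        // The generated code still matches RAM, so re-activate the block instead of recompiling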
393         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
394         invalid_code[vaddr>>12]=0;
395         inv_code_start=inv_code_end=~0;
396         if(vpage<2048) {
397           restore_candidate[vpage>>3]|=1<<(vpage&7);
398         }
399         else restore_candidate[page>>3]|=1<<(page&7);
400         u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
401         if(ht_bin[0]==vaddr) {
402           ht_bin[1]=(u_int)head->addr; // Replace existing entry
403         }
404         else
405         {
406           ht_bin[3]=ht_bin[1];
407           ht_bin[2]=ht_bin[0];
408           ht_bin[1]=(int)head->addr;
409           ht_bin[0]=vaddr;
410         }
411         return head->addr;
412       }
413     }
414     head=head->next;
415   }
416   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
417   int r=new_recompile_block(vaddr);
418   if(r==0) return get_addr(vaddr);
419   // Execute in unmapped page, generate pagefault exception
420   Status|=2;
421   Cause=(vaddr<<31)|0x8;
422   EPC=(vaddr&1)?vaddr-5:vaddr;
423   BadVAddr=(vaddr&~1);
424   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
425   EntryHi=BadVAddr&0xFFFFE000;
426   return get_addr_ht(0x80000000);
427 }
428 // Look up address in hash table first
429 void *get_addr_ht(u_int vaddr)
430 {
431   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
432   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
433   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
434   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
435   return get_addr(vaddr);
436 }
437
438 void clear_all_regs(signed char regmap[])
439 {
440   int hr;
441   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
442 }
443
444 signed char get_reg(signed char regmap[],int r)
445 {
446   int hr;
447   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
448   return -1;
449 }
450
451 // Find a register that is available for two consecutive cycles
452 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
453 {
454   int hr;
455   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
456   return -1;
457 }
458
459 int count_free_regs(signed char regmap[])
460 {
461   int count=0;
462   int hr;
463   for(hr=0;hr<HOST_REGS;hr++)
464   {
465     if(hr!=EXCLUDE_REG) {
466       if(regmap[hr]<0) count++;
467     }
468   }
469   return count;
470 }
471
472 void dirty_reg(struct regstat *cur,signed char reg)
473 {
474   int hr;
475   if(!reg) return;
476   for (hr=0;hr<HOST_REGS;hr++) {
477     if((cur->regmap[hr]&63)==reg) {
478       cur->dirty|=1<<hr;
479     }
480   }
481 }
482
483 // If we dirty the lower half of a 64 bit register which is now being
484 // sign-extended, we need to dump the upper half.
485 // Note: Do this only after completion of the instruction, because
486 // some instructions may need to read the full 64-bit value even if
487 // overwriting it (eg SLTI, DSRA32).
488 static void flush_dirty_uppers(struct regstat *cur)
489 {
490   int hr,reg;
491   for (hr=0;hr<HOST_REGS;hr++) {
492     if((cur->dirty>>hr)&1) {
493       reg=cur->regmap[hr];
494       if(reg>=64)
495         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
496     }
497   }
498 }
499
500 void set_const(struct regstat *cur,signed char reg,uint64_t value)
501 {
502   int hr;
503   if(!reg) return;
504   for (hr=0;hr<HOST_REGS;hr++) {
505     if(cur->regmap[hr]==reg) {
506       cur->isconst|=1<<hr;
507       current_constmap[hr]=value;
508     }
509     else if((cur->regmap[hr]^64)==reg) {
510       cur->isconst|=1<<hr;
511       current_constmap[hr]=value>>32;
512     }
513   }
514 }
515
516 void clear_const(struct regstat *cur,signed char reg)
517 {
518   int hr;
519   if(!reg) return;
520   for (hr=0;hr<HOST_REGS;hr++) {
521     if((cur->regmap[hr]&63)==reg) {
522       cur->isconst&=~(1<<hr);
523     }
524   }
525 }
526
527 int is_const(struct regstat *cur,signed char reg)
528 {
529   int hr;
530   if(reg<0) return 0;
531   if(!reg) return 1;
532   for (hr=0;hr<HOST_REGS;hr++) {
533     if((cur->regmap[hr]&63)==reg) {
534       return (cur->isconst>>hr)&1;
535     }
536   }
537   return 0;
538 }
539 uint64_t get_const(struct regstat *cur,signed char reg)
540 {
541   int hr;
542   if(!reg) return 0;
543   for (hr=0;hr<HOST_REGS;hr++) {
544     if(cur->regmap[hr]==reg) {
545       return current_constmap[hr];
546     }
547   }
548   SysPrintf("Unknown constant in r%d\n",reg);
549   exit(1);
550 }
551
552 // Least soon needed registers
553 // Look at the next ten instructions and see which registers
554 // will be used.  Try not to reallocate these.
555 void lsn(u_char hsn[], int i, int *preferred_reg)
556 {
557   int j;
558   int b=-1;
559   for(j=0;j<9;j++)
560   {
561     if(i+j>=slen) {
562       j=slen-i-1;
563       break;
564     }
565     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
566     {
567       // Don't go past an unconditional jump
568       j++;
569       break;
570     }
571   }
572   for(;j>=0;j--)
573   {
574     if(rs1[i+j]) hsn[rs1[i+j]]=j;
575     if(rs2[i+j]) hsn[rs2[i+j]]=j;
576     if(rt1[i+j]) hsn[rt1[i+j]]=j;
577     if(rt2[i+j]) hsn[rt2[i+j]]=j;
578     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
579       // Stores can allocate zero
580       hsn[rs1[i+j]]=j;
581       hsn[rs2[i+j]]=j;
582     }
583     // On some architectures stores need invc_ptr
584     #if defined(HOST_IMM8)
585     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
586       hsn[INVCP]=j;
587     }
588     #endif
589     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
590     {
591       hsn[CCREG]=j;
592       b=j;
593     }
594   }
595   if(b>=0)
596   {
597     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
598     {
599       // Follow first branch
600       int t=(ba[i+b]-start)>>2;
601       j=7-b;if(t+j>=slen) j=slen-t-1;
602       for(;j>=0;j--)
603       {
604         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
605         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
606         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
607         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
608       }
609     }
610     // TODO: preferred register based on backward branch
611   }
612   // Delay slot should preferably not overwrite branch conditions or cycle count
613   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
614     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
615     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
616     hsn[CCREG]=1;
617     // ...or hash tables
618     hsn[RHASH]=1;
619     hsn[RHTBL]=1;
620   }
621   // Coprocessor load/store needs FTEMP, even if not declared
622   if(itype[i]==C1LS||itype[i]==C2LS) {
623     hsn[FTEMP]=0;
624   }
625   // Load L/R also uses FTEMP as a temporary register
626   if(itype[i]==LOADLR) {
627     hsn[FTEMP]=0;
628   }
629   // Also SWL/SWR/SDL/SDR
630   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
631     hsn[FTEMP]=0;
632   }
633   // Don't remove the miniht registers
634   if(itype[i]==UJUMP||itype[i]==RJUMP)
635   {
636     hsn[RHASH]=0;
637     hsn[RHTBL]=0;
638   }
639 }
640
641 // We only want to allocate registers if we're going to use them again soon
642 int needed_again(int r, int i)
643 {
644   int j;
645   int b=-1;
646   int rn=10;
647
648   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
649   {
650     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
651       return 0; // Don't need any registers if exiting the block
652   }
653   for(j=0;j<9;j++)
654   {
655     if(i+j>=slen) {
656       j=slen-i-1;
657       break;
658     }
659     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
660     {
661       // Don't go past an unconditional jump
662       j++;
663       break;
664     }
665     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
666     {
667       break;
668     }
669   }
670   for(;j>=1;j--)
671   {
672     if(rs1[i+j]==r) rn=j;
673     if(rs2[i+j]==r) rn=j;
674     if((unneeded_reg[i+j]>>r)&1) rn=10;
675     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
676     {
677       b=j;
678     }
679   }
680   /*
681   if(b>=0)
682   {
683     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
684     {
685       // Follow first branch
686       int o=rn;
687       int t=(ba[i+b]-start)>>2;
688       j=7-b;if(t+j>=slen) j=slen-t-1;
689       for(;j>=0;j--)
690       {
691         if(!((unneeded_reg[t+j]>>r)&1)) {
692           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
693           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
694         }
695         else rn=o;
696       }
697     }
698   }*/
699   if(rn<10) return 1;
700   (void)b;
701   return 0;
702 }
703
704 // Try to match register allocations at the end of a loop with those
705 // at the beginning
706 int loop_reg(int i, int r, int hr)
707 {
708   int j,k;
709   for(j=0;j<9;j++)
710   {
711     if(i+j>=slen) {
712       j=slen-i-1;
713       break;
714     }
715     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
716     {
717       // Don't go past an unconditional jump
718       j++;
719       break;
720     }
721   }
722   k=0;
723   if(i>0){
724     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
725       k--;
726   }
727   for(;k<j;k++)
728   {
729     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
730     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
731     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
732     {
733       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
734       {
735         int t=(ba[i+k]-start)>>2;
736         int reg=get_reg(regs[t].regmap_entry,r);
737         if(reg>=0) return reg;
738         //reg=get_reg(regs[t+1].regmap_entry,r);
739         //if(reg>=0) return reg;
740       }
741     }
742   }
743   return hr;
744 }
745
746
747 // Allocate every register, preserving source/target regs
748 void alloc_all(struct regstat *cur,int i)
749 {
750   int hr;
751
752   for(hr=0;hr<HOST_REGS;hr++) {
753     if(hr!=EXCLUDE_REG) {
754       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
755          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
756       {
757         cur->regmap[hr]=-1;
758         cur->dirty&=~(1<<hr);
759       }
760       // Don't need zeros
761       if((cur->regmap[hr]&63)==0)
762       {
763         cur->regmap[hr]=-1;
764         cur->dirty&=~(1<<hr);
765       }
766     }
767   }
768 }
769
770 #ifdef __i386__
771 #include "assem_x86.c"
772 #endif
773 #ifdef __x86_64__
774 #include "assem_x64.c"
775 #endif
776 #ifdef __arm__
777 #include "arm/assem_arm.c"
778 #endif
779
780 // Add virtual address mapping to linked list
781 void ll_add(struct ll_entry **head,int vaddr,void *addr)
782 {
783   struct ll_entry *new_entry;
784   new_entry=malloc(sizeof(struct ll_entry));
785   assert(new_entry!=NULL);
786   new_entry->vaddr=vaddr;
787   new_entry->reg_sv_flags=0;
788   new_entry->addr=addr;
789   new_entry->next=*head;
790   *head=new_entry;
791 }
792
793 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
794 {
795   ll_add(head,vaddr,addr);
796   (*head)->reg_sv_flags=reg_sv_flags;
797 }
798
799 // Check if an address is already compiled
800 // but don't return addresses which are about to expire from the cache
801 void *check_addr(u_int vaddr)
802 {
803   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
804   if(ht_bin[0]==vaddr) {
805     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
806       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
807   }
808   if(ht_bin[2]==vaddr) {
809     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
810       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
811   }
812   u_int page=get_page(vaddr);
813   struct ll_entry *head;
814   head=jump_in[page];
815   while(head!=NULL) {
816     if(head->vaddr==vaddr) {
817       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
818         // Update existing entry with current address
819         if(ht_bin[0]==vaddr) {
820           ht_bin[1]=(int)head->addr;
821           return head->addr;
822         }
823         if(ht_bin[2]==vaddr) {
824           ht_bin[3]=(int)head->addr;
825           return head->addr;
826         }
827         // Insert into hash table with low priority.
828         // Don't evict existing entries, as they are probably
829         // addresses that are being accessed frequently.
830         if(ht_bin[0]==-1) {
831           ht_bin[1]=(int)head->addr;
832           ht_bin[0]=vaddr;
833         }else if(ht_bin[2]==-1) {
834           ht_bin[3]=(int)head->addr;
835           ht_bin[2]=vaddr;
836         }
837         return head->addr;
838       }
839     }
840     head=head->next;
841   }
842   return 0;
843 }
844
845 void remove_hash(int vaddr)
846 {
847   //printf("remove hash: %x\n",vaddr);
848   u_int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
849   if(ht_bin[2]==vaddr) {
850     ht_bin[2]=ht_bin[3]=-1;
851   }
852   if(ht_bin[0]==vaddr) {
853     ht_bin[0]=ht_bin[2];
854     ht_bin[1]=ht_bin[3];
855     ht_bin[2]=ht_bin[3]=-1;
856   }
857 }
858
859 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
860 {
861   struct ll_entry *next;
862   while(*head) {
863     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
864        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
865     {
866       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
867       remove_hash((*head)->vaddr);
868       next=(*head)->next;
869       free(*head);
870       *head=next;
871     }
872     else
873     {
874       head=&((*head)->next);
875     }
876   }
877 }
878
879 // Remove all entries from linked list
880 void ll_clear(struct ll_entry **head)
881 {
882   struct ll_entry *cur;
883   struct ll_entry *next;
884   if((cur=*head)) {
885     *head=0;
886     while(cur) {
887       next=cur->next;
888       free(cur);
889       cur=next;
890     }
891   }
892 }
893
894 // Dereference the pointers and remove them if they match
895 static void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
896 {
897   while(head) {
898     int ptr=get_pointer(head->addr);
899     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
900     if(((ptr>>shift)==(addr>>shift)) ||
901        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
902     {
903       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
904       void *host_addr=find_extjump_insn(head->addr);
905       #ifdef __arm__
906         mark_clear_cache(host_addr);
907       #endif
908       set_jump_target((int)host_addr,(int)head->addr);
909     }
910     head=head->next;
911   }
912 }
913
914 // This is called when we write to a compiled block (see do_invstub)
915 void invalidate_page(u_int page)
916 {
917   struct ll_entry *head;
918   struct ll_entry *next;
919   head=jump_in[page];
920   jump_in[page]=0;
921   while(head!=NULL) {
922     inv_debug("INVALIDATE: %x\n",head->vaddr);
923     remove_hash(head->vaddr);
924     next=head->next;
925     free(head);
926     head=next;
927   }
928   head=jump_out[page];
929   jump_out[page]=0;
930   while(head!=NULL) {
931     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
932     void *host_addr=find_extjump_insn(head->addr);
933     #ifdef __arm__
934       mark_clear_cache(host_addr);
935     #endif
936     set_jump_target((int)host_addr,(int)head->addr);
937     next=head->next;
938     free(head);
939     head=next;
940   }
941 }
942
943 static void invalidate_block_range(u_int block, u_int first, u_int last)
944 {
945   u_int page=get_page(block<<12);
946   //printf("first=%d last=%d\n",first,last);
947   invalidate_page(page);
948   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
949   assert(last<page+5);
950   // Invalidate the adjacent pages if a block crosses a 4K boundary
951   while(first<page) {
952     invalidate_page(first);
953     first++;
954   }
955   for(first=page+1;first<last;first++) {
956     invalidate_page(first);
957   }
958   #ifdef __arm__
959     do_clear_cache();
960   #endif
961
962   // Don't trap writes
963   invalid_code[block]=1;
964
965   #ifdef USE_MINI_HT
966   memset(mini_ht,-1,sizeof(mini_ht));
967   #endif
968 }
969
970 void invalidate_block(u_int block)
971 {
972   u_int page=get_page(block<<12);
973   u_int vpage=get_vpage(block<<12);
974   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
975   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
976   u_int first,last;
977   first=last=page;
978   struct ll_entry *head;
979   head=jump_dirty[vpage];
980   //printf("page=%d vpage=%d\n",page,vpage);
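  // Widen [first,last] to cover every RAM page touched by dirty blocks that hash to this vpage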
981   while(head!=NULL) {
982     u_int start,end;
983     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
984       get_bounds((int)head->addr,&start,&end);
985       //printf("start: %x end: %x\n",start,end);
986       if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
987         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
988           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
989           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
990         }
991       }
992     }
993     head=head->next;
994   }
995   invalidate_block_range(block,first,last);
996 }
997
998 void invalidate_addr(u_int addr)
999 {
1000   //static int rhits;
1001   // this check is done by the caller
1002   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
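  // inv_code_start/inv_code_end track an address range known to contain no compiled code,
  // letting the caller skip repeated invalidation calls for nearby writes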
1003   u_int page=get_vpage(addr);
1004   if(page<2048) { // RAM
1005     struct ll_entry *head;
1006     u_int addr_min=~0, addr_max=0;
1007     u_int mask=RAM_SIZE-1;
1008     u_int addr_main=0x80000000|(addr&mask);
1009     int pg1;
1010     inv_code_start=addr_main&~0xfff;
1011     inv_code_end=addr_main|0xfff;
1012     pg1=page;
1013     if (pg1>0) {
1014       // must check the previous page too because of spans
1015       pg1--;
1016       inv_code_start-=0x1000;
1017     }
1018     for(;pg1<=page;pg1++) {
1019       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1020         u_int start,end;
1021         get_bounds((int)head->addr,&start,&end);
1022         if(ram_offset) {
1023           start-=ram_offset;
1024           end-=ram_offset;
1025         }
1026         if(start<=addr_main&&addr_main<end) {
1027           if(start<addr_min) addr_min=start;
1028           if(end>addr_max) addr_max=end;
1029         }
1030         else if(addr_main<start) {
1031           if(start<inv_code_end)
1032             inv_code_end=start-1;
1033         }
1034         else {
1035           if(end>inv_code_start)
1036             inv_code_start=end;
1037         }
1038       }
1039     }
1040     if (addr_min!=~0) {
1041       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1042       inv_code_start=inv_code_end=~0;
1043       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1044       return;
1045     }
1046     else {
1047       inv_code_start=(addr&~mask)|(inv_code_start&mask);
1048       inv_code_end=(addr&~mask)|(inv_code_end&mask);
1049       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
1050       return;
1051     }
1052   }
1053   invalidate_block(addr>>12);
1054 }
1055
1056 // This is called when loading a save state.
1057 // Anything could have changed, so invalidate everything.
1058 void invalidate_all_pages()
1059 {
1060   u_int page;
1061   for(page=0;page<4096;page++)
1062     invalidate_page(page);
1063   for(page=0;page<1048576;page++)
1064     if(!invalid_code[page]) {
1065       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1066       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1067     }
1068   #ifdef USE_MINI_HT
1069   memset(mini_ht,-1,sizeof(mini_ht));
1070   #endif
1071 }
1072
1073 // Add an entry to jump_out after making a link
1074 void add_link(u_int vaddr,void *src)
1075 {
1076   u_int page=get_page(vaddr);
1077   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1078   int *ptr=(int *)(src+4);
1079   assert((*ptr&0x0fff0000)==0x059f0000);
1080   (void)ptr;
1081   ll_add(jump_out+page,vaddr,src);
1082   //int ptr=get_pointer(src);
1083   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1084 }
1085
1086 // If a code block was found to be unmodified (bit was set in
1087 // restore_candidate) and it remains unmodified (bit is clear
1088 // in invalid_code) then move the entries for that 4K page from
1089 // the dirty list to the clean list.
1090 void clean_blocks(u_int page)
1091 {
1092   struct ll_entry *head;
1093   inv_debug("INV: clean_blocks page=%d\n",page);
1094   head=jump_dirty[page];
1095   while(head!=NULL) {
1096     if(!invalid_code[head->vaddr>>12]) {
1097       // Don't restore blocks which are about to expire from the cache
1098       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1099         u_int start,end;
1100         if(verify_dirty(head->addr)) {
1101           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1102           u_int i;
1103           u_int inv=0;
1104           get_bounds((int)head->addr,&start,&end);
1105           if(start-(u_int)rdram<RAM_SIZE) {
1106             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1107               inv|=invalid_code[i];
1108             }
1109           }
1110           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1111             inv=1;
1112           }
1113           if(!inv) {
1114             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1115             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1116               u_int ppage=page;
1117               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1118               //printf("page=%x, addr=%x\n",page,head->vaddr);
1119               //assert(head->vaddr>>12==(page|0x80000));
1120               ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
1121               u_int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1122               if(ht_bin[0]==head->vaddr) {
1123                 ht_bin[1]=(u_int)clean_addr; // Replace existing entry
1124               }
1125               if(ht_bin[2]==head->vaddr) {
1126                 ht_bin[3]=(u_int)clean_addr; // Replace existing entry
1127               }
1128             }
1129           }
1130         }
1131       }
1132     }
1133     head=head->next;
1134   }
1135 }
1136
1137
1138 void mov_alloc(struct regstat *current,int i)
1139 {
1140   // Note: Don't need to actually alloc the source registers
1141   if((~current->is32>>rs1[i])&1) {
1142     //alloc_reg64(current,i,rs1[i]);
1143     alloc_reg64(current,i,rt1[i]);
1144     current->is32&=~(1LL<<rt1[i]);
1145   } else {
1146     //alloc_reg(current,i,rs1[i]);
1147     alloc_reg(current,i,rt1[i]);
1148     current->is32|=(1LL<<rt1[i]);
1149   }
1150   clear_const(current,rs1[i]);
1151   clear_const(current,rt1[i]);
1152   dirty_reg(current,rt1[i]);
1153 }
1154
1155 void shiftimm_alloc(struct regstat *current,int i)
1156 {
1157   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1158   {
1159     if(rt1[i]) {
1160       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1161       else lt1[i]=rs1[i];
1162       alloc_reg(current,i,rt1[i]);
1163       current->is32|=1LL<<rt1[i];
1164       dirty_reg(current,rt1[i]);
1165       if(is_const(current,rs1[i])) {
1166         int v=get_const(current,rs1[i]);
1167         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1168         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1169         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1170       }
1171       else clear_const(current,rt1[i]);
1172     }
1173   }
1174   else
1175   {
1176     clear_const(current,rs1[i]);
1177     clear_const(current,rt1[i]);
1178   }
1179
1180   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1181   {
1182     if(rt1[i]) {
1183       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1184       alloc_reg64(current,i,rt1[i]);
1185       current->is32&=~(1LL<<rt1[i]);
1186       dirty_reg(current,rt1[i]);
1187     }
1188   }
1189   if(opcode2[i]==0x3c) // DSLL32
1190   {
1191     if(rt1[i]) {
1192       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1193       alloc_reg64(current,i,rt1[i]);
1194       current->is32&=~(1LL<<rt1[i]);
1195       dirty_reg(current,rt1[i]);
1196     }
1197   }
1198   if(opcode2[i]==0x3e) // DSRL32
1199   {
1200     if(rt1[i]) {
1201       alloc_reg64(current,i,rs1[i]);
1202       if(imm[i]==32) {
1203         alloc_reg64(current,i,rt1[i]);
1204         current->is32&=~(1LL<<rt1[i]);
1205       } else {
1206         alloc_reg(current,i,rt1[i]);
1207         current->is32|=1LL<<rt1[i];
1208       }
1209       dirty_reg(current,rt1[i]);
1210     }
1211   }
1212   if(opcode2[i]==0x3f) // DSRA32
1213   {
1214     if(rt1[i]) {
1215       alloc_reg64(current,i,rs1[i]);
1216       alloc_reg(current,i,rt1[i]);
1217       current->is32|=1LL<<rt1[i];
1218       dirty_reg(current,rt1[i]);
1219     }
1220   }
1221 }
1222
1223 void shift_alloc(struct regstat *current,int i)
1224 {
1225   if(rt1[i]) {
1226     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1227     {
1228       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1229       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1230       alloc_reg(current,i,rt1[i]);
1231       if(rt1[i]==rs2[i]) {
1232         alloc_reg_temp(current,i,-1);
1233         minimum_free_regs[i]=1;
1234       }
1235       current->is32|=1LL<<rt1[i];
1236     } else { // DSLLV/DSRLV/DSRAV
1237       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1238       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1239       alloc_reg64(current,i,rt1[i]);
1240       current->is32&=~(1LL<<rt1[i]);
1241       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1242       {
1243         alloc_reg_temp(current,i,-1);
1244         minimum_free_regs[i]=1;
1245       }
1246     }
1247     clear_const(current,rs1[i]);
1248     clear_const(current,rs2[i]);
1249     clear_const(current,rt1[i]);
1250     dirty_reg(current,rt1[i]);
1251   }
1252 }
1253
1254 void alu_alloc(struct regstat *current,int i)
1255 {
1256   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1257     if(rt1[i]) {
1258       if(rs1[i]&&rs2[i]) {
1259         alloc_reg(current,i,rs1[i]);
1260         alloc_reg(current,i,rs2[i]);
1261       }
1262       else {
1263         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1264         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1265       }
1266       alloc_reg(current,i,rt1[i]);
1267     }
1268     current->is32|=1LL<<rt1[i];
1269   }
1270   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1271     if(rt1[i]) {
1272       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1273       {
1274         alloc_reg64(current,i,rs1[i]);
1275         alloc_reg64(current,i,rs2[i]);
1276         alloc_reg(current,i,rt1[i]);
1277       } else {
1278         alloc_reg(current,i,rs1[i]);
1279         alloc_reg(current,i,rs2[i]);
1280         alloc_reg(current,i,rt1[i]);
1281       }
1282     }
1283     current->is32|=1LL<<rt1[i];
1284   }
1285   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1286     if(rt1[i]) {
1287       if(rs1[i]&&rs2[i]) {
1288         alloc_reg(current,i,rs1[i]);
1289         alloc_reg(current,i,rs2[i]);
1290       }
1291       else
1292       {
1293         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1294         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1295       }
1296       alloc_reg(current,i,rt1[i]);
1297       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1298       {
1299         if(!((current->uu>>rt1[i])&1)) {
1300           alloc_reg64(current,i,rt1[i]);
1301         }
1302         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1303           if(rs1[i]&&rs2[i]) {
1304             alloc_reg64(current,i,rs1[i]);
1305             alloc_reg64(current,i,rs2[i]);
1306           }
1307           else
1308           {
1309             // Is it really worth it to keep 64-bit values in registers?
1310             #ifdef NATIVE_64BIT
1311             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1312             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1313             #endif
1314           }
1315         }
1316         current->is32&=~(1LL<<rt1[i]);
1317       } else {
1318         current->is32|=1LL<<rt1[i];
1319       }
1320     }
1321   }
1322   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1323     if(rt1[i]) {
1324       if(rs1[i]&&rs2[i]) {
1325         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1326           alloc_reg64(current,i,rs1[i]);
1327           alloc_reg64(current,i,rs2[i]);
1328           alloc_reg64(current,i,rt1[i]);
1329         } else {
1330           alloc_reg(current,i,rs1[i]);
1331           alloc_reg(current,i,rs2[i]);
1332           alloc_reg(current,i,rt1[i]);
1333         }
1334       }
1335       else {
1336         alloc_reg(current,i,rt1[i]);
1337         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1338           // DADD used as move, or zeroing
1339           // If we have a 64-bit source, then make the target 64 bits too
1340           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1341             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1342             alloc_reg64(current,i,rt1[i]);
1343           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1344             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1345             alloc_reg64(current,i,rt1[i]);
1346           }
1347           if(opcode2[i]>=0x2e&&rs2[i]) {
1348             // DSUB used as negation - 64-bit result
1349             // If we have a 32-bit register, extend it to 64 bits
1350             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1351             alloc_reg64(current,i,rt1[i]);
1352           }
1353         }
1354       }
1355       if(rs1[i]&&rs2[i]) {
1356         current->is32&=~(1LL<<rt1[i]);
1357       } else if(rs1[i]) {
1358         current->is32&=~(1LL<<rt1[i]);
1359         if((current->is32>>rs1[i])&1)
1360           current->is32|=1LL<<rt1[i];
1361       } else if(rs2[i]) {
1362         current->is32&=~(1LL<<rt1[i]);
1363         if((current->is32>>rs2[i])&1)
1364           current->is32|=1LL<<rt1[i];
1365       } else {
1366         current->is32|=1LL<<rt1[i];
1367       }
1368     }
1369   }
1370   clear_const(current,rs1[i]);
1371   clear_const(current,rs2[i]);
1372   clear_const(current,rt1[i]);
1373   dirty_reg(current,rt1[i]);
1374 }
1375
1376 void imm16_alloc(struct regstat *current,int i)
1377 {
1378   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1379   else lt1[i]=rs1[i];
1380   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1381   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1382     current->is32&=~(1LL<<rt1[i]);
1383     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1384       // TODO: Could preserve the 32-bit flag if the immediate is zero
1385       alloc_reg64(current,i,rt1[i]);
1386       alloc_reg64(current,i,rs1[i]);
1387     }
1388     clear_const(current,rs1[i]);
1389     clear_const(current,rt1[i]);
1390   }
1391   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1392     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1393     current->is32|=1LL<<rt1[i];
1394     clear_const(current,rs1[i]);
1395     clear_const(current,rt1[i]);
1396   }
1397   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1398     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1399       if(rs1[i]!=rt1[i]) {
1400         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1401         alloc_reg64(current,i,rt1[i]);
1402         current->is32&=~(1LL<<rt1[i]);
1403       }
1404     }
1405     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1406     if(is_const(current,rs1[i])) {
1407       int v=get_const(current,rs1[i]);
1408       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1409       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1410       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1411     }
1412     else clear_const(current,rt1[i]);
1413   }
1414   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1415     if(is_const(current,rs1[i])) {
1416       int v=get_const(current,rs1[i]);
1417       set_const(current,rt1[i],v+imm[i]);
1418     }
1419     else clear_const(current,rt1[i]);
1420     current->is32|=1LL<<rt1[i];
1421   }
1422   else {
1423     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1424     current->is32|=1LL<<rt1[i];
1425   }
1426   dirty_reg(current,rt1[i]);
1427 }
1428
1429 void load_alloc(struct regstat *current,int i)
1430 {
1431   clear_const(current,rt1[i]);
1432   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1433   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1434   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1435   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1436     alloc_reg(current,i,rt1[i]);
1437     assert(get_reg(current->regmap,rt1[i])>=0);
1438     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1439     {
1440       current->is32&=~(1LL<<rt1[i]);
1441       alloc_reg64(current,i,rt1[i]);
1442     }
1443     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1444     {
1445       current->is32&=~(1LL<<rt1[i]);
1446       alloc_reg64(current,i,rt1[i]);
1447       alloc_all(current,i);
1448       alloc_reg64(current,i,FTEMP);
1449       minimum_free_regs[i]=HOST_REGS;
1450     }
1451     else current->is32|=1LL<<rt1[i];
1452     dirty_reg(current,rt1[i]);
1453     // LWL/LWR need a temporary register for the old value
1454     if(opcode[i]==0x22||opcode[i]==0x26)
1455     {
1456       alloc_reg(current,i,FTEMP);
1457       alloc_reg_temp(current,i,-1);
1458       minimum_free_regs[i]=1;
1459     }
1460   }
1461   else
1462   {
1463     // Load to r0 or unneeded register (dummy load)
1464     // but we still need a register to calculate the address
1465     if(opcode[i]==0x22||opcode[i]==0x26)
1466     {
1467       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1468     }
1469     alloc_reg_temp(current,i,-1);
1470     minimum_free_regs[i]=1;
1471     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1472     {
1473       alloc_all(current,i);
1474       alloc_reg64(current,i,FTEMP);
1475       minimum_free_regs[i]=HOST_REGS;
1476     }
1477   }
1478 }
1479
1480 void store_alloc(struct regstat *current,int i)
1481 {
1482   clear_const(current,rs2[i]);
1483   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1484   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1485   alloc_reg(current,i,rs2[i]);
1486   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1487     alloc_reg64(current,i,rs2[i]);
1488     if(rs2[i]) alloc_reg(current,i,FTEMP);
1489   }
1490   #if defined(HOST_IMM8)
1491   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1492   else alloc_reg(current,i,INVCP);
1493   #endif
1494   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1495     alloc_reg(current,i,FTEMP);
1496   }
1497   // We need a temporary register for address generation
1498   alloc_reg_temp(current,i,-1);
1499   minimum_free_regs[i]=1;
1500 }
1501
1502 void c1ls_alloc(struct regstat *current,int i)
1503 {
1504   //clear_const(current,rs1[i]); // FIXME
1505   clear_const(current,rt1[i]);
1506   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1507   alloc_reg(current,i,CSREG); // Status
1508   alloc_reg(current,i,FTEMP);
1509   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1510     alloc_reg64(current,i,FTEMP);
1511   }
1512   #if defined(HOST_IMM8)
1513   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1514   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1515     alloc_reg(current,i,INVCP);
1516   #endif
1517   // We need a temporary register for address generation
1518   alloc_reg_temp(current,i,-1);
1519 }
1520
1521 void c2ls_alloc(struct regstat *current,int i)
1522 {
1523   clear_const(current,rt1[i]);
1524   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1525   alloc_reg(current,i,FTEMP);
1526   #if defined(HOST_IMM8)
1527   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1528   if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1529     alloc_reg(current,i,INVCP);
1530   #endif
1531   // We need a temporary register for address generation
1532   alloc_reg_temp(current,i,-1);
1533   minimum_free_regs[i]=1;
1534 }
1535
1536 #ifndef multdiv_alloc
1537 void multdiv_alloc(struct regstat *current,int i)
1538 {
1539   //  case 0x18: MULT
1540   //  case 0x19: MULTU
1541   //  case 0x1A: DIV
1542   //  case 0x1B: DIVU
1543   //  case 0x1C: DMULT
1544   //  case 0x1D: DMULTU
1545   //  case 0x1E: DDIV
1546   //  case 0x1F: DDIVU
1547   clear_const(current,rs1[i]);
1548   clear_const(current,rs2[i]);
1549   if(rs1[i]&&rs2[i])
1550   {
1551     if((opcode2[i]&4)==0) // 32-bit
1552     {
1553       current->u&=~(1LL<<HIREG);
1554       current->u&=~(1LL<<LOREG);
1555       alloc_reg(current,i,HIREG);
1556       alloc_reg(current,i,LOREG);
1557       alloc_reg(current,i,rs1[i]);
1558       alloc_reg(current,i,rs2[i]);
1559       current->is32|=1LL<<HIREG;
1560       current->is32|=1LL<<LOREG;
1561       dirty_reg(current,HIREG);
1562       dirty_reg(current,LOREG);
1563     }
1564     else // 64-bit
1565     {
1566       current->u&=~(1LL<<HIREG);
1567       current->u&=~(1LL<<LOREG);
1568       current->uu&=~(1LL<<HIREG);
1569       current->uu&=~(1LL<<LOREG);
1570       alloc_reg64(current,i,HIREG);
1571       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1572       alloc_reg64(current,i,rs1[i]);
1573       alloc_reg64(current,i,rs2[i]);
1574       alloc_all(current,i);
1575       current->is32&=~(1LL<<HIREG);
1576       current->is32&=~(1LL<<LOREG);
1577       dirty_reg(current,HIREG);
1578       dirty_reg(current,LOREG);
1579       minimum_free_regs[i]=HOST_REGS;
1580     }
1581   }
1582   else
1583   {
1584     // Multiply by zero is zero.
1585     // MIPS does not have a divide by zero exception.
1586     // The result is undefined, so we return zero.
1587     alloc_reg(current,i,HIREG);
1588     alloc_reg(current,i,LOREG);
1589     current->is32|=1LL<<HIREG;
1590     current->is32|=1LL<<LOREG;
1591     dirty_reg(current,HIREG);
1592     dirty_reg(current,LOREG);
1593   }
1594 }
1595 #endif
1596
1597 void cop0_alloc(struct regstat *current,int i)
1598 {
1599   if(opcode2[i]==0) // MFC0
1600   {
1601     if(rt1[i]) {
1602       clear_const(current,rt1[i]);
1603       alloc_all(current,i);
1604       alloc_reg(current,i,rt1[i]);
1605       current->is32|=1LL<<rt1[i];
1606       dirty_reg(current,rt1[i]);
1607     }
1608   }
1609   else if(opcode2[i]==4) // MTC0
1610   {
1611     if(rs1[i]){
1612       clear_const(current,rs1[i]);
1613       alloc_reg(current,i,rs1[i]);
1614       alloc_all(current,i);
1615     }
1616     else {
1617       alloc_all(current,i); // FIXME: Keep r0
1618       current->u&=~1LL;
1619       alloc_reg(current,i,0);
1620     }
1621   }
1622   else
1623   {
1624     // TLBR/TLBWI/TLBWR/TLBP/ERET
1625     assert(opcode2[i]==0x10);
1626     alloc_all(current,i);
1627   }
1628   minimum_free_regs[i]=HOST_REGS;
1629 }
1630
1631 void cop1_alloc(struct regstat *current,int i)
1632 {
1633   alloc_reg(current,i,CSREG); // Load status
1634   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1635   {
1636     if(rt1[i]){
1637       clear_const(current,rt1[i]);
1638       if(opcode2[i]==1) {
1639         alloc_reg64(current,i,rt1[i]); // DMFC1
1640         current->is32&=~(1LL<<rt1[i]);
1641       }else{
1642         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1643         current->is32|=1LL<<rt1[i];
1644       }
1645       dirty_reg(current,rt1[i]);
1646     }
1647     alloc_reg_temp(current,i,-1);
1648   }
1649   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1650   {
1651     if(rs1[i]){
1652       clear_const(current,rs1[i]);
1653       if(opcode2[i]==5)
1654         alloc_reg64(current,i,rs1[i]); // DMTC1
1655       else
1656         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1657       alloc_reg_temp(current,i,-1);
1658     }
1659     else {
1660       current->u&=~1LL;
1661       alloc_reg(current,i,0);
1662       alloc_reg_temp(current,i,-1);
1663     }
1664   }
1665   minimum_free_regs[i]=1;
1666 }
1667 void fconv_alloc(struct regstat *current,int i)
1668 {
1669   alloc_reg(current,i,CSREG); // Load status
1670   alloc_reg_temp(current,i,-1);
1671   minimum_free_regs[i]=1;
1672 }
1673 void float_alloc(struct regstat *current,int i)
1674 {
1675   alloc_reg(current,i,CSREG); // Load status
1676   alloc_reg_temp(current,i,-1);
1677   minimum_free_regs[i]=1;
1678 }
1679 void c2op_alloc(struct regstat *current,int i)
1680 {
1681   alloc_reg_temp(current,i,-1);
1682 }
1683 void fcomp_alloc(struct regstat *current,int i)
1684 {
1685   alloc_reg(current,i,CSREG); // Load status
1686   alloc_reg(current,i,FSREG); // Load flags
1687   dirty_reg(current,FSREG); // Flag will be modified
1688   alloc_reg_temp(current,i,-1);
1689   minimum_free_regs[i]=1;
1690 }
1691
1692 void syscall_alloc(struct regstat *current,int i)
1693 {
1694   alloc_cc(current,i);
1695   dirty_reg(current,CCREG);
1696   alloc_all(current,i);
1697   minimum_free_regs[i]=HOST_REGS;
1698   current->isconst=0;
1699 }
1700
1701 void delayslot_alloc(struct regstat *current,int i)
1702 {
1703   switch(itype[i]) {
1704     case UJUMP:
1705     case CJUMP:
1706     case SJUMP:
1707     case RJUMP:
1708     case FJUMP:
1709     case SYSCALL:
1710     case HLECALL:
1711     case SPAN:
1712       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1713       SysPrintf("Disabled speculative precompilation\n");
1714       stop_after_jal=1;
1715       break;
1716     case IMM16:
1717       imm16_alloc(current,i);
1718       break;
1719     case LOAD:
1720     case LOADLR:
1721       load_alloc(current,i);
1722       break;
1723     case STORE:
1724     case STORELR:
1725       store_alloc(current,i);
1726       break;
1727     case ALU:
1728       alu_alloc(current,i);
1729       break;
1730     case SHIFT:
1731       shift_alloc(current,i);
1732       break;
1733     case MULTDIV:
1734       multdiv_alloc(current,i);
1735       break;
1736     case SHIFTIMM:
1737       shiftimm_alloc(current,i);
1738       break;
1739     case MOV:
1740       mov_alloc(current,i);
1741       break;
1742     case COP0:
1743       cop0_alloc(current,i);
1744       break;
1745     case COP1:
1746     case COP2:
1747       cop1_alloc(current,i);
1748       break;
1749     case C1LS:
1750       c1ls_alloc(current,i);
1751       break;
1752     case C2LS:
1753       c2ls_alloc(current,i);
1754       break;
1755     case FCONV:
1756       fconv_alloc(current,i);
1757       break;
1758     case FLOAT:
1759       float_alloc(current,i);
1760       break;
1761     case FCOMP:
1762       fcomp_alloc(current,i);
1763       break;
1764     case C2OP:
1765       c2op_alloc(current,i);
1766       break;
1767   }
1768 }
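// delayslot_alloc dispatches to the same per-type allocators that are used
// for normal instructions; the only special case is another branch (or a
// syscall/HLE call) sitting in the delay slot, which is not supported and
// instead disables speculative precompilation via stop_after_jal.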
1769
1770 // Special case where a branch and delay slot span two pages in virtual memory
1771 static void pagespan_alloc(struct regstat *current,int i)
1772 {
1773   current->isconst=0;
1774   current->wasconst=0;
1775   regs[i].wasconst=0;
1776   minimum_free_regs[i]=HOST_REGS;
1777   alloc_all(current,i);
1778   alloc_cc(current,i);
1779   dirty_reg(current,CCREG);
1780   if(opcode[i]==3) // JAL
1781   {
1782     alloc_reg(current,i,31);
1783     dirty_reg(current,31);
1784   }
1785   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1786   {
1787     alloc_reg(current,i,rs1[i]);
1788     if (rt1[i]!=0) {
1789       alloc_reg(current,i,rt1[i]);
1790       dirty_reg(current,rt1[i]);
1791     }
1792   }
1793   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1794   {
1795     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1796     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1797     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1798     {
1799       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1800       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1801     }
1802   }
1803   else
1804   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1805   {
1806     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1807     if(!((current->is32>>rs1[i])&1))
1808     {
1809       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1810     }
1811   }
1812   else
1813   if(opcode[i]==0x11) // BC1
1814   {
1815     alloc_reg(current,i,FSREG);
1816     alloc_reg(current,i,CSREG);
1817   }
1818   //else ...
1819 }
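// pagespan_alloc handles a branch whose delay slot falls on the next
// virtual page: everything is spilled (alloc_all with minimum_free_regs
// set to HOST_REGS) and only the registers the branch itself needs stay
// allocated: rs1/rs2 for the compare forms, rs1 plus the link register
// for JR/JALR, r31 for JAL, and FSREG/CSREG for BC1.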
1820
1821 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1822 {
1823   stubs[stubcount][0]=type;
1824   stubs[stubcount][1]=addr;
1825   stubs[stubcount][2]=retaddr;
1826   stubs[stubcount][3]=a;
1827   stubs[stubcount][4]=b;
1828   stubs[stubcount][5]=c;
1829   stubs[stubcount][6]=d;
1830   stubs[stubcount][7]=e;
1831   stubcount++;
1832 }
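// Each stubs[] entry records, in order: the stub type, the address of the
// branch to patch, the return address in the output buffer, and five
// type-dependent arguments a..e.  For the load/store stubs emitted below
// these arguments appear to be the instruction index, the address
// register, the register state, the cycle-count adjustment and the
// live-register mask, e.g.
//
//   add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
//
// The out-of-line handler for each entry is presumably generated later by
// walking this table.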
1833
1834 // Write out a single register
1835 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1836 {
1837   int hr;
1838   for(hr=0;hr<HOST_REGS;hr++) {
1839     if(hr!=EXCLUDE_REG) {
1840       if((regmap[hr]&63)==r) {
1841         if((dirty>>hr)&1) {
1842           if(regmap[hr]<64) {
1843             emit_storereg(r,hr);
1844           }else{
1845             emit_storereg(r|64,hr);
1846           }
1847         }
1848       }
1849     }
1850   }
1851 }
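// Register numbering convention used here and throughout the assembler:
// values 0..63 name the low 32 bits of a guest register and r|64 names the
// upper half of a 64-bit value, which is why the mapping is matched with
// (regmap[hr]&63)==r and the write-back uses emit_storereg(r|64,hr) when
// the host register holds the high half.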
1852
1853 int mchecksum()
1854 {
1855   //if(!tracedebug) return 0;
1856   int i;
1857   int sum=0;
1858   for(i=0;i<2097152;i++) {
1859     unsigned int temp=sum;
1860     sum<<=1;
1861     sum|=(~temp)>>31;
1862     sum^=((u_int *)rdram)[i];
1863   }
1864   return sum;
1865 }
1866 int rchecksum()
1867 {
1868   int i;
1869   int sum=0;
1870   for(i=0;i<64;i++)
1871     sum^=((u_int *)reg)[i];
1872   return sum;
1873 }
1874 void rlist()
1875 {
1876   int i;
1877   printf("TRACE: ");
1878   for(i=0;i<32;i++)
1879     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
1880   printf("\n");
1881 }
1882
1883 void enabletrace()
1884 {
1885   tracedebug=1;
1886 }
1887
1888 void memdebug(int i)
1889 {
1890   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
1891   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
1892   //rlist();
1893   //if(tracedebug) {
1894   //if(Count>=-2084597794) {
1895   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
1896   //if(0) {
1897     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
1898     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
1899     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
1900     rlist();
1901     #ifdef __i386__
1902     printf("TRACE: %x\n",(&i)[-1]);
1903     #endif
1904     #ifdef __arm__
1905     int j;
1906     printf("TRACE: %x \n",(&j)[10]);
1907     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
1908     #endif
1909     //fflush(stdout);
1910   }
1911   //printf("TRACE: %x\n",(&i)[-1]);
1912 }
1913
1914 void alu_assemble(int i,struct regstat *i_regs)
1915 {
1916   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1917     if(rt1[i]) {
1918       signed char s1,s2,t;
1919       t=get_reg(i_regs->regmap,rt1[i]);
1920       if(t>=0) {
1921         s1=get_reg(i_regs->regmap,rs1[i]);
1922         s2=get_reg(i_regs->regmap,rs2[i]);
1923         if(rs1[i]&&rs2[i]) {
1924           assert(s1>=0);
1925           assert(s2>=0);
1926           if(opcode2[i]&2) emit_sub(s1,s2,t);
1927           else emit_add(s1,s2,t);
1928         }
1929         else if(rs1[i]) {
1930           if(s1>=0) emit_mov(s1,t);
1931           else emit_loadreg(rs1[i],t);
1932         }
1933         else if(rs2[i]) {
1934           if(s2>=0) {
1935             if(opcode2[i]&2) emit_neg(s2,t);
1936             else emit_mov(s2,t);
1937           }
1938           else {
1939             emit_loadreg(rs2[i],t);
1940             if(opcode2[i]&2) emit_neg(t,t);
1941           }
1942         }
1943         else emit_zeroreg(t);
1944       }
1945     }
1946   }
1947   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1948     if(rt1[i]) {
1949       signed char s1l,s2l,s1h,s2h,tl,th;
1950       tl=get_reg(i_regs->regmap,rt1[i]);
1951       th=get_reg(i_regs->regmap,rt1[i]|64);
1952       if(tl>=0) {
1953         s1l=get_reg(i_regs->regmap,rs1[i]);
1954         s2l=get_reg(i_regs->regmap,rs2[i]);
1955         s1h=get_reg(i_regs->regmap,rs1[i]|64);
1956         s2h=get_reg(i_regs->regmap,rs2[i]|64);
1957         if(rs1[i]&&rs2[i]) {
1958           assert(s1l>=0);
1959           assert(s2l>=0);
1960           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
1961           else emit_adds(s1l,s2l,tl);
1962           if(th>=0) {
1963             #ifdef INVERTED_CARRY
1964             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
1965             #else
1966             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
1967             #endif
1968             else emit_add(s1h,s2h,th);
1969           }
1970         }
1971         else if(rs1[i]) {
1972           if(s1l>=0) emit_mov(s1l,tl);
1973           else emit_loadreg(rs1[i],tl);
1974           if(th>=0) {
1975             if(s1h>=0) emit_mov(s1h,th);
1976             else emit_loadreg(rs1[i]|64,th);
1977           }
1978         }
1979         else if(rs2[i]) {
1980           if(s2l>=0) {
1981             if(opcode2[i]&2) emit_negs(s2l,tl);
1982             else emit_mov(s2l,tl);
1983           }
1984           else {
1985             emit_loadreg(rs2[i],tl);
1986             if(opcode2[i]&2) emit_negs(tl,tl);
1987           }
1988           if(th>=0) {
1989             #ifdef INVERTED_CARRY
1990             if(s2h>=0) emit_mov(s2h,th);
1991             else emit_loadreg(rs2[i]|64,th);
1992             if(opcode2[i]&2) {
1993               emit_adcimm(-1,th); // x86 has inverted carry flag
1994               emit_not(th,th);
1995             }
1996             #else
1997             if(opcode2[i]&2) {
1998               if(s2h>=0) emit_rscimm(s2h,0,th);
1999               else {
2000                 emit_loadreg(rs2[i]|64,th);
2001                 emit_rscimm(th,0,th);
2002               }
2003             }else{
2004               if(s2h>=0) emit_mov(s2h,th);
2005               else emit_loadreg(rs2[i]|64,th);
2006             }
2007             #endif
2008           }
2009         }
2010         else {
2011           emit_zeroreg(tl);
2012           if(th>=0) emit_zeroreg(th);
2013         }
2014       }
2015     }
2016   }
2017   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2018     if(rt1[i]) {
2019       signed char s1l,s1h,s2l,s2h,t;
2020       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2021       {
2022         t=get_reg(i_regs->regmap,rt1[i]);
2023         //assert(t>=0);
2024         if(t>=0) {
2025           s1l=get_reg(i_regs->regmap,rs1[i]);
2026           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2027           s2l=get_reg(i_regs->regmap,rs2[i]);
2028           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2029           if(rs2[i]==0) // rx<r0
2030           {
2031             assert(s1h>=0);
2032             if(opcode2[i]==0x2a) // SLT
2033               emit_shrimm(s1h,31,t);
2034             else // SLTU (unsigned cannot be less than zero)
2035               emit_zeroreg(t);
2036           }
2037           else if(rs1[i]==0) // r0<rx
2038           {
2039             assert(s2h>=0);
2040             if(opcode2[i]==0x2a) // SLT
2041               emit_set_gz64_32(s2h,s2l,t);
2042             else // SLTU (set if not zero)
2043               emit_set_nz64_32(s2h,s2l,t);
2044           }
2045           else {
2046             assert(s1l>=0);assert(s1h>=0);
2047             assert(s2l>=0);assert(s2h>=0);
2048             if(opcode2[i]==0x2a) // SLT
2049               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2050             else // SLTU
2051               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2052           }
2053         }
2054       } else {
2055         t=get_reg(i_regs->regmap,rt1[i]);
2056         //assert(t>=0);
2057         if(t>=0) {
2058           s1l=get_reg(i_regs->regmap,rs1[i]);
2059           s2l=get_reg(i_regs->regmap,rs2[i]);
2060           if(rs2[i]==0) // rx<r0
2061           {
2062             assert(s1l>=0);
2063             if(opcode2[i]==0x2a) // SLT
2064               emit_shrimm(s1l,31,t);
2065             else // SLTU (unsigned cannot be less than zero)
2066               emit_zeroreg(t);
2067           }
2068           else if(rs1[i]==0) // r0<rx
2069           {
2070             assert(s2l>=0);
2071             if(opcode2[i]==0x2a) // SLT
2072               emit_set_gz32(s2l,t);
2073             else // SLTU (set if not zero)
2074               emit_set_nz32(s2l,t);
2075           }
2076           else{
2077             assert(s1l>=0);assert(s2l>=0);
2078             if(opcode2[i]==0x2a) // SLT
2079               emit_set_if_less32(s1l,s2l,t);
2080             else // SLTU
2081               emit_set_if_carry32(s1l,s2l,t);
2082           }
2083         }
2084       }
2085     }
2086   }
2087   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2088     if(rt1[i]) {
2089       signed char s1l,s1h,s2l,s2h,th,tl;
2090       tl=get_reg(i_regs->regmap,rt1[i]);
2091       th=get_reg(i_regs->regmap,rt1[i]|64);
2092       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2093       {
2094         assert(tl>=0);
2095         if(tl>=0) {
2096           s1l=get_reg(i_regs->regmap,rs1[i]);
2097           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2098           s2l=get_reg(i_regs->regmap,rs2[i]);
2099           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2100           if(rs1[i]&&rs2[i]) {
2101             assert(s1l>=0);assert(s1h>=0);
2102             assert(s2l>=0);assert(s2h>=0);
2103             if(opcode2[i]==0x24) { // AND
2104               emit_and(s1l,s2l,tl);
2105               emit_and(s1h,s2h,th);
2106             } else
2107             if(opcode2[i]==0x25) { // OR
2108               emit_or(s1l,s2l,tl);
2109               emit_or(s1h,s2h,th);
2110             } else
2111             if(opcode2[i]==0x26) { // XOR
2112               emit_xor(s1l,s2l,tl);
2113               emit_xor(s1h,s2h,th);
2114             } else
2115             if(opcode2[i]==0x27) { // NOR
2116               emit_or(s1l,s2l,tl);
2117               emit_or(s1h,s2h,th);
2118               emit_not(tl,tl);
2119               emit_not(th,th);
2120             }
2121           }
2122           else
2123           {
2124             if(opcode2[i]==0x24) { // AND
2125               emit_zeroreg(tl);
2126               emit_zeroreg(th);
2127             } else
2128             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2129               if(rs1[i]){
2130                 if(s1l>=0) emit_mov(s1l,tl);
2131                 else emit_loadreg(rs1[i],tl);
2132                 if(s1h>=0) emit_mov(s1h,th);
2133                 else emit_loadreg(rs1[i]|64,th);
2134               }
2135               else
2136               if(rs2[i]){
2137                 if(s2l>=0) emit_mov(s2l,tl);
2138                 else emit_loadreg(rs2[i],tl);
2139                 if(s2h>=0) emit_mov(s2h,th);
2140                 else emit_loadreg(rs2[i]|64,th);
2141               }
2142               else{
2143                 emit_zeroreg(tl);
2144                 emit_zeroreg(th);
2145               }
2146             } else
2147             if(opcode2[i]==0x27) { // NOR
2148               if(rs1[i]){
2149                 if(s1l>=0) emit_not(s1l,tl);
2150                 else{
2151                   emit_loadreg(rs1[i],tl);
2152                   emit_not(tl,tl);
2153                 }
2154                 if(s1h>=0) emit_not(s1h,th);
2155                 else{
2156                   emit_loadreg(rs1[i]|64,th);
2157                   emit_not(th,th);
2158                 }
2159               }
2160               else
2161               if(rs2[i]){
2162                 if(s2l>=0) emit_not(s2l,tl);
2163                 else{
2164                   emit_loadreg(rs2[i],tl);
2165                   emit_not(tl,tl);
2166                 }
2167                 if(s2h>=0) emit_not(s2h,th);
2168                 else{
2169                   emit_loadreg(rs2[i]|64,th);
2170                   emit_not(th,th);
2171                 }
2172               }
2173               else {
2174                 emit_movimm(-1,tl);
2175                 emit_movimm(-1,th);
2176               }
2177             }
2178           }
2179         }
2180       }
2181       else
2182       {
2183         // 32 bit
2184         if(tl>=0) {
2185           s1l=get_reg(i_regs->regmap,rs1[i]);
2186           s2l=get_reg(i_regs->regmap,rs2[i]);
2187           if(rs1[i]&&rs2[i]) {
2188             assert(s1l>=0);
2189             assert(s2l>=0);
2190             if(opcode2[i]==0x24) { // AND
2191               emit_and(s1l,s2l,tl);
2192             } else
2193             if(opcode2[i]==0x25) { // OR
2194               emit_or(s1l,s2l,tl);
2195             } else
2196             if(opcode2[i]==0x26) { // XOR
2197               emit_xor(s1l,s2l,tl);
2198             } else
2199             if(opcode2[i]==0x27) { // NOR
2200               emit_or(s1l,s2l,tl);
2201               emit_not(tl,tl);
2202             }
2203           }
2204           else
2205           {
2206             if(opcode2[i]==0x24) { // AND
2207               emit_zeroreg(tl);
2208             } else
2209             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2210               if(rs1[i]){
2211                 if(s1l>=0) emit_mov(s1l,tl);
2212                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2213               }
2214               else
2215               if(rs2[i]){
2216                 if(s2l>=0) emit_mov(s2l,tl);
2217                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2218               }
2219               else emit_zeroreg(tl);
2220             } else
2221             if(opcode2[i]==0x27) { // NOR
2222               if(rs1[i]){
2223                 if(s1l>=0) emit_not(s1l,tl);
2224                 else {
2225                   emit_loadreg(rs1[i],tl);
2226                   emit_not(tl,tl);
2227                 }
2228               }
2229               else
2230               if(rs2[i]){
2231                 if(s2l>=0) emit_not(s2l,tl);
2232                 else {
2233                   emit_loadreg(rs2[i],tl);
2234                   emit_not(tl,tl);
2235                 }
2236               }
2237               else emit_movimm(-1,tl);
2238             }
2239           }
2240         }
2241       }
2242     }
2243   }
2244 }
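// Decoding note for the ADD/SUB groups above: within 0x20..0x23 (and the
// 64-bit 0x2c..0x2f forms) bit 1 of opcode2 selects subtraction, which is
// why the emitters simply test (opcode2[i]&2) instead of comparing every
// opcode; the "unsigned" variants need no separate handling here since no
// overflow check is emitted for either form.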
2245
2246 void imm16_assemble(int i,struct regstat *i_regs)
2247 {
2248   if (opcode[i]==0x0f) { // LUI
2249     if(rt1[i]) {
2250       signed char t;
2251       t=get_reg(i_regs->regmap,rt1[i]);
2252       //assert(t>=0);
2253       if(t>=0) {
2254         if(!((i_regs->isconst>>t)&1))
2255           emit_movimm(imm[i]<<16,t);
2256       }
2257     }
2258   }
2259   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2260     if(rt1[i]) {
2261       signed char s,t;
2262       t=get_reg(i_regs->regmap,rt1[i]);
2263       s=get_reg(i_regs->regmap,rs1[i]);
2264       if(rs1[i]) {
2265         //assert(t>=0);
2266         //assert(s>=0);
2267         if(t>=0) {
2268           if(!((i_regs->isconst>>t)&1)) {
2269             if(s<0) {
2270               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2271               emit_addimm(t,imm[i],t);
2272             }else{
2273               if(!((i_regs->wasconst>>s)&1))
2274                 emit_addimm(s,imm[i],t);
2275               else
2276                 emit_movimm(constmap[i][s]+imm[i],t);
2277             }
2278           }
2279         }
2280       } else {
2281         if(t>=0) {
2282           if(!((i_regs->isconst>>t)&1))
2283             emit_movimm(imm[i],t);
2284         }
2285       }
2286     }
2287   }
2288   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2289     if(rt1[i]) {
2290       signed char sh,sl,th,tl;
2291       th=get_reg(i_regs->regmap,rt1[i]|64);
2292       tl=get_reg(i_regs->regmap,rt1[i]);
2293       sh=get_reg(i_regs->regmap,rs1[i]|64);
2294       sl=get_reg(i_regs->regmap,rs1[i]);
2295       if(tl>=0) {
2296         if(rs1[i]) {
2297           assert(sh>=0);
2298           assert(sl>=0);
2299           if(th>=0) {
2300             emit_addimm64_32(sh,sl,imm[i],th,tl);
2301           }
2302           else {
2303             emit_addimm(sl,imm[i],tl);
2304           }
2305         } else {
2306           emit_movimm(imm[i],tl);
2307           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2308         }
2309       }
2310     }
2311   }
2312   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2313     if(rt1[i]) {
2314       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2315       signed char sh,sl,t;
2316       t=get_reg(i_regs->regmap,rt1[i]);
2317       sh=get_reg(i_regs->regmap,rs1[i]|64);
2318       sl=get_reg(i_regs->regmap,rs1[i]);
2319       //assert(t>=0);
2320       if(t>=0) {
2321         if(rs1[i]>0) {
2322           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2323           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2324             if(opcode[i]==0x0a) { // SLTI
2325               if(sl<0) {
2326                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2327                 emit_slti32(t,imm[i],t);
2328               }else{
2329                 emit_slti32(sl,imm[i],t);
2330               }
2331             }
2332             else { // SLTIU
2333               if(sl<0) {
2334                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2335                 emit_sltiu32(t,imm[i],t);
2336               }else{
2337                 emit_sltiu32(sl,imm[i],t);
2338               }
2339             }
2340           }else{ // 64-bit
2341             assert(sl>=0);
2342             if(opcode[i]==0x0a) // SLTI
2343               emit_slti64_32(sh,sl,imm[i],t);
2344             else // SLTIU
2345               emit_sltiu64_32(sh,sl,imm[i],t);
2346           }
2347         }else{
2348           // SLTI(U) with r0 is just stupid,
2349           // nonetheless examples can be found
2350           if(opcode[i]==0x0a) // SLTI
2351             if(0<imm[i]) emit_movimm(1,t);
2352             else emit_zeroreg(t);
2353           else // SLTIU
2354           {
2355             if(imm[i]) emit_movimm(1,t);
2356             else emit_zeroreg(t);
2357           }
2358         }
2359       }
2360     }
2361   }
2362   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2363     if(rt1[i]) {
2364       signed char sh,sl,th,tl;
2365       th=get_reg(i_regs->regmap,rt1[i]|64);
2366       tl=get_reg(i_regs->regmap,rt1[i]);
2367       sh=get_reg(i_regs->regmap,rs1[i]|64);
2368       sl=get_reg(i_regs->regmap,rs1[i]);
2369       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2370         if(opcode[i]==0x0c) //ANDI
2371         {
2372           if(rs1[i]) {
2373             if(sl<0) {
2374               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2375               emit_andimm(tl,imm[i],tl);
2376             }else{
2377               if(!((i_regs->wasconst>>sl)&1))
2378                 emit_andimm(sl,imm[i],tl);
2379               else
2380                 emit_movimm(constmap[i][sl]&imm[i],tl);
2381             }
2382           }
2383           else
2384             emit_zeroreg(tl);
2385           if(th>=0) emit_zeroreg(th);
2386         }
2387         else
2388         {
2389           if(rs1[i]) {
2390             if(sl<0) {
2391               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2392             }
2393             if(th>=0) {
2394               if(sh<0) {
2395                 emit_loadreg(rs1[i]|64,th);
2396               }else{
2397                 emit_mov(sh,th);
2398               }
2399             }
2400             if(opcode[i]==0x0d) { // ORI
2401               if(sl<0) {
2402                 emit_orimm(tl,imm[i],tl);
2403               }else{
2404                 if(!((i_regs->wasconst>>sl)&1))
2405                   emit_orimm(sl,imm[i],tl);
2406                 else
2407                   emit_movimm(constmap[i][sl]|imm[i],tl);
2408               }
2409             }
2410             if(opcode[i]==0x0e) { // XORI
2411               if(sl<0) {
2412                 emit_xorimm(tl,imm[i],tl);
2413               }else{
2414                 if(!((i_regs->wasconst>>sl)&1))
2415                   emit_xorimm(sl,imm[i],tl);
2416                 else
2417                   emit_movimm(constmap[i][sl]^imm[i],tl);
2418               }
2419             }
2420           }
2421           else {
2422             emit_movimm(imm[i],tl);
2423             if(th>=0) emit_zeroreg(th);
2424           }
2425         }
2426       }
2427     }
2428   }
2429 }
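// Constant-propagation pattern used by imm16_assemble: when the source
// host register already holds a known constant (its wasconst bit is set),
// the immediate operation is folded at recompile time and the result is
// materialized directly.  A minimal sketch of the ANDI case above:
//
//   if(!((i_regs->wasconst>>sl)&1))
//     emit_andimm(sl,imm[i],tl);               // compute at run time
//   else
//     emit_movimm(constmap[i][sl]&imm[i],tl);  // fold the constant now
//
// constmap[i][] evidently tracks the known value of each host register at
// instruction i.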
2430
2431 void shiftimm_assemble(int i,struct regstat *i_regs)
2432 {
2433   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2434   {
2435     if(rt1[i]) {
2436       signed char s,t;
2437       t=get_reg(i_regs->regmap,rt1[i]);
2438       s=get_reg(i_regs->regmap,rs1[i]);
2439       //assert(t>=0);
2440       if(t>=0&&!((i_regs->isconst>>t)&1)){
2441         if(rs1[i]==0)
2442         {
2443           emit_zeroreg(t);
2444         }
2445         else
2446         {
2447           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2448           if(imm[i]) {
2449             if(opcode2[i]==0) // SLL
2450             {
2451               emit_shlimm(s<0?t:s,imm[i],t);
2452             }
2453             if(opcode2[i]==2) // SRL
2454             {
2455               emit_shrimm(s<0?t:s,imm[i],t);
2456             }
2457             if(opcode2[i]==3) // SRA
2458             {
2459               emit_sarimm(s<0?t:s,imm[i],t);
2460             }
2461           }else{
2462             // Shift by zero
2463             if(s>=0 && s!=t) emit_mov(s,t);
2464           }
2465         }
2466       }
2467       //emit_storereg(rt1[i],t); //DEBUG
2468     }
2469   }
2470   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2471   {
2472     if(rt1[i]) {
2473       signed char sh,sl,th,tl;
2474       th=get_reg(i_regs->regmap,rt1[i]|64);
2475       tl=get_reg(i_regs->regmap,rt1[i]);
2476       sh=get_reg(i_regs->regmap,rs1[i]|64);
2477       sl=get_reg(i_regs->regmap,rs1[i]);
2478       if(tl>=0) {
2479         if(rs1[i]==0)
2480         {
2481           emit_zeroreg(tl);
2482           if(th>=0) emit_zeroreg(th);
2483         }
2484         else
2485         {
2486           assert(sl>=0);
2487           assert(sh>=0);
2488           if(imm[i]) {
2489             if(opcode2[i]==0x38) // DSLL
2490             {
2491               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2492               emit_shlimm(sl,imm[i],tl);
2493             }
2494             if(opcode2[i]==0x3a) // DSRL
2495             {
2496               emit_shrdimm(sl,sh,imm[i],tl);
2497               if(th>=0) emit_shrimm(sh,imm[i],th);
2498             }
2499             if(opcode2[i]==0x3b) // DSRA
2500             {
2501               emit_shrdimm(sl,sh,imm[i],tl);
2502               if(th>=0) emit_sarimm(sh,imm[i],th);
2503             }
2504           }else{
2505             // Shift by zero
2506             if(sl!=tl) emit_mov(sl,tl);
2507             if(th>=0&&sh!=th) emit_mov(sh,th);
2508           }
2509         }
2510       }
2511     }
2512   }
2513   if(opcode2[i]==0x3c) // DSLL32
2514   {
2515     if(rt1[i]) {
2516       signed char sl,tl,th;
2517       tl=get_reg(i_regs->regmap,rt1[i]);
2518       th=get_reg(i_regs->regmap,rt1[i]|64);
2519       sl=get_reg(i_regs->regmap,rs1[i]);
2520       if(th>=0||tl>=0){
2521         assert(tl>=0);
2522         assert(th>=0);
2523         assert(sl>=0);
2524         emit_mov(sl,th);
2525         emit_zeroreg(tl);
2526         if(imm[i]>32)
2527         {
2528           emit_shlimm(th,imm[i]&31,th);
2529         }
2530       }
2531     }
2532   }
2533   if(opcode2[i]==0x3e) // DSRL32
2534   {
2535     if(rt1[i]) {
2536       signed char sh,tl,th;
2537       tl=get_reg(i_regs->regmap,rt1[i]);
2538       th=get_reg(i_regs->regmap,rt1[i]|64);
2539       sh=get_reg(i_regs->regmap,rs1[i]|64);
2540       if(tl>=0){
2541         assert(sh>=0);
2542         emit_mov(sh,tl);
2543         if(th>=0) emit_zeroreg(th);
2544         if(imm[i]>32)
2545         {
2546           emit_shrimm(tl,imm[i]&31,tl);
2547         }
2548       }
2549     }
2550   }
2551   if(opcode2[i]==0x3f) // DSRA32
2552   {
2553     if(rt1[i]) {
2554       signed char sh,tl;
2555       tl=get_reg(i_regs->regmap,rt1[i]);
2556       sh=get_reg(i_regs->regmap,rs1[i]|64);
2557       if(tl>=0){
2558         assert(sh>=0);
2559         emit_mov(sh,tl);
2560         if(imm[i]>32)
2561         {
2562           emit_sarimm(tl,imm[i]&31,tl);
2563         }
2564       }
2565     }
2566   }
2567 }
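// Note on the DSLL32/DSRL32/DSRA32 cases above: imm[i] appears to hold the
// full shift amount (32..63) for these opcodes, so the code first moves
// the source word into the other half (or zero-/sign-fills it) and only
// applies an extra (imm[i]&31)-bit shift when imm[i]>32.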
2568
2569 #ifndef shift_assemble
2570 void shift_assemble(int i,struct regstat *i_regs)
2571 {
2572   printf("Need shift_assemble for this architecture.\n");
2573   exit(1);
2574 }
2575 #endif
2576
2577 void load_assemble(int i,struct regstat *i_regs)
2578 {
2579   int s,th,tl,addr,map=-1;
2580   int offset;
2581   int jaddr=0;
2582   int memtarget=0,c=0;
2583   int fastload_reg_override=0;
2584   u_int hr,reglist=0;
2585   th=get_reg(i_regs->regmap,rt1[i]|64);
2586   tl=get_reg(i_regs->regmap,rt1[i]);
2587   s=get_reg(i_regs->regmap,rs1[i]);
2588   offset=imm[i];
2589   for(hr=0;hr<HOST_REGS;hr++) {
2590     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2591   }
2592   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2593   if(s>=0) {
2594     c=(i_regs->wasconst>>s)&1;
2595     if (c) {
2596       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2597     }
2598   }
2599   //printf("load_assemble: c=%d\n",c);
2600   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2601   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2602   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2603     ||rt1[i]==0) {
2604       // could be an I/O FIFO, so the read must still be performed
2605       // (or it is a dummy read with rt1[i]==0)
2606       assem_debug("(forced read)\n");
2607       tl=get_reg(i_regs->regmap,-1);
2608       assert(tl>=0);
2609   }
2610   if(offset||s<0||c) addr=tl;
2611   else addr=s;
2612   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2613  if(tl>=0) {
2614   //printf("load_assemble: c=%d\n",c);
2615   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2616   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2617   reglist&=~(1<<tl);
2618   if(th>=0) reglist&=~(1<<th);
2619   if(!c) {
2620     #ifdef RAM_OFFSET
2621     map=get_reg(i_regs->regmap,ROREG);
2622     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2623     #endif
2624     #ifdef R29_HACK
2625     // Strmnnrmn's speed hack
2626     if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2627     #endif
2628     {
2629       jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2630     }
2631   }
2632   else if(ram_offset&&memtarget) {
2633     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2634     fastload_reg_override=HOST_TEMPREG;
2635   }
2636   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2637   if (opcode[i]==0x20) { // LB
2638     if(!c||memtarget) {
2639       if(!dummy) {
2640         #ifdef HOST_IMM_ADDR32
2641         if(c)
2642           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2643         else
2644         #endif
2645         {
2646           //emit_xorimm(addr,3,tl);
2647           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2648           int x=0,a=tl;
2649 #ifdef BIG_ENDIAN_MIPS
2650           if(!c) emit_xorimm(addr,3,tl);
2651           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2652 #else
2653           if(!c) a=addr;
2654 #endif
2655           if(fastload_reg_override) a=fastload_reg_override;
2656
2657           emit_movsbl_indexed_tlb(x,a,map,tl);
2658         }
2659       }
2660       if(jaddr)
2661         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2662     }
2663     else
2664       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2665   }
2666   if (opcode[i]==0x21) { // LH
2667     if(!c||memtarget) {
2668       if(!dummy) {
2669         #ifdef HOST_IMM_ADDR32
2670         if(c)
2671           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2672         else
2673         #endif
2674         {
2675           int x=0,a=tl;
2676 #ifdef BIG_ENDIAN_MIPS
2677           if(!c) emit_xorimm(addr,2,tl);
2678           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2679 #else
2680           if(!c) a=addr;
2681 #endif
2682           if(fastload_reg_override) a=fastload_reg_override;
2683           //#ifdef
2684           //emit_movswl_indexed_tlb(x,tl,map,tl);
2685           //else
2686           if(map>=0) {
2687             emit_movswl_indexed(x,a,tl);
2688           }else{
2689             #if 1 //def RAM_OFFSET
2690             emit_movswl_indexed(x,a,tl);
2691             #else
2692             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2693             #endif
2694           }
2695         }
2696       }
2697       if(jaddr)
2698         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2699     }
2700     else
2701       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2702   }
2703   if (opcode[i]==0x23) { // LW
2704     if(!c||memtarget) {
2705       if(!dummy) {
2706         int a=addr;
2707         if(fastload_reg_override) a=fastload_reg_override;
2708         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2709         #ifdef HOST_IMM_ADDR32
2710         if(c)
2711           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2712         else
2713         #endif
2714         emit_readword_indexed_tlb(0,a,map,tl);
2715       }
2716       if(jaddr)
2717         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2718     }
2719     else
2720       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2721   }
2722   if (opcode[i]==0x24) { // LBU
2723     if(!c||memtarget) {
2724       if(!dummy) {
2725         #ifdef HOST_IMM_ADDR32
2726         if(c)
2727           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2728         else
2729         #endif
2730         {
2731           //emit_xorimm(addr,3,tl);
2732           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2733           int x=0,a=tl;
2734 #ifdef BIG_ENDIAN_MIPS
2735           if(!c) emit_xorimm(addr,3,tl);
2736           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2737 #else
2738           if(!c) a=addr;
2739 #endif
2740           if(fastload_reg_override) a=fastload_reg_override;
2741
2742           emit_movzbl_indexed_tlb(x,a,map,tl);
2743         }
2744       }
2745       if(jaddr)
2746         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2747     }
2748     else
2749       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2750   }
2751   if (opcode[i]==0x25) { // LHU
2752     if(!c||memtarget) {
2753       if(!dummy) {
2754         #ifdef HOST_IMM_ADDR32
2755         if(c)
2756           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2757         else
2758         #endif
2759         {
2760           int x=0,a=tl;
2761 #ifdef BIG_ENDIAN_MIPS
2762           if(!c) emit_xorimm(addr,2,tl);
2763           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2764 #else
2765           if(!c) a=addr;
2766 #endif
2767           if(fastload_reg_override) a=fastload_reg_override;
2768           //#ifdef
2769           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2770           //#else
2771           if(map>=0) {
2772             emit_movzwl_indexed(x,a,tl);
2773           }else{
2774             #if 1 //def RAM_OFFSET
2775             emit_movzwl_indexed(x,a,tl);
2776             #else
2777             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2778             #endif
2779           }
2780         }
2781       }
2782       if(jaddr)
2783         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2784     }
2785     else
2786       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2787   }
2788   if (opcode[i]==0x27) { // LWU
2789     assert(th>=0);
2790     if(!c||memtarget) {
2791       if(!dummy) {
2792         int a=addr;
2793         if(fastload_reg_override) a=fastload_reg_override;
2794         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2795         #ifdef HOST_IMM_ADDR32
2796         if(c)
2797           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2798         else
2799         #endif
2800         emit_readword_indexed_tlb(0,a,map,tl);
2801       }
2802       if(jaddr)
2803         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2804     }
2805     else {
2806       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2807     }
2808     emit_zeroreg(th);
2809   }
2810   if (opcode[i]==0x37) { // LD
2811     if(!c||memtarget) {
2812       if(!dummy) {
2813         int a=addr;
2814         if(fastload_reg_override) a=fastload_reg_override;
2815         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2816         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2817         #ifdef HOST_IMM_ADDR32
2818         if(c)
2819           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2820         else
2821         #endif
2822         emit_readdword_indexed_tlb(0,a,map,th,tl);
2823       }
2824       if(jaddr)
2825         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2826     }
2827     else
2828       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2829   }
2830  }
2831   //emit_storereg(rt1[i],tl); // DEBUG
2832   //if(opcode[i]==0x23)
2833   //if(opcode[i]==0x24)
2834   //if(opcode[i]==0x23||opcode[i]==0x24)
2835   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2836   {
2837     //emit_pusha();
2838     save_regs(0x100f);
2839         emit_readword((int)&last_count,ECX);
2840         #ifdef __i386__
2841         if(get_reg(i_regs->regmap,CCREG)<0)
2842           emit_loadreg(CCREG,HOST_CCREG);
2843         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2844         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2845         emit_writeword(HOST_CCREG,(int)&Count);
2846         #endif
2847         #ifdef __arm__
2848         if(get_reg(i_regs->regmap,CCREG)<0)
2849           emit_loadreg(CCREG,0);
2850         else
2851           emit_mov(HOST_CCREG,0);
2852         emit_add(0,ECX,0);
2853         emit_addimm(0,2*ccadj[i],0);
2854         emit_writeword(0,(int)&Count);
2855         #endif
2856     emit_call((int)memdebug);
2857     //emit_popa();
2858     restore_regs(0x100f);
2859   }*/
2860 }
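// Fast-path/slow-path pattern used by load_assemble: for a non-constant
// address, emit_fastpath_cmp_jump() emits a range check whose conditional
// branch location is returned in jaddr and then registered with add_stub()
// as a LOADx_STUB, so the out-of-line handler for I/O and other non-RAM
// targets can be generated afterwards.  A constant address that is known
// not to hit RAM skips the fast path entirely and goes through
// inline_readstub() instead.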
2861
2862 #ifndef loadlr_assemble
2863 void loadlr_assemble(int i,struct regstat *i_regs)
2864 {
2865   printf("Need loadlr_assemble for this architecture.\n");
2866   exit(1);
2867 }
2868 #endif
2869
2870 void store_assemble(int i,struct regstat *i_regs)
2871 {
2872   int s,th,tl,map=-1;
2873   int addr,temp;
2874   int offset;
2875   int jaddr=0,type;
2876   int memtarget=0,c=0;
2877   int agr=AGEN1+(i&1);
2878   int faststore_reg_override=0;
2879   u_int hr,reglist=0;
2880   th=get_reg(i_regs->regmap,rs2[i]|64);
2881   tl=get_reg(i_regs->regmap,rs2[i]);
2882   s=get_reg(i_regs->regmap,rs1[i]);
2883   temp=get_reg(i_regs->regmap,agr);
2884   if(temp<0) temp=get_reg(i_regs->regmap,-1);
2885   offset=imm[i];
2886   if(s>=0) {
2887     c=(i_regs->wasconst>>s)&1;
2888     if(c) {
2889       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2890     }
2891   }
2892   assert(tl>=0);
2893   assert(temp>=0);
2894   for(hr=0;hr<HOST_REGS;hr++) {
2895     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2896   }
2897   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2898   if(offset||s<0||c) addr=temp;
2899   else addr=s;
2900   if(!c) {
2901     jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
2902   }
2903   else if(ram_offset&&memtarget) {
2904     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2905     faststore_reg_override=HOST_TEMPREG;
2906   }
2907
2908   if (opcode[i]==0x28) { // SB
2909     if(!c||memtarget) {
2910       int x=0,a=temp;
2911 #ifdef BIG_ENDIAN_MIPS
2912       if(!c) emit_xorimm(addr,3,temp);
2913       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2914 #else
2915       if(!c) a=addr;
2916 #endif
2917       if(faststore_reg_override) a=faststore_reg_override;
2918       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
2919       emit_writebyte_indexed_tlb(tl,x,a,map,a);
2920     }
2921     type=STOREB_STUB;
2922   }
2923   if (opcode[i]==0x29) { // SH
2924     if(!c||memtarget) {
2925       int x=0,a=temp;
2926 #ifdef BIG_ENDIAN_MIPS
2927       if(!c) emit_xorimm(addr,2,temp);
2928       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2929 #else
2930       if(!c) a=addr;
2931 #endif
2932       if(faststore_reg_override) a=faststore_reg_override;
2933       //#ifdef
2934       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
2935       //#else
2936       if(map>=0) {
2937         emit_writehword_indexed(tl,x,a);
2938       }else
2939         //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
2940         emit_writehword_indexed(tl,x,a);
2941     }
2942     type=STOREH_STUB;
2943   }
2944   if (opcode[i]==0x2B) { // SW
2945     if(!c||memtarget) {
2946       int a=addr;
2947       if(faststore_reg_override) a=faststore_reg_override;
2948       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
2949       emit_writeword_indexed_tlb(tl,0,a,map,temp);
2950     }
2951     type=STOREW_STUB;
2952   }
2953   if (opcode[i]==0x3F) { // SD
2954     if(!c||memtarget) {
2955       int a=addr;
2956       if(faststore_reg_override) a=faststore_reg_override;
2957       if(rs2[i]) {
2958         assert(th>=0);
2959         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
2960         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
2961         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
2962       }else{
2963         // Store zero
2964         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
2965         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
2966         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
2967       }
2968     }
2969     type=STORED_STUB;
2970   }
2971   if(jaddr) {
2972     // PCSX store handlers don't check invcode again
2973     reglist|=1<<addr;
2974     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2975     jaddr=0;
2976   }
2977   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
2978     if(!c||memtarget) {
2979       #ifdef DESTRUCTIVE_SHIFT
2980       // The x86 shift operation is 'destructive'; it overwrites the
2981       // source register, so we need to make a copy first and use that.
2982       addr=temp;
2983       #endif
2984       #if defined(HOST_IMM8)
2985       int ir=get_reg(i_regs->regmap,INVCP);
2986       assert(ir>=0);
2987       emit_cmpmem_indexedsr12_reg(ir,addr,1);
2988       #else
2989       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
2990       #endif
2991       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2992       emit_callne(invalidate_addr_reg[addr]);
2993       #else
2994       int jaddr2=(int)out;
2995       emit_jne(0);
2996       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
2997       #endif
2998     }
2999   }
3000   u_int addr_val=constmap[i][s]+offset;
3001   if(jaddr) {
3002     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3003   } else if(c&&!memtarget) {
3004     inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3005   }
3006   // Basic detection of the current block modifying itself;
3007   // not looking back, as that should be in the MIPS cache already
3008   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3009     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3010     assert(i_regs->regmap==regs[i].regmap); // not delay slot
3011     if(i_regs->regmap==regs[i].regmap) {
3012       load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
3013       wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
3014       emit_movimm(start+i*4+4,0);
3015       emit_writeword(0,(int)&pcaddr);
3016       emit_jmp((int)do_interrupt);
3017     }
3018   }
3019   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3020   //if(opcode[i]==0x2B || opcode[i]==0x28)
3021   //if(opcode[i]==0x2B || opcode[i]==0x29)
3022   //if(opcode[i]==0x2B)
3023   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3024   {
3025     #ifdef __i386__
3026     emit_pusha();
3027     #endif
3028     #ifdef __arm__
3029     save_regs(0x100f);
3030     #endif
3031         emit_readword((int)&last_count,ECX);
3032         #ifdef __i386__
3033         if(get_reg(i_regs->regmap,CCREG)<0)
3034           emit_loadreg(CCREG,HOST_CCREG);
3035         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3036         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3037         emit_writeword(HOST_CCREG,(int)&Count);
3038         #endif
3039         #ifdef __arm__
3040         if(get_reg(i_regs->regmap,CCREG)<0)
3041           emit_loadreg(CCREG,0);
3042         else
3043           emit_mov(HOST_CCREG,0);
3044         emit_add(0,ECX,0);
3045         emit_addimm(0,2*ccadj[i],0);
3046         emit_writeword(0,(int)&Count);
3047         #endif
3048     emit_call((int)memdebug);
3049     #ifdef __i386__
3050     emit_popa();
3051     #endif
3052     #ifdef __arm__
3053     restore_regs(0x100f);
3054     #endif
3055   }*/
3056 }
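// Self-modifying-code check shared by the store paths: unless the base
// register was already used for a store earlier in the block (waswritten)
// or the check is disabled via NDHACK_NO_SMC_CHECK, the generated code
// tests the invalid_code[] byte for the written page (address>>12) and,
// when the page may still contain translated code, invalidates it through
// either a conditional call to invalidate_addr_reg[] or an INVCODE_STUB.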
3057
3058 void storelr_assemble(int i,struct regstat *i_regs)
3059 {
3060   int s,th,tl;
3061   int temp;
3062   int temp2=-1;
3063   int offset;
3064   int jaddr=0;
3065   int case1,case2,case3;
3066   int done0,done1,done2;
3067   int memtarget=0,c=0;
3068   int agr=AGEN1+(i&1);
3069   u_int hr,reglist=0;
3070   th=get_reg(i_regs->regmap,rs2[i]|64);
3071   tl=get_reg(i_regs->regmap,rs2[i]);
3072   s=get_reg(i_regs->regmap,rs1[i]);
3073   temp=get_reg(i_regs->regmap,agr);
3074   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3075   offset=imm[i];
3076   if(s>=0) {
3077     c=(i_regs->isconst>>s)&1;
3078     if(c) {
3079       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3080     }
3081   }
3082   assert(tl>=0);
3083   for(hr=0;hr<HOST_REGS;hr++) {
3084     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3085   }
3086   assert(temp>=0);
3087   if(!c) {
3088     emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3089     if(!offset&&s!=temp) emit_mov(s,temp);
3090     jaddr=(int)out;
3091     emit_jno(0);
3092   }
3093   else
3094   {
3095     if(!memtarget||!rs1[i]) {
3096       jaddr=(int)out;
3097       emit_jmp(0);
3098     }
3099   }
3100   #ifdef RAM_OFFSET
3101   int map=get_reg(i_regs->regmap,ROREG);
3102   if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3103   #else
3104   if((u_int)rdram!=0x80000000)
3105     emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3106   #endif
3107
3108   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3109     temp2=get_reg(i_regs->regmap,FTEMP);
3110     if(!rs2[i]) temp2=th=tl;
3111   }
3112
3113 #ifndef BIG_ENDIAN_MIPS
3114     emit_xorimm(temp,3,temp);
3115 #endif
3116   emit_testimm(temp,2);
3117   case2=(int)out;
3118   emit_jne(0);
3119   emit_testimm(temp,1);
3120   case1=(int)out;
3121   emit_jne(0);
3122   // 0
3123   if (opcode[i]==0x2A) { // SWL
3124     emit_writeword_indexed(tl,0,temp);
3125   }
3126   if (opcode[i]==0x2E) { // SWR
3127     emit_writebyte_indexed(tl,3,temp);
3128   }
3129   if (opcode[i]==0x2C) { // SDL
3130     emit_writeword_indexed(th,0,temp);
3131     if(rs2[i]) emit_mov(tl,temp2);
3132   }
3133   if (opcode[i]==0x2D) { // SDR
3134     emit_writebyte_indexed(tl,3,temp);
3135     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3136   }
3137   done0=(int)out;
3138   emit_jmp(0);
3139   // 1
3140   set_jump_target(case1,(int)out);
3141   if (opcode[i]==0x2A) { // SWL
3142     // Write 3 msb into three least significant bytes
3143     if(rs2[i]) emit_rorimm(tl,8,tl);
3144     emit_writehword_indexed(tl,-1,temp);
3145     if(rs2[i]) emit_rorimm(tl,16,tl);
3146     emit_writebyte_indexed(tl,1,temp);
3147     if(rs2[i]) emit_rorimm(tl,8,tl);
3148   }
3149   if (opcode[i]==0x2E) { // SWR
3150     // Write two lsb into two most significant bytes
3151     emit_writehword_indexed(tl,1,temp);
3152   }
3153   if (opcode[i]==0x2C) { // SDL
3154     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3155     // Write 3 msb into three least significant bytes
3156     if(rs2[i]) emit_rorimm(th,8,th);
3157     emit_writehword_indexed(th,-1,temp);
3158     if(rs2[i]) emit_rorimm(th,16,th);
3159     emit_writebyte_indexed(th,1,temp);
3160     if(rs2[i]) emit_rorimm(th,8,th);
3161   }
3162   if (opcode[i]==0x2D) { // SDR
3163     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3164     // Write two lsb into two most significant bytes
3165     emit_writehword_indexed(tl,1,temp);
3166   }
3167   done1=(int)out;
3168   emit_jmp(0);
3169   // 2
3170   set_jump_target(case2,(int)out);
3171   emit_testimm(temp,1);
3172   case3=(int)out;
3173   emit_jne(0);
3174   if (opcode[i]==0x2A) { // SWL
3175     // Write two msb into two least significant bytes
3176     if(rs2[i]) emit_rorimm(tl,16,tl);
3177     emit_writehword_indexed(tl,-2,temp);
3178     if(rs2[i]) emit_rorimm(tl,16,tl);
3179   }
3180   if (opcode[i]==0x2E) { // SWR
3181     // Write 3 lsb into three most significant bytes
3182     emit_writebyte_indexed(tl,-1,temp);
3183     if(rs2[i]) emit_rorimm(tl,8,tl);
3184     emit_writehword_indexed(tl,0,temp);
3185     if(rs2[i]) emit_rorimm(tl,24,tl);
3186   }
3187   if (opcode[i]==0x2C) { // SDL
3188     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3189     // Write two msb into two least significant bytes
3190     if(rs2[i]) emit_rorimm(th,16,th);
3191     emit_writehword_indexed(th,-2,temp);
3192     if(rs2[i]) emit_rorimm(th,16,th);
3193   }
3194   if (opcode[i]==0x2D) { // SDR
3195     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3196     // Write 3 lsb into three most significant bytes
3197     emit_writebyte_indexed(tl,-1,temp);
3198     if(rs2[i]) emit_rorimm(tl,8,tl);
3199     emit_writehword_indexed(tl,0,temp);
3200     if(rs2[i]) emit_rorimm(tl,24,tl);
3201   }
3202   done2=(int)out;
3203   emit_jmp(0);
3204   // 3
3205   set_jump_target(case3,(int)out);
3206   if (opcode[i]==0x2A) { // SWL
3207     // Write msb into least significant byte
3208     if(rs2[i]) emit_rorimm(tl,24,tl);
3209     emit_writebyte_indexed(tl,-3,temp);
3210     if(rs2[i]) emit_rorimm(tl,8,tl);
3211   }
3212   if (opcode[i]==0x2E) { // SWR
3213     // Write entire word
3214     emit_writeword_indexed(tl,-3,temp);
3215   }
3216   if (opcode[i]==0x2C) { // SDL
3217     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3218     // Write msb into least significant byte
3219     if(rs2[i]) emit_rorimm(th,24,th);
3220     emit_writebyte_indexed(th,-3,temp);
3221     if(rs2[i]) emit_rorimm(th,8,th);
3222   }
3223   if (opcode[i]==0x2D) { // SDR
3224     if(rs2[i]) emit_mov(th,temp2);
3225     // Write entire word
3226     emit_writeword_indexed(tl,-3,temp);
3227   }
3228   set_jump_target(done0,(int)out);
3229   set_jump_target(done1,(int)out);
3230   set_jump_target(done2,(int)out);
3231   if (opcode[i]==0x2C) { // SDL
3232     emit_testimm(temp,4);
3233     done0=(int)out;
3234     emit_jne(0);
3235     emit_andimm(temp,~3,temp);
3236     emit_writeword_indexed(temp2,4,temp);
3237     set_jump_target(done0,(int)out);
3238   }
3239   if (opcode[i]==0x2D) { // SDR
3240     emit_testimm(temp,4);
3241     done0=(int)out;
3242     emit_jeq(0);
3243     emit_andimm(temp,~3,temp);
3244     emit_writeword_indexed(temp2,-4,temp);
3245     set_jump_target(done0,(int)out);
3246   }
3247   if(!c||!memtarget)
3248     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3249   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3250     #ifdef RAM_OFFSET
3251     int map=get_reg(i_regs->regmap,ROREG);
3252     if(map<0) map=HOST_TEMPREG;
3253     gen_orig_addr_w(temp,map);
3254     #else
3255     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3256     #endif
3257     #if defined(HOST_IMM8)
3258     int ir=get_reg(i_regs->regmap,INVCP);
3259     assert(ir>=0);
3260     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3261     #else
3262     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3263     #endif
3264     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3265     emit_callne(invalidate_addr_reg[temp]);
3266     #else
3267     int jaddr2=(int)out;
3268     emit_jne(0);
3269     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3270     #endif
3271   }
3272   /*
3273     emit_pusha();
3274     //save_regs(0x100f);
3275         emit_readword((int)&last_count,ECX);
3276         if(get_reg(i_regs->regmap,CCREG)<0)
3277           emit_loadreg(CCREG,HOST_CCREG);
3278         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3279         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3280         emit_writeword(HOST_CCREG,(int)&Count);
3281     emit_call((int)memdebug);
3282     emit_popa();
3283     //restore_regs(0x100f);
3284   */
3285 }
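// storelr_assemble dispatch summary: after the little-endian address fixup
// (xor with 3), bits 1:0 of temp select one of four alignment cases, and
// each case writes only the bytes that SWL/SWR/SDL/SDR contribute,
// rotating the source value into place as needed.  For the 64-bit SDL/SDR
// forms the remaining word is collected in temp2 and written at the end,
// with testimm(temp,4) deciding which half of the doubleword it lands in.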
3286
3287 void c1ls_assemble(int i,struct regstat *i_regs)
3288 {
3289   cop1_unusable(i, i_regs);
3290 }
3291
3292 void c2ls_assemble(int i,struct regstat *i_regs)
3293 {
3294   int s,tl;
3295   int ar;
3296   int offset;
3297   int memtarget=0,c=0;
3298   int jaddr2=0,type;
3299   int agr=AGEN1+(i&1);
3300   int fastio_reg_override=0;
3301   u_int hr,reglist=0;
3302   u_int copr=(source[i]>>16)&0x1f;
3303   s=get_reg(i_regs->regmap,rs1[i]);
3304   tl=get_reg(i_regs->regmap,FTEMP);
3305   offset=imm[i];
3306   assert(rs1[i]>0);
3307   assert(tl>=0);
3308
3309   for(hr=0;hr<HOST_REGS;hr++) {
3310     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3311   }
3312   if(i_regs->regmap[HOST_CCREG]==CCREG)
3313     reglist&=~(1<<HOST_CCREG);
3314
3315   // get the address
3316   if (opcode[i]==0x3a) { // SWC2
3317     ar=get_reg(i_regs->regmap,agr);
3318     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3319     reglist|=1<<ar;
3320   } else { // LWC2
3321     ar=tl;
3322   }
3323   if(s>=0) c=(i_regs->wasconst>>s)&1;
3324   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3325   if (!offset&&!c&&s>=0) ar=s;
3326   assert(ar>=0);
3327
3328   if (opcode[i]==0x3a) { // SWC2
3329     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3330     type=STOREW_STUB;
3331   }
3332   else
3333     type=LOADW_STUB;
3334
3335   if(c&&!memtarget) {
3336     jaddr2=(int)out;
3337     emit_jmp(0); // inline_readstub/inline_writestub?
3338   }
3339   else {
3340     if(!c) {
3341       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3342     }
3343     else if(ram_offset&&memtarget) {
3344       emit_addimm(ar,ram_offset,HOST_TEMPREG);
3345       fastio_reg_override=HOST_TEMPREG;
3346     }
3347     if (opcode[i]==0x32) { // LWC2
3348       int a=ar;
3349       if(fastio_reg_override) a=fastio_reg_override;
3350       #ifdef HOST_IMM_ADDR32
3351       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3352       else
3353       #endif
3354       emit_readword_indexed(0,a,tl);
3355     }
3356     if (opcode[i]==0x3a) { // SWC2
3357       #ifdef DESTRUCTIVE_SHIFT
3358       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3359       #endif
3360       int a=ar;
3361       if(fastio_reg_override) a=fastio_reg_override;
3362       emit_writeword_indexed(tl,0,a);
3363     }
3364   }
3365   if(jaddr2)
3366     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3367   if(opcode[i]==0x3a) // SWC2
3368   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3369 #if defined(HOST_IMM8)
3370     int ir=get_reg(i_regs->regmap,INVCP);
3371     assert(ir>=0);
3372     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3373 #else
3374     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3375 #endif
3376     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3377     emit_callne(invalidate_addr_reg[ar]);
3378     #else
3379     int jaddr3=(int)out;
3380     emit_jne(0);
3381     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3382     #endif
3383   }
3384   if (opcode[i]==0x32) { // LWC2
3385     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3386   }
3387 }
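// c2ls_assemble moves data between memory and GTE (COP2) data registers:
// SWC2 copies the selected register (copr) into FTEMP via cop2_get_dreg()
// and then follows the usual store fast path, while LWC2 reads into FTEMP
// and commits it with cop2_put_dreg() once the access or its stub has been
// emitted.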
3388
3389 #ifndef multdiv_assemble
3390 void multdiv_assemble(int i,struct regstat *i_regs)
3391 {
3392   printf("Need multdiv_assemble for this architecture.\n");
3393   exit(1);
3394 }
3395 #endif
3396
3397 void mov_assemble(int i,struct regstat *i_regs)
3398 {
3399   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3400   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3401   if(rt1[i]) {
3402     signed char sh,sl,th,tl;
3403     th=get_reg(i_regs->regmap,rt1[i]|64);
3404     tl=get_reg(i_regs->regmap,rt1[i]);
3405     //assert(tl>=0);
3406     if(tl>=0) {
3407       sh=get_reg(i_regs->regmap,rs1[i]|64);
3408       sl=get_reg(i_regs->regmap,rs1[i]);
3409       if(sl>=0) emit_mov(sl,tl);
3410       else emit_loadreg(rs1[i],tl);
3411       if(th>=0) {
3412         if(sh>=0) emit_mov(sh,th);
3413         else emit_loadreg(rs1[i]|64,th);
3414       }
3415     }
3416   }
3417 }
3418
3419 #ifndef fconv_assemble
3420 void fconv_assemble(int i,struct regstat *i_regs)
3421 {
3422   printf("Need fconv_assemble for this architecture.\n");
3423   exit(1);
3424 }
3425 #endif
3426
3427 #if 0
3428 void float_assemble(int i,struct regstat *i_regs)
3429 {
3430   printf("Need float_assemble for this architecture.\n");
3431   exit(1);
3432 }
3433 #endif
3434
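// SYSCALL: record the PC, charge the accumulated cycles and jump to the
// HLE syscall handler.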
3435 void syscall_assemble(int i,struct regstat *i_regs)
3436 {
3437   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3438   assert(ccreg==HOST_CCREG);
3439   assert(!is_delayslot);
3440   (void)ccreg;
3441   emit_movimm(start+i*4,EAX); // Get PC
3442   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3443   emit_jmp((int)jump_syscall_hle); // XXX
3444 }
3445
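// HLECALL: pass the return PC and the HLE handler address (from psxHLEt)
// to jump_hlecall after charging the accumulated cycles.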
3446 void hlecall_assemble(int i,struct regstat *i_regs)
3447 {
3448   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3449   assert(ccreg==HOST_CCREG);
3450   assert(!is_delayslot);
3451   (void)ccreg;
3452   emit_movimm(start+i*4+4,0); // Get PC of the next instruction
3453   emit_movimm((int)psxHLEt[source[i]&7],1);
3454   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
3455   emit_jmp((int)jump_hlecall);
3456 }
3457
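// INTCALL: pass the current PC and the accumulated cycles to jump_intcall
// (interpreter fallback).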
3458 void intcall_assemble(int i,struct regstat *i_regs)
3459 {
3460   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3461   assert(ccreg==HOST_CCREG);
3462   assert(!is_delayslot);
3463   (void)ccreg;
3464   emit_movimm(start+i*4,0); // Get PC
3465   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3466   emit_jmp((int)jump_intcall);
3467 }
3468
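// Assemble the instruction in a branch delay slot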
3469 void ds_assemble(int i,struct regstat *i_regs)
3470 {
3471   speculate_register_values(i);
3472   is_delayslot=1;
3473   switch(itype[i]) {
3474     case ALU:
3475       alu_assemble(i,i_regs);break;
3476     case IMM16:
3477       imm16_assemble(i,i_regs);break;
3478     case SHIFT:
3479       shift_assemble(i,i_regs);break;
3480     case SHIFTIMM:
3481       shiftimm_assemble(i,i_regs);break;
3482     case LOAD:
3483       load_assemble(i,i_regs);break;
3484     case LOADLR:
3485       loadlr_assemble(i,i_regs);break;
3486     case STORE:
3487       store_assemble(i,i_regs);break;
3488     case STORELR:
3489       storelr_assemble(i,i_regs);break;
3490     case COP0:
3491       cop0_assemble(i,i_regs);break;
3492     case COP1:
3493       cop1_assemble(i,i_regs);break;
3494     case C1LS:
3495       c1ls_assemble(i,i_regs);break;
3496     case COP2:
3497       cop2_assemble(i,i_regs);break;
3498     case C2LS:
3499       c2ls_assemble(i,i_regs);break;
3500     case C2OP:
3501       c2op_assemble(i,i_regs);break;
3502     case FCONV:
3503       fconv_assemble(i,i_regs);break;
3504     case FLOAT:
3505       float_assemble(i,i_regs);break;
3506     case FCOMP:
3507       fcomp_assemble(i,i_regs);break;
3508     case MULTDIV:
3509       multdiv_assemble(i,i_regs);break;
3510     case MOV:
3511       mov_assemble(i,i_regs);break;
3512     case SYSCALL:
3513     case HLECALL:
3514     case INTCALL:
3515     case SPAN:
3516     case UJUMP:
3517     case RJUMP:
3518     case CJUMP:
3519     case SJUMP:
3520     case FJUMP:
3521       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
3522   }
3523   is_delayslot=0;
3524 }
3525
3526 // Is the branch target a valid internal jump?
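// e.g. with start=0x80030000 and slen=0x100, a target of 0x80030040 is
// internal, while 0x80040000 (outside this block) is not.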
3527 int internal_branch(uint64_t i_is32,int addr)
3528 {
3529   if(addr&1) return 0; // Indirect (register) jump
3530   if(addr>=start && addr<start+slen*4-4)
3531   {
3532     //int t=(addr-start)>>2;
3533     // Delay slots are not valid branch targets
3534     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3535     // 64 -> 32 bit transition requires a recompile
3536     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3537     {
3538       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3539       else printf("optimizable: yes\n");
3540     }*/
3541     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3542     return 1;
3543   }
3544   return 0;
3545 }
3546
3547 #ifndef wb_invalidate
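// Write back dirty values whose guest register is no longer mapped in
// 'entry', then move values whose guest register switches to a different
// host register (no writeback needed for those).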
3548 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3549   uint64_t u,uint64_t uu)
3550 {
3551   int hr;
3552   for(hr=0;hr<HOST_REGS;hr++) {
3553     if(hr!=EXCLUDE_REG) {
3554       if(pre[hr]!=entry[hr]) {
3555         if(pre[hr]>=0) {
3556           if((dirty>>hr)&1) {
3557             if(get_reg(entry,pre[hr])<0) {
3558               if(pre[hr]<64) {
3559                 if(!((u>>pre[hr])&1)) {
3560                   emit_storereg(pre[hr],hr);
3561                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3562                     emit_sarimm(hr,31,hr);
3563                     emit_storereg(pre[hr]|64,hr);
3564                   }
3565                 }
3566               }else{
3567                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3568                   emit_storereg(pre[hr],hr);
3569                 }
3570               }
3571             }
3572           }
3573         }
3574       }
3575     }
3576   }
3577   // Move from one register to another (no writeback)
3578   for(hr=0;hr<HOST_REGS;hr++) {
3579     if(hr!=EXCLUDE_REG) {
3580       if(pre[hr]!=entry[hr]) {
3581         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3582           int nr;
3583           if((nr=get_reg(entry,pre[hr]))>=0) {
3584             emit_mov(hr,nr);
3585           }
3586         }
3587       }
3588     }
3589   }
3590 }
3591 #endif
3592
3593 // Load the specified registers
3594 // This only loads the registers given as arguments because
3595 // we don't want to load things that will be overwritten
3596 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3597 {
3598   int hr;
3599   // Load 32-bit regs
3600   for(hr=0;hr<HOST_REGS;hr++) {
3601     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3602       if(entry[hr]!=regmap[hr]) {
3603         if(regmap[hr]==rs1||regmap[hr]==rs2)
3604         {
3605           if(regmap[hr]==0) {
3606             emit_zeroreg(hr);
3607           }
3608           else
3609           {
3610             emit_loadreg(regmap[hr],hr);
3611           }
3612         }
3613       }
3614     }
3615   }
3616   // Load 64-bit regs
3617   for(hr=0;hr<HOST_REGS;hr++) {
3618     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3619       if(entry[hr]!=regmap[hr]) {
3620         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3621         {
3622           assert(regmap[hr]!=64);
3623           if((is32>>(regmap[hr]&63))&1) {
3624             int lr=get_reg(regmap,regmap[hr]-64);
3625             if(lr>=0)
3626               emit_sarimm(lr,31,hr);
3627             else
3628               emit_loadreg(regmap[hr],hr);
3629           }
3630           else
3631           {
3632             emit_loadreg(regmap[hr],hr);
3633           }
3634         }
3635       }
3636     }
3637   }
3638 }
3639
3640 // Load registers prior to the start of a loop
3641 // so that they are not loaded within the loop
3642 static void loop_preload(signed char pre[],signed char entry[])
3643 {
3644   int hr;
3645   for(hr=0;hr<HOST_REGS;hr++) {
3646     if(hr!=EXCLUDE_REG) {
3647       if(pre[hr]!=entry[hr]) {
3648         if(entry[hr]>=0) {
3649           if(get_reg(pre,entry[hr])<0) {
3650             assem_debug("loop preload:\n");
3651             //printf("loop preload: %d\n",hr);
3652             if(entry[hr]==0) {
3653               emit_zeroreg(hr);
3654             }
3655             else if(entry[hr]<TEMPREG)
3656             {
3657               emit_loadreg(entry[hr],hr);
3658             }
3659             else if(entry[hr]-64<TEMPREG)
3660             {
3661               emit_loadreg(entry[hr],hr);
3662             }
3663           }
3664         }
3665       }
3666     }
3667   }
3668 }
3669
3670 // Generate the address for a load/store instruction
3671 // The address goes to AGEN for writes, and to FTEMP for LOADLR and cop1/cop2 loads
3672 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3673 {
3674   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3675     int ra=-1;
3676     int agr=AGEN1+(i&1);
3677     if(itype[i]==LOAD) {
3678       ra=get_reg(i_regs->regmap,rt1[i]);
3679       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3680       assert(ra>=0);
3681     }
3682     if(itype[i]==LOADLR) {
3683       ra=get_reg(i_regs->regmap,FTEMP);
3684     }
3685     if(itype[i]==STORE||itype[i]==STORELR) {
3686       ra=get_reg(i_regs->regmap,agr);
3687       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3688     }
3689     if(itype[i]==C1LS||itype[i]==C2LS) {
3690       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3691         ra=get_reg(i_regs->regmap,FTEMP);
3692       else { // SWC1/SDC1/SWC2/SDC2
3693         ra=get_reg(i_regs->regmap,agr);
3694         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3695       }
3696     }
3697     int rs=get_reg(i_regs->regmap,rs1[i]);
3698     if(ra>=0) {
3699       int offset=imm[i];
3700       int c=(i_regs->wasconst>>rs)&1;
3701       if(rs1[i]==0) {
3702         // Using r0 as a base address
3703         if(!entry||entry[ra]!=agr) {
3704           if (opcode[i]==0x22||opcode[i]==0x26) {
3705             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3706           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3707             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3708           }else{
3709             emit_movimm(offset,ra);
3710           }
3711         } // else did it in the previous cycle
3712       }
3713       else if(rs<0) {
3714         if(!entry||entry[ra]!=rs1[i])
3715           emit_loadreg(rs1[i],ra);
3716         //if(!entry||entry[ra]!=rs1[i])
3717         //  printf("poor load scheduling!\n");
3718       }
3719       else if(c) {
3720         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
3721           if(!entry||entry[ra]!=agr) {
3722             if (opcode[i]==0x22||opcode[i]==0x26) {
3723               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3724             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3725               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3726             }else{
3727               #ifdef HOST_IMM_ADDR32
3728               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3729               #endif
3730               emit_movimm(constmap[i][rs]+offset,ra);
3731               regs[i].loadedconst|=1<<ra;
3732             }
3733           } // else did it in the previous cycle
3734         } // else load_consts already did it
3735       }
3736       if(offset&&!c&&rs1[i]) {
3737         if(rs>=0) {
3738           emit_addimm(rs,offset,ra);
3739         }else{
3740           emit_addimm(ra,offset,ra);
3741         }
3742       }
3743     }
3744   }
3745   // Preload constants for the next instruction
3746   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
3747     int agr,ra;
3748     // Actual address
3749     agr=AGEN1+((i+1)&1);
3750     ra=get_reg(i_regs->regmap,agr);
3751     if(ra>=0) {
3752       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
3753       int offset=imm[i+1];
3754       int c=(regs[i+1].wasconst>>rs)&1;
3755       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
3756         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3757           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3758         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3759           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3760         }else{
3761           #ifdef HOST_IMM_ADDR32
3762           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3763           #endif
3764           emit_movimm(constmap[i+1][rs]+offset,ra);
3765           regs[i+1].loadedconst|=1<<ra;
3766         }
3767       }
3768       else if(rs1[i+1]==0) {
3769         // Using r0 as a base address
3770         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3771           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3772         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3773           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3774         }else{
3775           emit_movimm(offset,ra);
3776         }
3777       }
3778     }
3779   }
3780 }
3781
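// Determine the constant a host register should finally be loaded with,
// scanning forward while the same constant mapping persists; returns 0
// if the value turns out to be unneeded.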
3782 static int get_final_value(int hr, int i, int *value)
3783 {
3784   int reg=regs[i].regmap[hr];
3785   while(i<slen-1) {
3786     if(regs[i+1].regmap[hr]!=reg) break;
3787     if(!((regs[i+1].isconst>>hr)&1)) break;
3788     if(bt[i+1]) break;
3789     i++;
3790   }
3791   if(i<slen-1) {
3792     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
3793       *value=constmap[i][hr];
3794       return 1;
3795     }
3796     if(!bt[i+1]) {
3797       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
3798         // Load in delay slot, out-of-order execution
3799         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
3800         {
3801           // Precompute load address
3802           *value=constmap[i][hr]+imm[i+2];
3803           return 1;
3804         }
3805       }
3806       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
3807       {
3808         // Precompute load address
3809         *value=constmap[i][hr]+imm[i+1];
3810         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
3811         return 1;
3812       }
3813     }
3814   }
3815   *value=constmap[i][hr];
3816   //printf("c=%x\n",(int)constmap[i][hr]);
3817   if(i==slen-1) return 1;
3818   if(reg<64) {
3819     return !((unneeded_reg[i+1]>>reg)&1);
3820   }else{
3821     return !((unneeded_reg_upper[i+1]>>reg)&1);
3822   }
3823 }
3824
3825 // Load registers with known constants
3826 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
3827 {
3828   int hr,hr2;
3829   // Propagate loaded-constant flags from the previous instruction
3830   if(i==0||bt[i])
3831     regs[i].loadedconst=0;
3832   else {
3833     for(hr=0;hr<HOST_REGS;hr++) {
3834       if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
3835          &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
3836       {
3837         regs[i].loadedconst|=1<<hr;
3838       }
3839     }
3840   }
3841   // Load 32-bit regs
3842   for(hr=0;hr<HOST_REGS;hr++) {
3843     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3844       //if(entry[hr]!=regmap[hr]) {
3845       if(!((regs[i].loadedconst>>hr)&1)) {
3846         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3847           int value,similar=0;
3848           if(get_final_value(hr,i,&value)) {
3849             // see if some other register has similar value
3850             for(hr2=0;hr2<HOST_REGS;hr2++) {
3851               if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
3852                 if(is_similar_value(value,constmap[i][hr2])) {
3853                   similar=1;
3854                   break;
3855                 }
3856               }
3857             }
3858             if(similar) {
3859               int value2;
3860               if(get_final_value(hr2,i,&value2)) // is this needed?
3861                 emit_movimm_from(value2,hr2,value,hr);
3862               else
3863                 emit_movimm(value,hr);
3864             }
3865             else if(value==0) {
3866               emit_zeroreg(hr);
3867             }
3868             else {
3869               emit_movimm(value,hr);
3870             }
3871           }
3872           regs[i].loadedconst|=1<<hr;
3873         }
3874       }
3875     }
3876   }
3877   // Load 64-bit regs
3878   for(hr=0;hr<HOST_REGS;hr++) {
3879     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3880       //if(entry[hr]!=regmap[hr]) {
3881       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
3882         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3883           if((is32>>(regmap[hr]&63))&1) {
3884             int lr=get_reg(regmap,regmap[hr]-64);
3885             assert(lr>=0);
3886             emit_sarimm(lr,31,hr);
3887           }
3888           else
3889           {
3890             int value;
3891             if(get_final_value(hr,i,&value)) {
3892               if(value==0) {
3893                 emit_zeroreg(hr);
3894               }
3895               else {
3896                 emit_movimm(value,hr);
3897               }
3898             }
3899           }
3900         }
3901       }
3902     }
3903   }
3904 }
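// Like load_consts, but reloads every constant-mapped register that is
// marked dirty, without the look-ahead or reuse optimizations.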
3905 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
3906 {
3907   int hr;
3908   // Load 32-bit regs
3909   for(hr=0;hr<HOST_REGS;hr++) {
3910     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3911       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3912         int value=constmap[i][hr];
3913         if(value==0) {
3914           emit_zeroreg(hr);
3915         }
3916         else {
3917           emit_movimm(value,hr);
3918         }
3919       }
3920     }
3921   }
3922   // Load 64-bit regs
3923   for(hr=0;hr<HOST_REGS;hr++) {
3924     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3925       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3926         if((is32>>(regmap[hr]&63))&1) {
3927           int lr=get_reg(regmap,regmap[hr]-64);
3928           assert(lr>=0);
3929           emit_sarimm(lr,31,hr);
3930         }
3931         else
3932         {
3933           int value=constmap[i][hr];
3934           if(value==0) {
3935             emit_zeroreg(hr);
3936           }
3937           else {
3938             emit_movimm(value,hr);
3939           }
3940         }
3941       }
3942     }
3943   }
3944 }
3945
3946 // Write out all dirty registers (except cycle count)
3947 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
3948 {
3949   int hr;
3950   for(hr=0;hr<HOST_REGS;hr++) {
3951     if(hr!=EXCLUDE_REG) {
3952       if(i_regmap[hr]>0) {
3953         if(i_regmap[hr]!=CCREG) {
3954           if((i_dirty>>hr)&1) {
3955             if(i_regmap[hr]<64) {
3956               emit_storereg(i_regmap[hr],hr);
3957             }else{
3958               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3959                 emit_storereg(i_regmap[hr],hr);
3960               }
3961             }
3962           }
3963         }
3964       }
3965     }
3966   }
3967 }
3968 // Write out dirty registers that we need to reload (pair with load_needed_regs)
3969 // This writes the registers not written by store_regs_bt
3970 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
3971 {
3972   int hr;
3973   int t=(addr-start)>>2;
3974   for(hr=0;hr<HOST_REGS;hr++) {
3975     if(hr!=EXCLUDE_REG) {
3976       if(i_regmap[hr]>0) {
3977         if(i_regmap[hr]!=CCREG) {
3978           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
3979             if((i_dirty>>hr)&1) {
3980               if(i_regmap[hr]<64) {
3981                 emit_storereg(i_regmap[hr],hr);
3982               }else{
3983                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3984                   emit_storereg(i_regmap[hr],hr);
3985                 }
3986               }
3987             }
3988           }
3989         }
3990       }
3991     }
3992   }
3993 }
3994
3995 // Load all registers (except cycle count)
3996 void load_all_regs(signed char i_regmap[])
3997 {
3998   int hr;
3999   for(hr=0;hr<HOST_REGS;hr++) {
4000     if(hr!=EXCLUDE_REG) {
4001       if(i_regmap[hr]==0) {
4002         emit_zeroreg(hr);
4003       }
4004       else
4005       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4006       {
4007         emit_loadreg(i_regmap[hr],hr);
4008       }
4009     }
4010   }
4011 }
4012
4013 // Load all current registers that are also needed by the next instruction
4014 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4015 {
4016   int hr;
4017   for(hr=0;hr<HOST_REGS;hr++) {
4018     if(hr!=EXCLUDE_REG) {
4019       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4020         if(i_regmap[hr]==0) {
4021           emit_zeroreg(hr);
4022         }
4023         else
4024         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4025         {
4026           emit_loadreg(i_regmap[hr],hr);
4027         }
4028       }
4029     }
4030   }
4031 }
4032
4033 // Load all regs, storing cycle count if necessary
4034 void load_regs_entry(int t)
4035 {
4036   int hr;
4037   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4038   else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
4039   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4040     emit_storereg(CCREG,HOST_CCREG);
4041   }
4042   // Load 32-bit regs
4043   for(hr=0;hr<HOST_REGS;hr++) {
4044     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4045       if(regs[t].regmap_entry[hr]==0) {
4046         emit_zeroreg(hr);
4047       }
4048       else if(regs[t].regmap_entry[hr]!=CCREG)
4049       {
4050         emit_loadreg(regs[t].regmap_entry[hr],hr);
4051       }
4052     }
4053   }
4054   // Load 64-bit regs
4055   for(hr=0;hr<HOST_REGS;hr++) {
4056     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4057       assert(regs[t].regmap_entry[hr]!=64);
4058       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4059         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4060         if(lr<0) {
4061           emit_loadreg(regs[t].regmap_entry[hr],hr);
4062         }
4063         else
4064         {
4065           emit_sarimm(lr,31,hr);
4066         }
4067       }
4068       else
4069       {
4070         emit_loadreg(regs[t].regmap_entry[hr],hr);
4071       }
4072     }
4073   }
4074 }
4075
4076 // Store dirty registers prior to branch
4077 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4078 {
4079   if(internal_branch(i_is32,addr))
4080   {
4081     int t=(addr-start)>>2;
4082     int hr;
4083     for(hr=0;hr<HOST_REGS;hr++) {
4084       if(hr!=EXCLUDE_REG) {
4085         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4086           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4087             if((i_dirty>>hr)&1) {
4088               if(i_regmap[hr]<64) {
4089                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4090                   emit_storereg(i_regmap[hr],hr);
4091                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4092                     #ifdef DESTRUCTIVE_WRITEBACK
4093                     emit_sarimm(hr,31,hr);
4094                     emit_storereg(i_regmap[hr]|64,hr);
4095                     #else
4096                     emit_sarimm(hr,31,HOST_TEMPREG);
4097                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4098                     #endif
4099                   }
4100                 }
4101               }else{
4102                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4103                   emit_storereg(i_regmap[hr],hr);
4104                 }
4105               }
4106             }
4107           }
4108         }
4109       }
4110     }
4111   }
4112   else
4113   {
4114     // Branch out of this block, write out all dirty regs
4115     wb_dirtys(i_regmap,i_is32,i_dirty);
4116   }
4117 }
4118
4119 // Load all needed registers for branch target
4120 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4121 {
4122   //if(addr>=start && addr<(start+slen*4))
4123   if(internal_branch(i_is32,addr))
4124   {
4125     int t=(addr-start)>>2;
4126     int hr;
4127     // Store the cycle count before loading something else
4128     if(i_regmap[HOST_CCREG]!=CCREG) {
4129       assert(i_regmap[HOST_CCREG]==-1);
4130     }
4131     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4132       emit_storereg(CCREG,HOST_CCREG);
4133     }
4134     // Load 32-bit regs
4135     for(hr=0;hr<HOST_REGS;hr++) {
4136       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4137         #ifdef DESTRUCTIVE_WRITEBACK
4138         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4139         #else
4140         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4141         #endif
4142           if(regs[t].regmap_entry[hr]==0) {
4143             emit_zeroreg(hr);
4144           }
4145           else if(regs[t].regmap_entry[hr]!=CCREG)
4146           {
4147             emit_loadreg(regs[t].regmap_entry[hr],hr);
4148           }
4149         }
4150       }
4151     }
4152     // Load 64-bit regs
4153     for(hr=0;hr<HOST_REGS;hr++) {
4154       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4155         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4156           assert(regs[t].regmap_entry[hr]!=64);
4157           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4158             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4159             if(lr<0) {
4160               emit_loadreg(regs[t].regmap_entry[hr],hr);
4161             }
4162             else
4163             {
4164               emit_sarimm(lr,31,hr);
4165             }
4166           }
4167           else
4168           {
4169             emit_loadreg(regs[t].regmap_entry[hr],hr);
4170           }
4171         }
4172         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4173           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4174           assert(lr>=0);
4175           emit_sarimm(lr,31,hr);
4176         }
4177       }
4178     }
4179   }
4180 }
4181
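// Check whether the register mapping, dirty and 32-bit state at a branch
// are compatible with the entry state of the target at 'addr', so the
// branch can jump there without additional writeback or reloads.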
4182 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4183 {
4184   if(addr>=start && addr<start+slen*4-4)
4185   {
4186     int t=(addr-start)>>2;
4187     int hr;
4188     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4189     for(hr=0;hr<HOST_REGS;hr++)
4190     {
4191       if(hr!=EXCLUDE_REG)
4192       {
4193         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4194         {
4195           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4196           {
4197             return 0;
4198           }
4199           else
4200           if((i_dirty>>hr)&1)
4201           {
4202             if(i_regmap[hr]<TEMPREG)
4203             {
4204               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4205                 return 0;
4206             }
4207             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4208             {
4209               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4210                 return 0;
4211             }
4212           }
4213         }
4214         else // Same register but is it 32-bit or dirty?
4215         if(i_regmap[hr]>=0)
4216         {
4217           if(!((regs[t].dirty>>hr)&1))
4218           {
4219             if((i_dirty>>hr)&1)
4220             {
4221               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4222               {
4223                 //printf("%x: dirty no match\n",addr);
4224                 return 0;
4225               }
4226             }
4227           }
4228           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4229           {
4230             //printf("%x: is32 no match\n",addr);
4231             return 0;
4232           }
4233         }
4234       }
4235     }
4236     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4237     // Delay slots are not valid branch targets
4238     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4239     // Delay slots require additional processing, so do not match
4240     if(is_ds[t]) return 0;
4241   }
4242   else
4243   {
4244     int hr;
4245     for(hr=0;hr<HOST_REGS;hr++)
4246     {
4247       if(hr!=EXCLUDE_REG)
4248       {
4249         if(i_regmap[hr]>=0)
4250         {
4251           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4252           {
4253             if((i_dirty>>hr)&1)
4254             {
4255               return 0;
4256             }
4257           }
4258         }
4259       }
4260     }
4261   }
4262   return 1;
4263 }
4264
4265 // Used when a branch jumps into the delay slot of another branch
4266 void ds_assemble_entry(int i)
4267 {
4268   int t=(ba[i]-start)>>2;
4269   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4270   assem_debug("Assemble delay slot at %x\n",ba[i]);
4271   assem_debug("<->\n");
4272   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4273     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4274   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4275   address_generation(t,&regs[t],regs[t].regmap_entry);
4276   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4277     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4278   cop1_usable=0;
4279   is_delayslot=0;
4280   switch(itype[t]) {
4281     case ALU:
4282       alu_assemble(t,&regs[t]);break;
4283     case IMM16:
4284       imm16_assemble(t,&regs[t]);break;
4285     case SHIFT:
4286       shift_assemble(t,&regs[t]);break;
4287     case SHIFTIMM:
4288       shiftimm_assemble(t,&regs[t]);break;
4289     case LOAD:
4290       load_assemble(t,&regs[t]);break;
4291     case LOADLR:
4292       loadlr_assemble(t,&regs[t]);break;
4293     case STORE:
4294       store_assemble(t,&regs[t]);break;
4295     case STORELR:
4296       storelr_assemble(t,&regs[t]);break;
4297     case COP0:
4298       cop0_assemble(t,&regs[t]);break;
4299     case COP1:
4300       cop1_assemble(t,&regs[t]);break;
4301     case C1LS:
4302       c1ls_assemble(t,&regs[t]);break;
4303     case COP2:
4304       cop2_assemble(t,&regs[t]);break;
4305     case C2LS:
4306       c2ls_assemble(t,&regs[t]);break;
4307     case C2OP:
4308       c2op_assemble(t,&regs[t]);break;
4309     case FCONV:
4310       fconv_assemble(t,&regs[t]);break;
4311     case FLOAT:
4312       float_assemble(t,&regs[t]);break;
4313     case FCOMP:
4314       fcomp_assemble(t,&regs[t]);break;
4315     case MULTDIV:
4316       multdiv_assemble(t,&regs[t]);break;
4317     case MOV:
4318       mov_assemble(t,&regs[t]);break;
4319     case SYSCALL:
4320     case HLECALL:
4321     case INTCALL:
4322     case SPAN:
4323     case UJUMP:
4324     case RJUMP:
4325     case CJUMP:
4326     case SJUMP:
4327     case FJUMP:
4328       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4329   }
4330   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4331   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4332   if(internal_branch(regs[t].is32,ba[i]+4))
4333     assem_debug("branch: internal\n");
4334   else
4335     assem_debug("branch: external\n");
4336   assert(internal_branch(regs[t].is32,ba[i]+4));
4337   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4338   emit_jmp(0);
4339 }
4340
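// Emit the cycle-count test for a branch: add the cycles accumulated so
// far and branch to a CC_STUB when the count runs out; simple idle loops
// get special handling.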
4341 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4342 {
4343   int count;
4344   int jaddr;
4345   int idle=0;
4346   int t=0;
4347   if(itype[i]==RJUMP)
4348   {
4349     *adj=0;
4350   }
4351   //if(ba[i]>=start && ba[i]<(start+slen*4))
4352   if(internal_branch(branch_regs[i].is32,ba[i]))
4353   {
4354     t=(ba[i]-start)>>2;
4355     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4356     else *adj=ccadj[t];
4357   }
4358   else
4359   {
4360     *adj=0;
4361   }
4362   count=ccadj[i];
4363   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4364     // Idle loop
4365     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4366     idle=(int)out;
4367     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4368     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4369     jaddr=(int)out;
4370     emit_jmp(0);
4371   }
4372   else if(*adj==0||invert) {
4373     int cycles=CLOCK_ADJUST(count+2);
4374     // faster loop HACK
4375     if (t&&*adj) {
4376       int rel=t-i;
4377       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4378         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4379     }
4380     emit_addimm_and_set_flags(cycles,HOST_CCREG);
4381     jaddr=(int)out;
4382     emit_jns(0);
4383   }
4384   else
4385   {
4386     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4387     jaddr=(int)out;
4388     emit_jns(0);
4389   }
4390   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4391 }
4392
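// Assemble an out-of-line cycle-count stub: write back dirty registers,
// store the return PC (deriving it from the branch condition if needed),
// call cc_interrupt, then reload registers and return to the compiled code.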
4393 void do_ccstub(int n)
4394 {
4395   literal_pool(256);
4396   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4397   set_jump_target(stubs[n][1],(int)out);
4398   int i=stubs[n][4];
4399   if(stubs[n][6]==NULLDS) {
4400     // Delay slot instruction is nullified ("likely" branch)
4401     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4402   }
4403   else if(stubs[n][6]!=TAKEN) {
4404     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4405   }
4406   else {
4407     if(internal_branch(branch_regs[i].is32,ba[i]))
4408       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4409   }
4410   if(stubs[n][5]!=-1)
4411   {
4412     // Save PC as return address
4413     emit_movimm(stubs[n][5],EAX);
4414     emit_writeword(EAX,(int)&pcaddr);
4415   }
4416   else
4417   {
4418     // Return address depends on which way the branch goes
4419     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4420     {
4421       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4422       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4423       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4424       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4425       if(rs1[i]==0)
4426       {
4427         s1l=s2l;s1h=s2h;
4428         s2l=s2h=-1;
4429       }
4430       else if(rs2[i]==0)
4431       {
4432         s2l=s2h=-1;
4433       }
4434       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4435         s1h=s2h=-1;
4436       }
4437       assert(s1l>=0);
4438       #ifdef DESTRUCTIVE_WRITEBACK
4439       if(rs1[i]) {
4440         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4441           emit_loadreg(rs1[i],s1l);
4442       }
4443       else {
4444         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4445           emit_loadreg(rs2[i],s1l);
4446       }
4447       if(s2l>=0)
4448         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4449           emit_loadreg(rs2[i],s2l);
4450       #endif
4451       int hr=0;
4452       int addr=-1,alt=-1,ntaddr=-1;
4453       while(hr<HOST_REGS)
4454       {
4455         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4456            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4457            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4458         {
4459           addr=hr++;break;
4460         }
4461         hr++;
4462       }
4463       while(hr<HOST_REGS)
4464       {
4465         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4466            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4467            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4468         {
4469           alt=hr++;break;
4470         }
4471         hr++;
4472       }
4473       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4474       {
4475         while(hr<HOST_REGS)
4476         {
4477           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4478              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4479              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4480           {
4481             ntaddr=hr;break;
4482           }
4483           hr++;
4484         }
4485         assert(hr<HOST_REGS);
4486       }
4487       if((opcode[i]&0x2f)==4) // BEQ
4488       {
4489         #ifdef HAVE_CMOV_IMM
4490         if(s1h<0) {
4491           if(s2l>=0) emit_cmp(s1l,s2l);
4492           else emit_test(s1l,s1l);
4493           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4494         }
4495         else
4496         #endif
4497         {
4498           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4499           if(s1h>=0) {
4500             if(s2h>=0) emit_cmp(s1h,s2h);
4501             else emit_test(s1h,s1h);
4502             emit_cmovne_reg(alt,addr);
4503           }
4504           if(s2l>=0) emit_cmp(s1l,s2l);
4505           else emit_test(s1l,s1l);
4506           emit_cmovne_reg(alt,addr);
4507         }
4508       }
4509       if((opcode[i]&0x2f)==5) // BNE
4510       {
4511         #ifdef HAVE_CMOV_IMM
4512         if(s1h<0) {
4513           if(s2l>=0) emit_cmp(s1l,s2l);
4514           else emit_test(s1l,s1l);
4515           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4516         }
4517         else
4518         #endif
4519         {
4520           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4521           if(s1h>=0) {
4522             if(s2h>=0) emit_cmp(s1h,s2h);
4523             else emit_test(s1h,s1h);
4524             emit_cmovne_reg(alt,addr);
4525           }
4526           if(s2l>=0) emit_cmp(s1l,s2l);
4527           else emit_test(s1l,s1l);
4528           emit_cmovne_reg(alt,addr);
4529         }
4530       }
4531       if((opcode[i]&0x2f)==6) // BLEZ
4532       {
4533         //emit_movimm(ba[i],alt);
4534         //emit_movimm(start+i*4+8,addr);
4535         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4536         emit_cmpimm(s1l,1);
4537         if(s1h>=0) emit_mov(addr,ntaddr);
4538         emit_cmovl_reg(alt,addr);
4539         if(s1h>=0) {
4540           emit_test(s1h,s1h);
4541           emit_cmovne_reg(ntaddr,addr);
4542           emit_cmovs_reg(alt,addr);
4543         }
4544       }
4545       if((opcode[i]&0x2f)==7) // BGTZ
4546       {
4547         //emit_movimm(ba[i],addr);
4548         //emit_movimm(start+i*4+8,ntaddr);
4549         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4550         emit_cmpimm(s1l,1);
4551         if(s1h>=0) emit_mov(addr,alt);
4552         emit_cmovl_reg(ntaddr,addr);
4553         if(s1h>=0) {
4554           emit_test(s1h,s1h);
4555           emit_cmovne_reg(alt,addr);
4556           emit_cmovs_reg(ntaddr,addr);
4557         }
4558       }
4559       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4560       {
4561         //emit_movimm(ba[i],alt);
4562         //emit_movimm(start+i*4+8,addr);
4563         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4564         if(s1h>=0) emit_test(s1h,s1h);
4565         else emit_test(s1l,s1l);
4566         emit_cmovs_reg(alt,addr);
4567       }
4568       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4569       {
4570         //emit_movimm(ba[i],addr);
4571         //emit_movimm(start+i*4+8,alt);
4572         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4573         if(s1h>=0) emit_test(s1h,s1h);
4574         else emit_test(s1l,s1l);
4575         emit_cmovs_reg(alt,addr);
4576       }
4577       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4578         if(source[i]&0x10000) // BC1T
4579         {
4580           //emit_movimm(ba[i],alt);
4581           //emit_movimm(start+i*4+8,addr);
4582           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4583           emit_testimm(s1l,0x800000);
4584           emit_cmovne_reg(alt,addr);
4585         }
4586         else // BC1F
4587         {
4588           //emit_movimm(ba[i],addr);
4589           //emit_movimm(start+i*4+8,alt);
4590           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4591           emit_testimm(s1l,0x800000);
4592           emit_cmovne_reg(alt,addr);
4593         }
4594       }
4595       emit_writeword(addr,(int)&pcaddr);
4596     }
4597     else
4598     if(itype[i]==RJUMP)
4599     {
4600       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4601       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4602         r=get_reg(branch_regs[i].regmap,RTEMP);
4603       }
4604       emit_writeword(r,(int)&pcaddr);
4605     }
4606     else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
4607   }
4608   // Update cycle count
4609   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4610   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4611   emit_call((int)cc_interrupt);
4612   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4613   if(stubs[n][6]==TAKEN) {
4614     if(internal_branch(branch_regs[i].is32,ba[i]))
4615       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4616     else if(itype[i]==RJUMP) {
4617       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4618         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4619       else
4620         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4621     }
4622   }else if(stubs[n][6]==NOTTAKEN) {
4623     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4624     else load_all_regs(branch_regs[i].regmap);
4625   }else if(stubs[n][6]==NULLDS) {
4626     // Delay slot instruction is nullified ("likely" branch)
4627     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4628     else load_all_regs(regs[i].regmap);
4629   }else{
4630     load_all_regs(branch_regs[i].regmap);
4631   }
4632   emit_jmp(stubs[n][2]); // return address
4633
4634   /* This works but uses a lot of memory...
4635   emit_readword((int)&last_count,ECX);
4636   emit_add(HOST_CCREG,ECX,EAX);
4637   emit_writeword(EAX,(int)&Count);
4638   emit_call((int)gen_interupt);
4639   emit_readword((int)&Count,HOST_CCREG);
4640   emit_readword((int)&next_interupt,EAX);
4641   emit_readword((int)&pending_exception,EBX);
4642   emit_writeword(EAX,(int)&last_count);
4643   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4644   emit_test(EBX,EBX);
4645   int jne_instr=(int)out;
4646   emit_jne(0);
4647   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4648   load_all_regs(branch_regs[i].regmap);
4649   emit_jmp(stubs[n][2]); // return address
4650   set_jump_target(jne_instr,(int)out);
4651   emit_readword((int)&pcaddr,EAX);
4652   // Call get_addr_ht instead of doing the hash table here.
4653   // This code is executed infrequently and takes up a lot of space
4654   // so smaller is better.
4655   emit_storereg(CCREG,HOST_CCREG);
4656   emit_pushreg(EAX);
4657   emit_call((int)get_addr_ht);
4658   emit_loadreg(CCREG,HOST_CCREG);
4659   emit_addimm(ESP,4,ESP);
4660   emit_jmpreg(EAX);*/
4661 }
4662
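// Queue a branch for the per-block linker: the code address, the target
// address and a flag describing how to link it.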
4663 static void add_to_linker(int addr,int target,int ext)
4664 {
4665   link_addr[linkcount][0]=addr;
4666   link_addr[linkcount][1]=target;
4667   link_addr[linkcount][2]=ext;
4668   linkcount++;
4669 }
4670
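// Write the return address (PC+8) of a JAL into the link register (r31)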
4671 static void ujump_assemble_write_ra(int i)
4672 {
4673   int rt;
4674   unsigned int return_address;
4675   rt=get_reg(branch_regs[i].regmap,31);
4676   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4677   //assert(rt>=0);
4678   return_address=start+i*4+8;
4679   if(rt>=0) {
4680     #ifdef USE_MINI_HT
4681     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
4682       int temp=-1; // note: must be ds-safe
4683       #ifdef HOST_TEMPREG
4684       temp=HOST_TEMPREG;
4685       #endif
4686       if(temp>=0) do_miniht_insert(return_address,rt,temp);
4687       else emit_movimm(return_address,rt);
4688     }
4689     else
4690     #endif
4691     {
4692       #ifdef REG_PREFETCH
4693       if(temp>=0)
4694       {
4695         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4696       }
4697       #endif
4698       emit_movimm(return_address,rt); // PC into link register
4699       #ifdef IMM_PREFETCH
4700       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4701       #endif
4702     }
4703   }
4704 }
4705
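// Assemble an unconditional jump (J/JAL) and its delay slot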
4706 void ujump_assemble(int i,struct regstat *i_regs)
4707 {
4708   int ra_done=0;
4709   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4710   address_generation(i+1,i_regs,regs[i].regmap_entry);
4711   #ifdef REG_PREFETCH
4712   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4713   if(rt1[i]==31&&temp>=0)
4714   {
4715     signed char *i_regmap=i_regs->regmap;
4716     int return_address=start+i*4+8;
4717     if(get_reg(branch_regs[i].regmap,31)>0)
4718     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4719   }
4720   #endif
4721   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4722     ujump_assemble_write_ra(i); // writeback ra for DS
4723     ra_done=1;
4724   }
4725   ds_assemble(i+1,i_regs);
4726   uint64_t bc_unneeded=branch_regs[i].u;
4727   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4728   bc_unneeded|=1|(1LL<<rt1[i]);
4729   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4730   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4731                 bc_unneeded,bc_unneeded_upper);
4732   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4733   if(!ra_done&&rt1[i]==31)
4734     ujump_assemble_write_ra(i);
4735   int cc,adj;
4736   cc=get_reg(branch_regs[i].regmap,CCREG);
4737   assert(cc==HOST_CCREG);
4738   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4739   #ifdef REG_PREFETCH
4740   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4741   #endif
4742   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4743   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4744   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4745   if(internal_branch(branch_regs[i].is32,ba[i]))
4746     assem_debug("branch: internal\n");
4747   else
4748     assem_debug("branch: external\n");
4749   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
4750     ds_assemble_entry(i);
4751   }
4752   else {
4753     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
4754     emit_jmp(0);
4755   }
4756 }
4757
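// Write the return address (PC+8) of a JALR into its link register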
4758 static void rjump_assemble_write_ra(int i)
4759 {
4760   int rt,return_address;
4761   assert(rt1[i+1]!=rt1[i]);
4762   assert(rt2[i+1]!=rt1[i]);
4763   rt=get_reg(branch_regs[i].regmap,rt1[i]);
4764   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4765   assert(rt>=0);
4766   return_address=start+i*4+8;
4767   #ifdef REG_PREFETCH
4768   if(temp>=0)
4769   {
4770     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4771   }
4772   #endif
4773   emit_movimm(return_address,rt); // PC into link register
4774   #ifdef IMM_PREFETCH
4775   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4776   #endif
4777 }
4778
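// Assemble a register-indirect jump (JR/JALR) and its delay slot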
4779 void rjump_assemble(int i,struct regstat *i_regs)
4780 {
4781   int temp;
4782   int rs,cc;
4783   int ra_done=0;
4784   rs=get_reg(branch_regs[i].regmap,rs1[i]);
4785   assert(rs>=0);
4786   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4787     // Delay slot abuse, make a copy of the branch address register
4788     temp=get_reg(branch_regs[i].regmap,RTEMP);
4789     assert(temp>=0);
4790     assert(regs[i].regmap[temp]==RTEMP);
4791     emit_mov(rs,temp);
4792     rs=temp;
4793   }
4794   address_generation(i+1,i_regs,regs[i].regmap_entry);
4795   #ifdef REG_PREFETCH
4796   if(rt1[i]==31)
4797   {
4798     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
4799       signed char *i_regmap=i_regs->regmap;
4800       int return_address=start+i*4+8;
4801       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4802     }
4803   }
4804   #endif
4805   #ifdef USE_MINI_HT
4806   if(rs1[i]==31) {
4807     int rh=get_reg(regs[i].regmap,RHASH);
4808     if(rh>=0) do_preload_rhash(rh);
4809   }
4810   #endif
4811   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4812     rjump_assemble_write_ra(i);
4813     ra_done=1;
4814   }
4815   ds_assemble(i+1,i_regs);
4816   uint64_t bc_unneeded=branch_regs[i].u;
4817   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4818   bc_unneeded|=1|(1LL<<rt1[i]);
4819   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4820   bc_unneeded&=~(1LL<<rs1[i]);
4821   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4822                 bc_unneeded,bc_unneeded_upper);
4823   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
4824   if(!ra_done&&rt1[i]!=0)
4825     rjump_assemble_write_ra(i);
4826   cc=get_reg(branch_regs[i].regmap,CCREG);
4827   assert(cc==HOST_CCREG);
4828   (void)cc;
4829   #ifdef USE_MINI_HT
4830   int rh=get_reg(branch_regs[i].regmap,RHASH);
4831   int ht=get_reg(branch_regs[i].regmap,RHTBL);
4832   if(rs1[i]==31) {
4833     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
4834     do_preload_rhtbl(ht);
4835     do_rhash(rs,rh);
4836   }
4837   #endif
4838   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4839   #ifdef DESTRUCTIVE_WRITEBACK
4840   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
4841     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
4842       emit_loadreg(rs1[i],rs);
4843     }
4844   }
4845   #endif
4846   #ifdef REG_PREFETCH
4847   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4848   #endif
4849   #ifdef USE_MINI_HT
4850   if(rs1[i]==31) {
4851     do_miniht_load(ht,rh);
4852   }
4853   #endif
4854   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
4855   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
4856   //assert(adj==0);
4857   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4858   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
4859   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
4860     // special case for RFE
4861     emit_jmp(0);
4862   else
4863     emit_jns(0);
4864   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4865   #ifdef USE_MINI_HT
4866   if(rs1[i]==31) {
4867     do_miniht_jump(rs,rh,ht);
4868   }
4869   else
4870   #endif
4871   {
4872     //if(rs!=EAX) emit_mov(rs,EAX);
4873     //emit_jmp((int)jump_vaddr_eax);
4874     emit_jmp(jump_vaddr_reg[rs]);
4875   }
4876   /* Check hash table
4877   temp=!rs;
4878   emit_mov(rs,temp);
4879   emit_shrimm(rs,16,rs);
4880   emit_xor(temp,rs,rs);
4881   emit_movzwl_reg(rs,rs);
4882   emit_shlimm(rs,4,rs);
4883   emit_cmpmem_indexed((int)hash_table,rs,temp);
4884   emit_jne((int)out+14);
4885   emit_readword_indexed((int)hash_table+4,rs,rs);
4886   emit_jmpreg(rs);
4887   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
4888   emit_addimm_no_flags(8,rs);
4889   emit_jeq((int)out-17);
4890   // No hit on hash table, call compiler
4891   emit_pushreg(temp);
4892 //DEBUG >
4893 #ifdef DEBUG_CYCLE_COUNT
4894   emit_readword((int)&last_count,ECX);
4895   emit_add(HOST_CCREG,ECX,HOST_CCREG);
4896   emit_readword((int)&next_interupt,ECX);
4897   emit_writeword(HOST_CCREG,(int)&Count);
4898   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
4899   emit_writeword(ECX,(int)&last_count);
4900 #endif
4901 //DEBUG <
4902   emit_storereg(CCREG,HOST_CCREG);
4903   emit_call((int)get_addr);
4904   emit_loadreg(CCREG,HOST_CCREG);
4905   emit_addimm(ESP,4,ESP);
4906   emit_jmpreg(EAX);*/
4907   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4908   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
4909   #endif
4910 }
4911
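// Assemble a conditional branch (BEQ/BNE/BLEZ/BGTZ) and its delay slot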
4912 void cjump_assemble(int i,struct regstat *i_regs)
4913 {
4914   signed char *i_regmap=i_regs->regmap;
4915   int cc;
4916   int match;
4917   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4918   assem_debug("match=%d\n",match);
4919   int s1h,s1l,s2h,s2l;
4920   int prev_cop1_usable=cop1_usable;
4921   int unconditional=0,nop=0;
4922   int only32=0;
4923   int invert=0;
4924   int internal=internal_branch(branch_regs[i].is32,ba[i]);
4925   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4926   if(!match) invert=1;
4927   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4928   if(i>(ba[i]-start)>>2) invert=1;
4929   #endif
4930
4931   if(ooo[i]) {
4932     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4933     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4934     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4935     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4936   }
4937   else {
4938     s1l=get_reg(i_regmap,rs1[i]);
4939     s1h=get_reg(i_regmap,rs1[i]|64);
4940     s2l=get_reg(i_regmap,rs2[i]);
4941     s2h=get_reg(i_regmap,rs2[i]|64);
4942   }
4943   if(rs1[i]==0&&rs2[i]==0)
4944   {
4945     if(opcode[i]&1) nop=1;
4946     else unconditional=1;
4947     //assert(opcode[i]!=5);
4948     //assert(opcode[i]!=7);
4949     //assert(opcode[i]!=0x15);
4950     //assert(opcode[i]!=0x17);
4951   }
4952   else if(rs1[i]==0)
4953   {
4954     s1l=s2l;s1h=s2h;
4955     s2l=s2h=-1;
4956     only32=(regs[i].was32>>rs2[i])&1;
4957   }
4958   else if(rs2[i]==0)
4959   {
4960     s2l=s2h=-1;
4961     only32=(regs[i].was32>>rs1[i])&1;
4962   }
4963   else {
4964     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
4965   }
4966
4967   if(ooo[i]) {
4968     // Out of order execution (delay slot first)
4969     //printf("OOOE\n");
4970     address_generation(i+1,i_regs,regs[i].regmap_entry);
4971     ds_assemble(i+1,i_regs);
4972     int adj;
4973     uint64_t bc_unneeded=branch_regs[i].u;
4974     uint64_t bc_unneeded_upper=branch_regs[i].uu;
4975     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
4976     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
4977     bc_unneeded|=1;
4978     bc_unneeded_upper|=1;
4979     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4980                   bc_unneeded,bc_unneeded_upper);
4981     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
4982     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4983     cc=get_reg(branch_regs[i].regmap,CCREG);
4984     assert(cc==HOST_CCREG);
4985     if(unconditional)
4986       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4987     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
4988     //assem_debug("cycle count (adj)\n");
4989     if(unconditional) {
4990       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4991       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
4992         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4993         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4994         if(internal)
4995           assem_debug("branch: internal\n");
4996         else
4997           assem_debug("branch: external\n");
4998         if(internal&&is_ds[(ba[i]-start)>>2]) {
4999           ds_assemble_entry(i);
5000         }
5001         else {
5002           add_to_linker((int)out,ba[i],internal);
5003           emit_jmp(0);
5004         }
5005         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5006         if(((u_int)out)&7) emit_addnop(0);
5007         #endif
5008       }
5009     }
5010     else if(nop) {
5011       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5012       int jaddr=(int)out;
5013       emit_jns(0);
5014       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5015     }
5016     else {
5017       int taken=0,nottaken=0,nottaken1=0;
5018       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5019       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5020       if(!only32)
5021       {
5022         assert(s1h>=0);
5023         if(opcode[i]==4) // BEQ
5024         {
5025           if(s2h>=0) emit_cmp(s1h,s2h);
5026           else emit_test(s1h,s1h);
5027           nottaken1=(int)out;
5028           emit_jne(1);
5029         }
5030         if(opcode[i]==5) // BNE
5031         {
5032           if(s2h>=0) emit_cmp(s1h,s2h);
5033           else emit_test(s1h,s1h);
5034           if(invert) taken=(int)out;
5035           else add_to_linker((int)out,ba[i],internal);
5036           emit_jne(0);
5037         }
5038         if(opcode[i]==6) // BLEZ
5039         {
5040           emit_test(s1h,s1h);
5041           if(invert) taken=(int)out;
5042           else add_to_linker((int)out,ba[i],internal);
5043           emit_js(0);
5044           nottaken1=(int)out;
5045           emit_jne(1);
5046         }
5047         if(opcode[i]==7) // BGTZ
5048         {
5049           emit_test(s1h,s1h);
5050           nottaken1=(int)out;
5051           emit_js(1);
5052           if(invert) taken=(int)out;
5053           else add_to_linker((int)out,ba[i],internal);
5054           emit_jne(0);
5055         }
5056       } // if(!only32)
5057
5058       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5059       assert(s1l>=0);
5060       if(opcode[i]==4) // BEQ
5061       {
5062         if(s2l>=0) emit_cmp(s1l,s2l);
5063         else emit_test(s1l,s1l);
5064         if(invert){
5065           nottaken=(int)out;
5066           emit_jne(1);
5067         }else{
5068           add_to_linker((int)out,ba[i],internal);
5069           emit_jeq(0);
5070         }
5071       }
5072       if(opcode[i]==5) // BNE
5073       {
5074         if(s2l>=0) emit_cmp(s1l,s2l);
5075         else emit_test(s1l,s1l);
5076         if(invert){
5077           nottaken=(int)out;
5078           emit_jeq(1);
5079         }else{
5080           add_to_linker((int)out,ba[i],internal);
5081           emit_jne(0);
5082         }
5083       }
5084       if(opcode[i]==6) // BLEZ
5085       {
5086         emit_cmpimm(s1l,1);
5087         if(invert){
5088           nottaken=(int)out;
5089           emit_jge(1);
5090         }else{
5091           add_to_linker((int)out,ba[i],internal);
5092           emit_jl(0);
5093         }
5094       }
5095       if(opcode[i]==7) // BGTZ
5096       {
5097         emit_cmpimm(s1l,1);
5098         if(invert){
5099           nottaken=(int)out;
5100           emit_jl(1);
5101         }else{
5102           add_to_linker((int)out,ba[i],internal);
5103           emit_jge(0);
5104         }
5105       }
5106       if(invert) {
5107         if(taken) set_jump_target(taken,(int)out);
5108         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5109         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5110           if(adj) {
5111             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5112             add_to_linker((int)out,ba[i],internal);
5113           }else{
5114             emit_addnop(13);
5115             add_to_linker((int)out,ba[i],internal*2);
5116           }
5117           emit_jmp(0);
5118         }else
5119         #endif
5120         {
5121           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5122           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5123           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5124           if(internal)
5125             assem_debug("branch: internal\n");
5126           else
5127             assem_debug("branch: external\n");
5128           if(internal&&is_ds[(ba[i]-start)>>2]) {
5129             ds_assemble_entry(i);
5130           }
5131           else {
5132             add_to_linker((int)out,ba[i],internal);
5133             emit_jmp(0);
5134           }
5135         }
5136         set_jump_target(nottaken,(int)out);
5137       }
5138
5139       if(nottaken1) set_jump_target(nottaken1,(int)out);
5140       if(adj) {
5141         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5142       }
5143     } // (!unconditional)
5144   } // if(ooo)
5145   else
5146   {
5147     // In-order execution (branch first)
5148     //if(likely[i]) printf("IOL\n");
5149     //else
5150     //printf("IOE\n");
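    // Here the compare and conditional jump are emitted before the delay slot;
    // the taken and not-taken paths each get their own copy of the delay slot
    // (the not-taken path omits it for "likely" branches).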
5151     int taken=0,nottaken=0,nottaken1=0;
5152     if(!unconditional&&!nop) {
5153       if(!only32)
5154       {
5155         assert(s1h>=0);
5156         if((opcode[i]&0x2f)==4) // BEQ
5157         {
5158           if(s2h>=0) emit_cmp(s1h,s2h);
5159           else emit_test(s1h,s1h);
5160           nottaken1=(int)out;
5161           emit_jne(2);
5162         }
5163         if((opcode[i]&0x2f)==5) // BNE
5164         {
5165           if(s2h>=0) emit_cmp(s1h,s2h);
5166           else emit_test(s1h,s1h);
5167           taken=(int)out;
5168           emit_jne(1);
5169         }
5170         if((opcode[i]&0x2f)==6) // BLEZ
5171         {
5172           emit_test(s1h,s1h);
5173           taken=(int)out;
5174           emit_js(1);
5175           nottaken1=(int)out;
5176           emit_jne(2);
5177         }
5178         if((opcode[i]&0x2f)==7) // BGTZ
5179         {
5180           emit_test(s1h,s1h);
5181           nottaken1=(int)out;
5182           emit_js(2);
5183           taken=(int)out;
5184           emit_jne(1);
5185         }
5186       } // if(!only32)
5187
5188       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5189       assert(s1l>=0);
5190       if((opcode[i]&0x2f)==4) // BEQ
5191       {
5192         if(s2l>=0) emit_cmp(s1l,s2l);
5193         else emit_test(s1l,s1l);
5194         nottaken=(int)out;
5195         emit_jne(2);
5196       }
5197       if((opcode[i]&0x2f)==5) // BNE
5198       {
5199         if(s2l>=0) emit_cmp(s1l,s2l);
5200         else emit_test(s1l,s1l);
5201         nottaken=(int)out;
5202         emit_jeq(2);
5203       }
5204       if((opcode[i]&0x2f)==6) // BLEZ
5205       {
5206         emit_cmpimm(s1l,1);
5207         nottaken=(int)out;
5208         emit_jge(2);
5209       }
5210       if((opcode[i]&0x2f)==7) // BGTZ
5211       {
5212         emit_cmpimm(s1l,1);
5213         nottaken=(int)out;
5214         emit_jl(2);
5215       }
5216     } // if(!unconditional)
5217     int adj;
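    // ds_unneeded: MIPS registers dead after the branch, minus the delay
    // slot's own sources, so the writeback below keeps the delay slot's inputs.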
5218     uint64_t ds_unneeded=branch_regs[i].u;
5219     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5220     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5221     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5222     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5223     ds_unneeded|=1;
5224     ds_unneeded_upper|=1;
5225     // branch taken
5226     if(!nop) {
5227       if(taken) set_jump_target(taken,(int)out);
5228       assem_debug("1:\n");
5229       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5230                     ds_unneeded,ds_unneeded_upper);
5231       // load regs
5232       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5233       address_generation(i+1,&branch_regs[i],0);
5234       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5235       ds_assemble(i+1,&branch_regs[i]);
5236       cc=get_reg(branch_regs[i].regmap,CCREG);
5237       if(cc==-1) {
5238         emit_loadreg(CCREG,cc=HOST_CCREG);
5239         // CHECK: Is the following instruction (fall thru) allocated ok?
5240       }
5241       assert(cc==HOST_CCREG);
5242       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5243       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5244       assem_debug("cycle count (adj)\n");
5245       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5246       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5247       if(internal)
5248         assem_debug("branch: internal\n");
5249       else
5250         assem_debug("branch: external\n");
5251       if(internal&&is_ds[(ba[i]-start)>>2]) {
5252         ds_assemble_entry(i);
5253       }
5254       else {
5255         add_to_linker((int)out,ba[i],internal);
5256         emit_jmp(0);
5257       }
5258     }
5259     // branch not taken
5260     cop1_usable=prev_cop1_usable;
5261     if(!unconditional) {
5262       if(nottaken1) set_jump_target(nottaken1,(int)out);
5263       set_jump_target(nottaken,(int)out);
5264       assem_debug("2:\n");
5265       if(!likely[i]) {
5266         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5267                       ds_unneeded,ds_unneeded_upper);
5268         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5269         address_generation(i+1,&branch_regs[i],0);
5270         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5271         ds_assemble(i+1,&branch_regs[i]);
5272       }
5273       cc=get_reg(branch_regs[i].regmap,CCREG);
5274       if(cc==-1&&!likely[i]) {
5275         // Cycle count isn't in a register, temporarily load it then write it out
5276         emit_loadreg(CCREG,HOST_CCREG);
5277         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5278         int jaddr=(int)out;
5279         emit_jns(0);
5280         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5281         emit_storereg(CCREG,HOST_CCREG);
5282       }
5283       else{
5284         cc=get_reg(i_regmap,CCREG);
5285         assert(cc==HOST_CCREG);
5286         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5287         int jaddr=(int)out;
5288         emit_jns(0);
5289         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5290       }
5291     }
5292   }
5293 }
5294
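// Assemble a REGIMM branch (BLTZ/BGEZ and their AL/likely variants): tests the
// sign of a single register and, for the BxxZAL forms, writes the return
// address into r31 whether or not the branch is taken.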
5295 void sjump_assemble(int i,struct regstat *i_regs)
5296 {
5297   signed char *i_regmap=i_regs->regmap;
5298   int cc;
5299   int match;
5300   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5301   assem_debug("smatch=%d\n",match);
5302   int s1h,s1l;
5303   int prev_cop1_usable=cop1_usable;
5304   int unconditional=0,nevertaken=0;
5305   int only32=0;
5306   int invert=0;
5307   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5308   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5309   if(!match) invert=1;
5310   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5311   if(i>(ba[i]-start)>>2) invert=1;
5312   #endif
5313
5314   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5315   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5316
5317   if(ooo[i]) {
5318     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5319     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5320   }
5321   else {
5322     s1l=get_reg(i_regmap,rs1[i]);
5323     s1h=get_reg(i_regmap,rs1[i]|64);
5324   }
5325   if(rs1[i]==0)
5326   {
5327     if(opcode2[i]&1) unconditional=1;
5328     else nevertaken=1;
5329     // These are never taken (r0 is never less than zero)
5330     //assert(opcode2[i]!=0);
5331     //assert(opcode2[i]!=2);
5332     //assert(opcode2[i]!=0x10);
5333     //assert(opcode2[i]!=0x12);
5334   }
5335   else {
5336     only32=(regs[i].was32>>rs1[i])&1;
5337   }
5338
5339   if(ooo[i]) {
5340     // Out of order execution (delay slot first)
5341     //printf("OOOE\n");
5342     address_generation(i+1,i_regs,regs[i].regmap_entry);
5343     ds_assemble(i+1,i_regs);
5344     int adj;
5345     uint64_t bc_unneeded=branch_regs[i].u;
5346     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5347     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5348     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5349     bc_unneeded|=1;
5350     bc_unneeded_upper|=1;
5351     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5352                   bc_unneeded,bc_unneeded_upper);
5353     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5354     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5355     if(rt1[i]==31) {
5356       int rt,return_address;
5357       rt=get_reg(branch_regs[i].regmap,31);
5358       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5359       if(rt>=0) {
5360         // Save the PC even if the branch is not taken
5361         return_address=start+i*4+8;
5362         emit_movimm(return_address,rt); // PC into link register
5363         #ifdef IMM_PREFETCH
5364         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5365         #endif
5366       }
5367     }
5368     cc=get_reg(branch_regs[i].regmap,CCREG);
5369     assert(cc==HOST_CCREG);
5370     if(unconditional)
5371       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5372     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5373     assem_debug("cycle count (adj)\n");
5374     if(unconditional) {
5375       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5376       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5377         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5378         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5379         if(internal)
5380           assem_debug("branch: internal\n");
5381         else
5382           assem_debug("branch: external\n");
5383         if(internal&&is_ds[(ba[i]-start)>>2]) {
5384           ds_assemble_entry(i);
5385         }
5386         else {
5387           add_to_linker((int)out,ba[i],internal);
5388           emit_jmp(0);
5389         }
5390         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5391         if(((u_int)out)&7) emit_addnop(0);
5392         #endif
5393       }
5394     }
5395     else if(nevertaken) {
5396       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5397       int jaddr=(int)out;
5398       emit_jns(0);
5399       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5400     }
5401     else {
5402       int nottaken=0;
5403       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5404       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5405       if(!only32)
5406       {
5407         assert(s1h>=0);
5408         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5409         {
5410           emit_test(s1h,s1h);
5411           if(invert){
5412             nottaken=(int)out;
5413             emit_jns(1);
5414           }else{
5415             add_to_linker((int)out,ba[i],internal);
5416             emit_js(0);
5417           }
5418         }
5419         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5420         {
5421           emit_test(s1h,s1h);
5422           if(invert){
5423             nottaken=(int)out;
5424             emit_js(1);
5425           }else{
5426             add_to_linker((int)out,ba[i],internal);
5427             emit_jns(0);
5428           }
5429         }
5430       } // if(!only32)
5431       else
5432       {
5433         assert(s1l>=0);
5434         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5435         {
5436           emit_test(s1l,s1l);
5437           if(invert){
5438             nottaken=(int)out;
5439             emit_jns(1);
5440           }else{
5441             add_to_linker((int)out,ba[i],internal);
5442             emit_js(0);
5443           }
5444         }
5445         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5446         {
5447           emit_test(s1l,s1l);
5448           if(invert){
5449             nottaken=(int)out;
5450             emit_js(1);
5451           }else{
5452             add_to_linker((int)out,ba[i],internal);
5453             emit_jns(0);
5454           }
5455         }
5456       } // if(!only32)
5457
5458       if(invert) {
5459         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5460         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5461           if(adj) {
5462             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5463             add_to_linker((int)out,ba[i],internal);
5464           }else{
5465             emit_addnop(13);
5466             add_to_linker((int)out,ba[i],internal*2);
5467           }
5468           emit_jmp(0);
5469         }else
5470         #endif
5471         {
5472           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5473           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5474           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5475           if(internal)
5476             assem_debug("branch: internal\n");
5477           else
5478             assem_debug("branch: external\n");
5479           if(internal&&is_ds[(ba[i]-start)>>2]) {
5480             ds_assemble_entry(i);
5481           }
5482           else {
5483             add_to_linker((int)out,ba[i],internal);
5484             emit_jmp(0);
5485           }
5486         }
5487         set_jump_target(nottaken,(int)out);
5488       }
5489
5490       if(adj) {
5491         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5492       }
5493     } // (!unconditional)
5494   } // if(ooo)
5495   else
5496   {
5497     // In-order execution (branch first)
5498     //printf("IOE\n");
5499     int nottaken=0;
5500     if(rt1[i]==31) {
5501       int rt,return_address;
5502       rt=get_reg(branch_regs[i].regmap,31);
5503       if(rt>=0) {
5504         // Save the PC even if the branch is not taken
5505         return_address=start+i*4+8;
5506         emit_movimm(return_address,rt); // PC into link register
5507         #ifdef IMM_PREFETCH
5508         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5509         #endif
5510       }
5511     }
5512     if(!unconditional) {
5513       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5514       if(!only32)
5515       {
5516         assert(s1h>=0);
5517         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5518         {
5519           emit_test(s1h,s1h);
5520           nottaken=(int)out;
5521           emit_jns(1);
5522         }
5523         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5524         {
5525           emit_test(s1h,s1h);
5526           nottaken=(int)out;
5527           emit_js(1);
5528         }
5529       } // if(!only32)
5530       else
5531       {
5532         assert(s1l>=0);
5533         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5534         {
5535           emit_test(s1l,s1l);
5536           nottaken=(int)out;
5537           emit_jns(1);
5538         }
5539         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5540         {
5541           emit_test(s1l,s1l);
5542           nottaken=(int)out;
5543           emit_js(1);
5544         }
5545       }
5546     } // if(!unconditional)
5547     int adj;
5548     uint64_t ds_unneeded=branch_regs[i].u;
5549     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5550     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5551     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5552     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5553     ds_unneeded|=1;
5554     ds_unneeded_upper|=1;
5555     // branch taken
5556     if(!nevertaken) {
5557       //assem_debug("1:\n");
5558       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5559                     ds_unneeded,ds_unneeded_upper);
5560       // load regs
5561       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5562       address_generation(i+1,&branch_regs[i],0);
5563       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5564       ds_assemble(i+1,&branch_regs[i]);
5565       cc=get_reg(branch_regs[i].regmap,CCREG);
5566       if(cc==-1) {
5567         emit_loadreg(CCREG,cc=HOST_CCREG);
5568         // CHECK: Is the following instruction (fall thru) allocated ok?
5569       }
5570       assert(cc==HOST_CCREG);
5571       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5572       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5573       assem_debug("cycle count (adj)\n");
5574       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5575       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5576       if(internal)
5577         assem_debug("branch: internal\n");
5578       else
5579         assem_debug("branch: external\n");
5580       if(internal&&is_ds[(ba[i]-start)>>2]) {
5581         ds_assemble_entry(i);
5582       }
5583       else {
5584         add_to_linker((int)out,ba[i],internal);
5585         emit_jmp(0);
5586       }
5587     }
5588     // branch not taken
5589     cop1_usable=prev_cop1_usable;
5590     if(!unconditional) {
5591       set_jump_target(nottaken,(int)out);
5592       assem_debug("1:\n");
5593       if(!likely[i]) {
5594         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5595                       ds_unneeded,ds_unneeded_upper);
5596         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5597         address_generation(i+1,&branch_regs[i],0);
5598         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5599         ds_assemble(i+1,&branch_regs[i]);
5600       }
5601       cc=get_reg(branch_regs[i].regmap,CCREG);
5602       if(cc==-1&&!likely[i]) {
5603         // Cycle count isn't in a register, temporarily load it then write it out
5604         emit_loadreg(CCREG,HOST_CCREG);
5605         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5606         int jaddr=(int)out;
5607         emit_jns(0);
5608         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5609         emit_storereg(CCREG,HOST_CCREG);
5610       }
5611       else{
5612         cc=get_reg(i_regmap,CCREG);
5613         assert(cc==HOST_CCREG);
5614         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5615         int jaddr=(int)out;
5616         emit_jns(0);
5617         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5618       }
5619     }
5620   }
5621 }
5622
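// Assemble a COP1 condition branch (BC1F/BC1T and likely variants): emit the
// coprocessor-usable check if needed, then test the FP condition bit
// (0x800000) cached in FSREG to pick the taken/not-taken path.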
5623 void fjump_assemble(int i,struct regstat *i_regs)
5624 {
5625   signed char *i_regmap=i_regs->regmap;
5626   int cc;
5627   int match;
5628   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5629   assem_debug("fmatch=%d\n",match);
5630   int fs,cs;
5631   int eaddr;
5632   int invert=0;
5633   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5634   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5635   if(!match) invert=1;
5636   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5637   if(i>(ba[i]-start)>>2) invert=1;
5638   #endif
5639
5640   if(ooo[i]) {
5641     fs=get_reg(branch_regs[i].regmap,FSREG);
5642     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5643   }
5644   else {
5645     fs=get_reg(i_regmap,FSREG);
5646   }
5647
5648   // Check cop1 unusable
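  // If the Status register's CU1 bit (0x20000000) is clear, jump to an FP_STUB
  // that handles the coprocessor-unusable case; cop1_usable then stays set so
  // the test is not repeated later in this block.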
5649   if(!cop1_usable) {
5650     cs=get_reg(i_regmap,CSREG);
5651     assert(cs>=0);
5652     emit_testimm(cs,0x20000000);
5653     eaddr=(int)out;
5654     emit_jeq(0);
5655     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5656     cop1_usable=1;
5657   }
5658
5659   if(ooo[i]) {
5660     // Out of order execution (delay slot first)
5661     //printf("OOOE\n");
5662     ds_assemble(i+1,i_regs);
5663     int adj;
5664     uint64_t bc_unneeded=branch_regs[i].u;
5665     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5666     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5667     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5668     bc_unneeded|=1;
5669     bc_unneeded_upper|=1;
5670     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5671                   bc_unneeded,bc_unneeded_upper);
5672     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5673     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5674     cc=get_reg(branch_regs[i].regmap,CCREG);
5675     assert(cc==HOST_CCREG);
5676     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5677     assem_debug("cycle count (adj)\n");
5678     if(1) {
5679       int nottaken=0;
5680       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5681       if(1) {
5682         assert(fs>=0);
5683         emit_testimm(fs,0x800000);
5684         if(source[i]&0x10000) // BC1T
5685         {
5686           if(invert){
5687             nottaken=(int)out;
5688             emit_jeq(1);
5689           }else{
5690             add_to_linker((int)out,ba[i],internal);
5691             emit_jne(0);
5692           }
5693         }
5694         else // BC1F
5695           if(invert){
5696             nottaken=(int)out;
5697             emit_jne(1);
5698           }else{
5699             add_to_linker((int)out,ba[i],internal);
5700             emit_jeq(0);
5701           }
5704       } // if(1)
5705
5706       if(invert) {
5707         if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5708         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5709         else if(match) emit_addnop(13);
5710         #endif
5711         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5712         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5713         if(internal)
5714           assem_debug("branch: internal\n");
5715         else
5716           assem_debug("branch: external\n");
5717         if(internal&&is_ds[(ba[i]-start)>>2]) {
5718           ds_assemble_entry(i);
5719         }
5720         else {
5721           add_to_linker((int)out,ba[i],internal);
5722           emit_jmp(0);
5723         }
5724         set_jump_target(nottaken,(int)out);
5725       }
5726
5727       if(adj) {
5728         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5729       }
5730     } // (!unconditional)
5731   } // if(ooo)
5732   else
5733   {
5734     // In-order execution (branch first)
5735     //printf("IOE\n");
5736     int nottaken=0;
5737     if(1) {
5738       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5739       if(1) {
5740         assert(fs>=0);
5741         emit_testimm(fs,0x800000);
5742         if(source[i]&0x10000) // BC1T
5743         {
5744           nottaken=(int)out;
5745           emit_jeq(1);
5746         }
5747         else // BC1F
5748         {
5749           nottaken=(int)out;
5750           emit_jne(1);
5751         }
5752       }
5753     } // if(!unconditional)
5754     int adj;
5755     uint64_t ds_unneeded=branch_regs[i].u;
5756     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5757     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5758     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5759     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5760     ds_unneeded|=1;
5761     ds_unneeded_upper|=1;
5762     // branch taken
5763     //assem_debug("1:\n");
5764     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5765                   ds_unneeded,ds_unneeded_upper);
5766     // load regs
5767     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5768     address_generation(i+1,&branch_regs[i],0);
5769     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5770     ds_assemble(i+1,&branch_regs[i]);
5771     cc=get_reg(branch_regs[i].regmap,CCREG);
5772     if(cc==-1) {
5773       emit_loadreg(CCREG,cc=HOST_CCREG);
5774       // CHECK: Is the following instruction (fall thru) allocated ok?
5775     }
5776     assert(cc==HOST_CCREG);
5777     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5778     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5779     assem_debug("cycle count (adj)\n");
5780     if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5781     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5782     if(internal)
5783       assem_debug("branch: internal\n");
5784     else
5785       assem_debug("branch: external\n");
5786     if(internal&&is_ds[(ba[i]-start)>>2]) {
5787       ds_assemble_entry(i);
5788     }
5789     else {
5790       add_to_linker((int)out,ba[i],internal);
5791       emit_jmp(0);
5792     }
5793
5794     // branch not taken
5795     if(1) { // <- FIXME (don't need this)
5796       set_jump_target(nottaken,(int)out);
5797       assem_debug("1:\n");
5798       if(!likely[i]) {
5799         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5800                       ds_unneeded,ds_unneeded_upper);
5801         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5802         address_generation(i+1,&branch_regs[i],0);
5803         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5804         ds_assemble(i+1,&branch_regs[i]);
5805       }
5806       cc=get_reg(branch_regs[i].regmap,CCREG);
5807       if(cc==-1&&!likely[i]) {
5808         // Cycle count isn't in a register, temporarily load it then write it out
5809         emit_loadreg(CCREG,HOST_CCREG);
5810         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5811         int jaddr=(int)out;
5812         emit_jns(0);
5813         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5814         emit_storereg(CCREG,HOST_CCREG);
5815       }
5816       else{
5817         cc=get_reg(i_regmap,CCREG);
5818         assert(cc==HOST_CCREG);
5819         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5820         int jaddr=(int)out;
5821         emit_jns(0);
5822         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5823       }
5824     }
5825   }
5826 }
5827
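// Assemble a branch whose delay slot falls on the next virtual page and thus
// in a different block: the target address is computed into a host register
// (HOST_BTREG when free), all dirty registers are written back, and control
// leaves through an external jump stub.  pagespan_ds() below compiles the
// orphaned delay slot at the start of the following block.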
5828 static void pagespan_assemble(int i,struct regstat *i_regs)
5829 {
5830   int s1l=get_reg(i_regs->regmap,rs1[i]);
5831   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
5832   int s2l=get_reg(i_regs->regmap,rs2[i]);
5833   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
5834   int taken=0;
5835   int nottaken=0;
5836   int unconditional=0;
5837   if(rs1[i]==0)
5838   {
5839     s1l=s2l;s1h=s2h;
5840     s2l=s2h=-1;
5841   }
5842   else if(rs2[i]==0)
5843   {
5844     s2l=s2h=-1;
5845   }
5846   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
5847     s1h=s2h=-1;
5848   }
5849   int hr=0;
5850   int addr=-1,alt=-1,ntaddr=-1;
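  // Pick scratch host registers: 'addr' receives the chosen branch target,
  // 'alt' the alternative (fall-through) address, and 'ntaddr' an extra
  // temporary for the BLEZ/BGTZ three-way select below.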
5851   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
5852   else {
5853     while(hr<HOST_REGS)
5854     {
5855       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5856          (i_regs->regmap[hr]&63)!=rs1[i] &&
5857          (i_regs->regmap[hr]&63)!=rs2[i] )
5858       {
5859         addr=hr++;break;
5860       }
5861       hr++;
5862     }
5863   }
5864   while(hr<HOST_REGS)
5865   {
5866     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5867        (i_regs->regmap[hr]&63)!=rs1[i] &&
5868        (i_regs->regmap[hr]&63)!=rs2[i] )
5869     {
5870       alt=hr++;break;
5871     }
5872     hr++;
5873   }
5874   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5875   {
5876     while(hr<HOST_REGS)
5877     {
5878       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5879          (i_regs->regmap[hr]&63)!=rs1[i] &&
5880          (i_regs->regmap[hr]&63)!=rs2[i] )
5881       {
5882         ntaddr=hr;break;
5883       }
5884       hr++;
5885     }
5886   }
5887   assert(hr<HOST_REGS);
5888   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
5889     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
5890   }
5891   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5892   if(opcode[i]==2) // J
5893   {
5894     unconditional=1;
5895   }
5896   if(opcode[i]==3) // JAL
5897   {
5898     // TODO: mini_ht
5899     int rt=get_reg(i_regs->regmap,31);
5900     emit_movimm(start+i*4+8,rt);
5901     unconditional=1;
5902   }
5903   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
5904   {
5905     emit_mov(s1l,addr);
5906     if(opcode2[i]==9) // JALR
5907     {
5908       int rt=get_reg(i_regs->regmap,rt1[i]);
5909       emit_movimm(start+i*4+8,rt);
5910     }
5911   }
5912   if((opcode[i]&0x3f)==4) // BEQ
5913   {
5914     if(rs1[i]==rs2[i])
5915     {
5916       unconditional=1;
5917     }
5918     else
5919     #ifdef HAVE_CMOV_IMM
5920     if(s1h<0) {
5921       if(s2l>=0) emit_cmp(s1l,s2l);
5922       else emit_test(s1l,s1l);
5923       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5924     }
5925     else
5926     #endif
5927     {
5928       assert(s1l>=0);
5929       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5930       if(s1h>=0) {
5931         if(s2h>=0) emit_cmp(s1h,s2h);
5932         else emit_test(s1h,s1h);
5933         emit_cmovne_reg(alt,addr);
5934       }
5935       if(s2l>=0) emit_cmp(s1l,s2l);
5936       else emit_test(s1l,s1l);
5937       emit_cmovne_reg(alt,addr);
5938     }
5939   }
5940   if((opcode[i]&0x3f)==5) // BNE
5941   {
5942     #ifdef HAVE_CMOV_IMM
5943     if(s1h<0) {
5944       if(s2l>=0) emit_cmp(s1l,s2l);
5945       else emit_test(s1l,s1l);
5946       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5947     }
5948     else
5949     #endif
5950     {
5951       assert(s1l>=0);
5952       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5953       if(s1h>=0) {
5954         if(s2h>=0) emit_cmp(s1h,s2h);
5955         else emit_test(s1h,s1h);
5956         emit_cmovne_reg(alt,addr);
5957       }
5958       if(s2l>=0) emit_cmp(s1l,s2l);
5959       else emit_test(s1l,s1l);
5960       emit_cmovne_reg(alt,addr);
5961     }
5962   }
5963   if((opcode[i]&0x3f)==0x14) // BEQL
5964   {
5965     if(s1h>=0) {
5966       if(s2h>=0) emit_cmp(s1h,s2h);
5967       else emit_test(s1h,s1h);
5968       nottaken=(int)out;
5969       emit_jne(0);
5970     }
5971     if(s2l>=0) emit_cmp(s1l,s2l);
5972     else emit_test(s1l,s1l);
5973     if(nottaken) set_jump_target(nottaken,(int)out);
5974     nottaken=(int)out;
5975     emit_jne(0);
5976   }
5977   if((opcode[i]&0x3f)==0x15) // BNEL
5978   {
5979     if(s1h>=0) {
5980       if(s2h>=0) emit_cmp(s1h,s2h);
5981       else emit_test(s1h,s1h);
5982       taken=(int)out;
5983       emit_jne(0);
5984     }
5985     if(s2l>=0) emit_cmp(s1l,s2l);
5986     else emit_test(s1l,s1l);
5987     nottaken=(int)out;
5988     emit_jeq(0);
5989     if(taken) set_jump_target(taken,(int)out);
5990   }
5991   if((opcode[i]&0x3f)==6) // BLEZ
5992   {
5993     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5994     emit_cmpimm(s1l,1);
5995     if(s1h>=0) emit_mov(addr,ntaddr);
5996     emit_cmovl_reg(alt,addr);
5997     if(s1h>=0) {
5998       emit_test(s1h,s1h);
5999       emit_cmovne_reg(ntaddr,addr);
6000       emit_cmovs_reg(alt,addr);
6001     }
6002   }
6003   if((opcode[i]&0x3f)==7) // BGTZ
6004   {
6005     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6006     emit_cmpimm(s1l,1);
6007     if(s1h>=0) emit_mov(addr,alt);
6008     emit_cmovl_reg(ntaddr,addr);
6009     if(s1h>=0) {
6010       emit_test(s1h,s1h);
6011       emit_cmovne_reg(alt,addr);
6012       emit_cmovs_reg(ntaddr,addr);
6013     }
6014   }
6015   if((opcode[i]&0x3f)==0x16) // BLEZL
6016   {
6017     assert((opcode[i]&0x3f)!=0x16);
6018   }
6019   if((opcode[i]&0x3f)==0x17) // BGTZL
6020   {
6021     assert((opcode[i]&0x3f)!=0x17);
6022   }
6023   assert(opcode[i]!=1); // BLTZ/BGEZ
6024
6025   //FIXME: Check CSREG
6026   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6027     if((source[i]&0x30000)==0) // BC1F
6028     {
6029       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6030       emit_testimm(s1l,0x800000);
6031       emit_cmovne_reg(alt,addr);
6032     }
6033     if((source[i]&0x30000)==0x10000) // BC1T
6034     {
6035       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6036       emit_testimm(s1l,0x800000);
6037       emit_cmovne_reg(alt,addr);
6038     }
6039     if((source[i]&0x30000)==0x20000) // BC1FL
6040     {
6041       emit_testimm(s1l,0x800000);
6042       nottaken=(int)out;
6043       emit_jne(0);
6044     }
6045     if((source[i]&0x30000)==0x30000) // BC1TL
6046     {
6047       emit_testimm(s1l,0x800000);
6048       nottaken=(int)out;
6049       emit_jeq(0);
6050     }
6051   }
6052
6053   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6054   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6055   if(likely[i]||unconditional)
6056   {
6057     emit_movimm(ba[i],HOST_BTREG);
6058   }
6059   else if(addr!=HOST_BTREG)
6060   {
6061     emit_mov(addr,HOST_BTREG);
6062   }
6063   void *branch_addr=out;
6064   emit_jmp(0);
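  // Presumably start+i*4+5 is the next page's first instruction (the orphaned
  // delay slot) plus one, matching the start+1 entry address that
  // pagespan_ds() registers in jump_in for that block.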
6065   int target_addr=start+i*4+5;
6066   void *stub=out;
6067   void *compiled_target_addr=check_addr(target_addr);
6068   emit_extjump_ds((int)branch_addr,target_addr);
6069   if(compiled_target_addr) {
6070     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6071     add_link(target_addr,stub);
6072   }
6073   else set_jump_target((int)branch_addr,(int)stub);
6074   if(likely[i]) {
6075     // Not-taken path
6076     set_jump_target((int)nottaken,(int)out);
6077     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6078     void *branch_addr=out;
6079     emit_jmp(0);
6080     int target_addr=start+i*4+8;
6081     void *stub=out;
6082     void *compiled_target_addr=check_addr(target_addr);
6083     emit_extjump_ds((int)branch_addr,target_addr);
6084     if(compiled_target_addr) {
6085       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6086       add_link(target_addr,stub);
6087     }
6088     else set_jump_target((int)branch_addr,(int)stub);
6089   }
6090 }
6091
6092 // Assemble the delay slot for the above
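// The delay slot at the start of this block is compiled first; the saved
// branch target (BTREG) is then compared with start+4: if they match, the
// branch was not taken and execution falls through into the rest of the
// block, otherwise jump_vaddr dispatches to the real target.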
6093 static void pagespan_ds()
6094 {
6095   assem_debug("initial delay slot:\n");
6096   u_int vaddr=start+1;
6097   u_int page=get_page(vaddr);
6098   u_int vpage=get_vpage(vaddr);
6099   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6100   do_dirty_stub_ds();
6101   ll_add(jump_in+page,vaddr,(void *)out);
6102   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6103   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6104     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6105   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6106     emit_writeword(HOST_BTREG,(int)&branch_target);
6107   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6108   address_generation(0,&regs[0],regs[0].regmap_entry);
6109   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6110     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6111   cop1_usable=0;
6112   is_delayslot=0;
6113   switch(itype[0]) {
6114     case ALU:
6115       alu_assemble(0,&regs[0]);break;
6116     case IMM16:
6117       imm16_assemble(0,&regs[0]);break;
6118     case SHIFT:
6119       shift_assemble(0,&regs[0]);break;
6120     case SHIFTIMM:
6121       shiftimm_assemble(0,&regs[0]);break;
6122     case LOAD:
6123       load_assemble(0,&regs[0]);break;
6124     case LOADLR:
6125       loadlr_assemble(0,&regs[0]);break;
6126     case STORE:
6127       store_assemble(0,&regs[0]);break;
6128     case STORELR:
6129       storelr_assemble(0,&regs[0]);break;
6130     case COP0:
6131       cop0_assemble(0,&regs[0]);break;
6132     case COP1:
6133       cop1_assemble(0,&regs[0]);break;
6134     case C1LS:
6135       c1ls_assemble(0,&regs[0]);break;
6136     case COP2:
6137       cop2_assemble(0,&regs[0]);break;
6138     case C2LS:
6139       c2ls_assemble(0,&regs[0]);break;
6140     case C2OP:
6141       c2op_assemble(0,&regs[0]);break;
6142     case FCONV:
6143       fconv_assemble(0,&regs[0]);break;
6144     case FLOAT:
6145       float_assemble(0,&regs[0]);break;
6146     case FCOMP:
6147       fcomp_assemble(0,&regs[0]);break;
6148     case MULTDIV:
6149       multdiv_assemble(0,&regs[0]);break;
6150     case MOV:
6151       mov_assemble(0,&regs[0]);break;
6152     case SYSCALL:
6153     case HLECALL:
6154     case INTCALL:
6155     case SPAN:
6156     case UJUMP:
6157     case RJUMP:
6158     case CJUMP:
6159     case SJUMP:
6160     case FJUMP:
6161       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
6162   }
6163   int btaddr=get_reg(regs[0].regmap,BTREG);
6164   if(btaddr<0) {
6165     btaddr=get_reg(regs[0].regmap,-1);
6166     emit_readword((int)&branch_target,btaddr);
6167   }
6168   assert(btaddr!=HOST_CCREG);
6169   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6170 #ifdef HOST_IMM8
6171   emit_movimm(start+4,HOST_TEMPREG);
6172   emit_cmp(btaddr,HOST_TEMPREG);
6173 #else
6174   emit_cmpimm(btaddr,start+4);
6175 #endif
6176   int branch=(int)out;
6177   emit_jeq(0);
6178   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6179   emit_jmp(jump_vaddr_reg[btaddr]);
6180   set_jump_target(branch,(int)out);
6181   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6182   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6183 }
6184
6185 // Basic liveness analysis for MIPS registers
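// Scanning backwards, a register written by an instruction is unneeded above
// it (its previous value is dead) and a register it reads is needed again.
// u/uu are one-bit-per-register masks (uu covers the 64-bit upper halves, a
// leftover from the N64 version), gte_u the same for GTE/COP2 registers.
// Branches merge in the delay slot and the liveness at the branch target,
// recursing a few levels for backward branches.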
6186 void unneeded_registers(int istart,int iend,int r)
6187 {
6188   int i;
6189   uint64_t u,uu,gte_u,b,bu,gte_bu;
6190   uint64_t temp_u,temp_uu,temp_gte_u=0;
6191   uint64_t tdep;
6192   uint64_t gte_u_unknown=0;
6193   if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
6194     gte_u_unknown=~0ll;
6195   if(iend==slen-1) {
6196     u=1;uu=1;
6197     gte_u=gte_u_unknown;
6198   }else{
6199     u=unneeded_reg[iend+1];
6200     uu=unneeded_reg_upper[iend+1];
6201     u=1;uu=1;
6202     gte_u=gte_unneeded[iend+1];
6203   }
6204
6205   for (i=iend;i>=istart;i--)
6206   {
6207     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6208     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6209     {
6210       // If subroutine call, flag return address as a possible branch target
6211       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6212
6213       if(ba[i]<start || ba[i]>=(start+slen*4))
6214       {
6215         // Branch out of this block, flush all regs
6216         u=1;
6217         uu=1;
6218         gte_u=gte_u_unknown;
6219         /* Hexagon hack
6220         if(itype[i]==UJUMP&&rt1[i]==31)
6221         {
6222           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6223         }
6224         if(itype[i]==RJUMP&&rs1[i]==31)
6225         {
6226           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6227         }
6228         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6229           if(itype[i]==UJUMP&&rt1[i]==31)
6230           {
6231             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6232             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6233           }
6234           if(itype[i]==RJUMP&&rs1[i]==31)
6235           {
6236             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6237             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6238           }
6239         }*/
6240         branch_unneeded_reg[i]=u;
6241         branch_unneeded_reg_upper[i]=uu;
6242         // Merge in delay slot
6243         tdep=(~uu>>rt1[i+1])&1;
6244         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6245         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6246         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6247         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6248         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6249         u|=1;uu|=1;
6250         gte_u|=gte_rt[i+1];
6251         gte_u&=~gte_rs[i+1];
6252         // If branch is "likely" (and conditional)
6253         // then we skip the delay slot on the fall-thru path
6254         if(likely[i]) {
6255           if(i<slen-1) {
6256             u&=unneeded_reg[i+2];
6257             uu&=unneeded_reg_upper[i+2];
6258             gte_u&=gte_unneeded[i+2];
6259           }
6260           else
6261           {
6262             u=1;
6263             uu=1;
6264             gte_u=gte_u_unknown;
6265           }
6266         }
6267       }
6268       else
6269       {
6270         // Internal branch, flag target
6271         bt[(ba[i]-start)>>2]=1;
6272         if(ba[i]<=start+i*4) {
6273           // Backward branch
6274           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6275           {
6276             // Unconditional branch
6277             temp_u=1;temp_uu=1;
6278             temp_gte_u=0;
6279           } else {
6280             // Conditional branch (not taken case)
6281             temp_u=unneeded_reg[i+2];
6282             temp_uu=unneeded_reg_upper[i+2];
6283             temp_gte_u&=gte_unneeded[i+2];
6284           }
6285           // Merge in delay slot
6286           tdep=(~temp_uu>>rt1[i+1])&1;
6287           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6288           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6289           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6290           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6291           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6292           temp_u|=1;temp_uu|=1;
6293           temp_gte_u|=gte_rt[i+1];
6294           temp_gte_u&=~gte_rs[i+1];
6295           // If branch is "likely" (and conditional)
6296           // then we skip the delay slot on the fall-thru path
6297           if(likely[i]) {
6298             if(i<slen-1) {
6299               temp_u&=unneeded_reg[i+2];
6300               temp_uu&=unneeded_reg_upper[i+2];
6301               temp_gte_u&=gte_unneeded[i+2];
6302             }
6303             else
6304             {
6305               temp_u=1;
6306               temp_uu=1;
6307               temp_gte_u=gte_u_unknown;
6308             }
6309           }
6310           tdep=(~temp_uu>>rt1[i])&1;
6311           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6312           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6313           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6314           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6315           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6316           temp_u|=1;temp_uu|=1;
6317           temp_gte_u|=gte_rt[i];
6318           temp_gte_u&=~gte_rs[i];
6319           unneeded_reg[i]=temp_u;
6320           unneeded_reg_upper[i]=temp_uu;
6321           gte_unneeded[i]=temp_gte_u;
6322           // Only go three levels deep.  This recursion can take an
6323           // excessive amount of time if there are a lot of nested loops.
6324           if(r<2) {
6325             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6326           }else{
6327             unneeded_reg[(ba[i]-start)>>2]=1;
6328             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6329             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
6330           }
6331         } /*else*/ if(1) {
6332           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6333           {
6334             // Unconditional branch
6335             u=unneeded_reg[(ba[i]-start)>>2];
6336             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6337             gte_u=gte_unneeded[(ba[i]-start)>>2];
6338             branch_unneeded_reg[i]=u;
6339             branch_unneeded_reg_upper[i]=uu;
6340         //u=1;
6341         //uu=1;
6342         //branch_unneeded_reg[i]=u;
6343         //branch_unneeded_reg_upper[i]=uu;
6344             // Merge in delay slot
6345             tdep=(~uu>>rt1[i+1])&1;
6346             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6347             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6348             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6349             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6350             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6351             u|=1;uu|=1;
6352             gte_u|=gte_rt[i+1];
6353             gte_u&=~gte_rs[i+1];
6354           } else {
6355             // Conditional branch
6356             b=unneeded_reg[(ba[i]-start)>>2];
6357             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6358             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6359             branch_unneeded_reg[i]=b;
6360             branch_unneeded_reg_upper[i]=bu;
6361         //b=1;
6362         //bu=1;
6363         //branch_unneeded_reg[i]=b;
6364         //branch_unneeded_reg_upper[i]=bu;
6365             // Branch delay slot
6366             tdep=(~uu>>rt1[i+1])&1;
6367             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6368             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6369             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6370             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6371             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6372             b|=1;bu|=1;
6373             gte_bu|=gte_rt[i+1];
6374             gte_bu&=~gte_rs[i+1];
6375             // If branch is "likely" then we skip the
6376             // delay slot on the fall-thru path
6377             if(likely[i]) {
6378               u=b;
6379               uu=bu;
6380               gte_u=gte_bu;
6381               if(i<slen-1) {
6382                 u&=unneeded_reg[i+2];
6383                 uu&=unneeded_reg_upper[i+2];
6384                 gte_u&=gte_unneeded[i+2];
6385         //u=1;
6386         //uu=1;
6387               }
6388             } else {
6389               u&=b;
6390               uu&=bu;
6391               gte_u&=gte_bu;
6392         //u=1;
6393         //uu=1;
6394             }
6395             if(i<slen-1) {
6396               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6397               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6398         //branch_unneeded_reg[i]=1;
6399         //branch_unneeded_reg_upper[i]=1;
6400             } else {
6401               branch_unneeded_reg[i]=1;
6402               branch_unneeded_reg_upper[i]=1;
6403             }
6404           }
6405         }
6406       }
6407     }
6408     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6409     {
6410       // SYSCALL instruction (software interrupt)
6411       u=1;
6412       uu=1;
6413     }
6414     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6415     {
6416       // ERET instruction (return from interrupt)
6417       u=1;
6418       uu=1;
6419     }
6420     //u=uu=1; // DEBUG
6421     tdep=(~uu>>rt1[i])&1;
6422     // Written registers are unneeded
6423     u|=1LL<<rt1[i];
6424     u|=1LL<<rt2[i];
6425     uu|=1LL<<rt1[i];
6426     uu|=1LL<<rt2[i];
6427     gte_u|=gte_rt[i];
6428     // Accessed registers are needed
6429     u&=~(1LL<<rs1[i]);
6430     u&=~(1LL<<rs2[i]);
6431     uu&=~(1LL<<us1[i]);
6432     uu&=~(1LL<<us2[i]);
6433     gte_u&=~gte_rs[i];
6434     if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
6435       gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
6436     // Source-target dependencies
6437     uu&=~(tdep<<dep1[i]);
6438     uu&=~(tdep<<dep2[i]);
6439     // R0 is always unneeded
6440     u|=1;uu|=1;
6441     // Save it
6442     unneeded_reg[i]=u;
6443     unneeded_reg_upper[i]=uu;
6444     gte_unneeded[i]=gte_u;
6445     /*
6446     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6447     printf("U:");
6448     int r;
6449     for(r=1;r<=CCREG;r++) {
6450       if((unneeded_reg[i]>>r)&1) {
6451         if(r==HIREG) printf(" HI");
6452         else if(r==LOREG) printf(" LO");
6453         else printf(" r%d",r);
6454       }
6455     }
6456     printf(" UU:");
6457     for(r=1;r<=CCREG;r++) {
6458       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6459         if(r==HIREG) printf(" HI");
6460         else if(r==LOREG) printf(" LO");
6461         else printf(" r%d",r);
6462       }
6463     }
6464     printf("\n");*/
6465   }
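  // The R3000A has no 64-bit registers, so force every upper-half mask to
  // "unneeded" regardless of what the scan above computed.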
6466   for (i=iend;i>=istart;i--)
6467   {
6468     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6469   }
6470 }
6471
6472 // Write back dirty registers as soon as we will no longer modify them,
6473 // so that we don't end up with lots of writes at the branches.
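// will_dirty/wont_dirty are per-host-register bitmaps propagated backwards:
// will_dirty marks registers that are going to be dirtied again later (so
// writing them back now would be wasted), wont_dirty marks registers that
// will not be, so their values can be flushed as soon as the last write to
// them has happened.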
6474 void clean_registers(int istart,int iend,int wr)
6475 {
6476   int i;
6477   int r;
6478   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
6479   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
6480   if(iend==slen-1) {
6481     will_dirty_i=will_dirty_next=0;
6482     wont_dirty_i=wont_dirty_next=0;
6483   }else{
6484     will_dirty_i=will_dirty_next=will_dirty[iend+1];
6485     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
6486   }
6487   for (i=iend;i>=istart;i--)
6488   {
6489     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6490     {
6491       if(ba[i]<start || ba[i]>=(start+slen*4))
6492       {
6493         // Branch out of this block, flush all regs
6494         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6495         {
6496           // Unconditional branch
6497           will_dirty_i=0;
6498           wont_dirty_i=0;
6499           // Merge in delay slot (will dirty)
6500           for(r=0;r<HOST_REGS;r++) {
6501             if(r!=EXCLUDE_REG) {
6502               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6503               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6504               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6505               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6506               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6507               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6508               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6509               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6510               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6511               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6512               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6513               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6514               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6515               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6516             }
6517           }
6518         }
6519         else
6520         {
6521           // Conditional branch
6522           will_dirty_i=0;
6523           wont_dirty_i=wont_dirty_next;
6524           // Merge in delay slot (will dirty)
6525           for(r=0;r<HOST_REGS;r++) {
6526             if(r!=EXCLUDE_REG) {
6527               if(!likely[i]) {
6528                 // Might not dirty if likely branch is not taken
6529                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6530                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6531                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6532                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6533                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6534                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
6535                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6536                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6537                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6538                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6539                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6540                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6541                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6542                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6543               }
6544             }
6545           }
6546         }
6547         // Merge in delay slot (wont dirty)
6548         for(r=0;r<HOST_REGS;r++) {
6549           if(r!=EXCLUDE_REG) {
6550             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6551             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6552             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6553             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6554             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6555             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6556             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6557             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6558             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6559             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6560           }
6561         }
6562         if(wr) {
6563           #ifndef DESTRUCTIVE_WRITEBACK
6564           branch_regs[i].dirty&=wont_dirty_i;
6565           #endif
6566           branch_regs[i].dirty|=will_dirty_i;
6567         }
6568       }
6569       else
6570       {
6571         // Internal branch
6572         if(ba[i]<=start+i*4) {
6573           // Backward branch
6574           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6575           {
6576             // Unconditional branch
6577             temp_will_dirty=0;
6578             temp_wont_dirty=0;
6579             // Merge in delay slot (will dirty)
6580             for(r=0;r<HOST_REGS;r++) {
6581               if(r!=EXCLUDE_REG) {
6582                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6583                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6584                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6585                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6586                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6587                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6588                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6589                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6590                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6591                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6592                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6593                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6594                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6595                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6596               }
6597             }
6598           } else {
6599             // Conditional branch (not taken case)
6600             temp_will_dirty=will_dirty_next;
6601             temp_wont_dirty=wont_dirty_next;
6602             // Merge in delay slot (will dirty)
6603             for(r=0;r<HOST_REGS;r++) {
6604               if(r!=EXCLUDE_REG) {
6605                 if(!likely[i]) {
6606                   // Will not dirty if likely branch is not taken
6607                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6608                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6609                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6610                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6611                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6612                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
6613                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6614                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6615                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6616                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6617                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6618                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6619                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6620                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6621                 }
6622               }
6623             }
6624           }
6625           // Merge in delay slot (won't dirty)
6626           for(r=0;r<HOST_REGS;r++) {
6627             if(r!=EXCLUDE_REG) {
6628               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6629               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6630               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6631               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6632               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6633               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6634               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6635               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6636               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6637               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6638             }
6639           }
6640           // Deal with changed mappings
6641           if(i<iend) {
6642             for(r=0;r<HOST_REGS;r++) {
6643               if(r!=EXCLUDE_REG) {
6644                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
6645                   temp_will_dirty&=~(1<<r);
6646                   temp_wont_dirty&=~(1<<r);
6647                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6648                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6649                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6650                   } else {
6651                     temp_will_dirty|=1<<r;
6652                     temp_wont_dirty|=1<<r;
6653                   }
6654                 }
6655               }
6656             }
6657           }
6658           if(wr) {
6659             will_dirty[i]=temp_will_dirty;
6660             wont_dirty[i]=temp_wont_dirty;
6661             clean_registers((ba[i]-start)>>2,i-1,0);
6662           }else{
6663             // Limit recursion.  It can take an excessive amount
6664             // of time if there are a lot of nested loops.
6665             will_dirty[(ba[i]-start)>>2]=0;
6666             wont_dirty[(ba[i]-start)>>2]=-1;
6667           }
6668         }
6669         /*else*/ if(1)
6670         {
6671           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6672           {
6673             // Unconditional branch
6674             will_dirty_i=0;
6675             wont_dirty_i=0;
6676           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6677             for(r=0;r<HOST_REGS;r++) {
6678               if(r!=EXCLUDE_REG) {
6679                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6680                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
6681                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6682                 }
6683                 if(branch_regs[i].regmap[r]>=0) {
6684                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6685                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6686                 }
6687               }
6688             }
6689           //}
6690             // Merge in delay slot
6691             for(r=0;r<HOST_REGS;r++) {
6692               if(r!=EXCLUDE_REG) {
6693                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6694                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6695                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6696                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6697                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6698                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6699                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6700                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6701                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6702                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6703                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6704                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6705                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6706                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6707               }
6708             }
6709           } else {
6710             // Conditional branch
6711             will_dirty_i=will_dirty_next;
6712             wont_dirty_i=wont_dirty_next;
6713           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6714             for(r=0;r<HOST_REGS;r++) {
6715               if(r!=EXCLUDE_REG) {
6716                 signed char target_reg=branch_regs[i].regmap[r];
6717                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6718                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6719                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6720                 }
6721                 else if(target_reg>=0) {
6722                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6723                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6724                 }
6725                 // Treat delay slot as part of branch too
6726                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6727                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6728                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6729                 }
6730                 else
6731                 {
6732                   will_dirty[i+1]&=~(1<<r);
6733                 }*/
6734               }
6735             }
6736           //}
6737             // Merge in delay slot
6738             for(r=0;r<HOST_REGS;r++) {
6739               if(r!=EXCLUDE_REG) {
6740                 if(!likely[i]) {
6741                   // Might not dirty if likely branch is not taken
6742                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6743                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6744                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6745                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6746                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6747                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6748                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6749                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6750                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6751                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6752                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6753                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6754                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6755                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6756                 }
6757               }
6758             }
6759           }
6760           // Merge in delay slot (won't dirty)
6761           for(r=0;r<HOST_REGS;r++) {
6762             if(r!=EXCLUDE_REG) {
6763               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6764               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6765               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6766               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6767               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6768               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6769               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6770               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6771               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6772               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6773             }
6774           }
6775           if(wr) {
6776             #ifndef DESTRUCTIVE_WRITEBACK
6777             branch_regs[i].dirty&=wont_dirty_i;
6778             #endif
6779             branch_regs[i].dirty|=will_dirty_i;
6780           }
6781         }
6782       }
6783     }
6784     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6785     {
6786       // SYSCALL instruction (software interrupt)
6787       will_dirty_i=0;
6788       wont_dirty_i=0;
6789     }
6790     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6791     {
6792       // ERET instruction (return from interrupt)
6793       will_dirty_i=0;
6794       wont_dirty_i=0;
6795     }
6796     will_dirty_next=will_dirty_i;
6797     wont_dirty_next=wont_dirty_i;
6798     for(r=0;r<HOST_REGS;r++) {
6799       if(r!=EXCLUDE_REG) {
6800         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6801         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6802         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6803         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6804         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6805         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6806         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6807         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6808         if(i>istart) {
6809           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
6810           {
6811             // Don't store a register immediately after writing it,
6812             // as it may prevent dual-issue.
6813             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
6814             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
6815           }
6816         }
6817       }
6818     }
6819     // Save it
6820     will_dirty[i]=will_dirty_i;
6821     wont_dirty[i]=wont_dirty_i;
6822     // Mark registers that won't be dirtied as not dirty
6823     if(wr) {
6824       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
6825       for(r=0;r<HOST_REGS;r++) {
6826         if((will_dirty_i>>r)&1) {
6827           printf(" r%d",r);
6828         }
6829       }
6830       printf("\n");*/
6831
6832       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
6833         regs[i].dirty|=will_dirty_i;
6834         #ifndef DESTRUCTIVE_WRITEBACK
6835         regs[i].dirty&=wont_dirty_i;
6836         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6837         {
6838           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
6839             for(r=0;r<HOST_REGS;r++) {
6840               if(r!=EXCLUDE_REG) {
6841                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
6842                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
6843                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6844               }
6845             }
6846           }
6847         }
6848         else
6849         {
6850           if(i<iend) {
6851             for(r=0;r<HOST_REGS;r++) {
6852               if(r!=EXCLUDE_REG) {
6853                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
6854                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
6855                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6856               }
6857             }
6858           }
6859         }
6860         #endif
6861       //}
6862     }
6863     // Deal with changed mappings
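         // (If a host register was remapped within this instruction, the dirty
         // bits tracked for it so far no longer apply; they are transferred
         // from the register's new location or re-derived below.)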
6864     temp_will_dirty=will_dirty_i;
6865     temp_wont_dirty=wont_dirty_i;
6866     for(r=0;r<HOST_REGS;r++) {
6867       if(r!=EXCLUDE_REG) {
6868         int nr;
6869         if(regs[i].regmap[r]==regmap_pre[i][r]) {
6870           if(wr) {
6871             #ifndef DESTRUCTIVE_WRITEBACK
6872             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6873             #endif
6874             regs[i].wasdirty|=will_dirty_i&(1<<r);
6875           }
6876         }
6877         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
6878           // Register moved to a different register
6879           will_dirty_i&=~(1<<r);
6880           wont_dirty_i&=~(1<<r);
6881           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
6882           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
6883           if(wr) {
6884             #ifndef DESTRUCTIVE_WRITEBACK
6885             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6886             #endif
6887             regs[i].wasdirty|=will_dirty_i&(1<<r);
6888           }
6889         }
6890         else {
6891           will_dirty_i&=~(1<<r);
6892           wont_dirty_i&=~(1<<r);
6893           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6894             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6895             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6896           } else {
6897             wont_dirty_i|=1<<r;
6898             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
6899           }
6900         }
6901       }
6902     }
6903   }
6904 }
6905
6906 #ifdef DISASM
6907   /* disassembly */
6908 void disassemble_inst(int i)
6909 {
6910     if (bt[i]) printf("*"); else printf(" ");
6911     switch(itype[i]) {
6912       case UJUMP:
6913         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6914       case CJUMP:
6915         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
6916       case SJUMP:
6917         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6918       case FJUMP:
6919         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6920       case RJUMP:
6921         if (opcode[i]==0x9&&rt1[i]!=31)
6922           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
6923         else
6924           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6925         break;
6926       case SPAN:
6927         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
6928       case IMM16:
6929         if(opcode[i]==0xf) //LUI
6930           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
6931         else
6932           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6933         break;
6934       case LOAD:
6935       case LOADLR:
6936         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6937         break;
6938       case STORE:
6939       case STORELR:
6940         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
6941         break;
6942       case ALU:
6943       case SHIFT:
6944         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
6945         break;
6946       case MULTDIV:
6947         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
6948         break;
6949       case SHIFTIMM:
6950         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6951         break;
6952       case MOV:
6953         if((opcode2[i]&0x1d)==0x10)
6954           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
6955         else if((opcode2[i]&0x1d)==0x11)
6956           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6957         else
6958           printf (" %x: %s\n",start+i*4,insn[i]);
6959         break;
6960       case COP0:
6961         if(opcode2[i]==0)
6962           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
6963         else if(opcode2[i]==4)
6964           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
6965         else printf (" %x: %s\n",start+i*4,insn[i]);
6966         break;
6967       case COP1:
6968         if(opcode2[i]<3)
6969           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
6970         else if(opcode2[i]>3)
6971           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
6972         else printf (" %x: %s\n",start+i*4,insn[i]);
6973         break;
6974       case COP2:
6975         if(opcode2[i]<3)
6976           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
6977         else if(opcode2[i]>3)
6978           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
6979         else printf (" %x: %s\n",start+i*4,insn[i]);
6980         break;
6981       case C1LS:
6982         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6983         break;
6984       case C2LS:
6985         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6986         break;
6987       case INTCALL:
6988         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6989         break;
6990       default:
6991         //printf (" %s %8x\n",insn[i],source[i]);
6992         printf (" %x: %s\n",start+i*4,insn[i]);
6993     }
6994 }
6995 #else
6996 static void disassemble_inst(int i) {}
6997 #endif // DISASM
6998
6999 #define DRC_TEST_VAL 0x74657374
7000
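     // Sanity check: emit a tiny stub into the translation cache that loads
     // DRC_TEST_VAL (ASCII "test") and returns, then call it to verify that
     // generated code is actually executable on this setup.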
7001 static int new_dynarec_test(void)
7002 {
7003   int (*testfunc)(void) = (void *)out;
7004   void *beginning;
7005   int ret;
7006
7007   beginning = start_block();
7008   emit_movimm(DRC_TEST_VAL,0); // test
7009   emit_jmpreg(14);
7010   literal_pool(0);
7011   end_block(beginning);
7012   SysPrintf("testing if we can run recompiled code..\n");
7013   ret = testfunc();
7014   if (ret == DRC_TEST_VAL)
7015     SysPrintf("test passed.\n");
7016   else
7017     SysPrintf("test failed: %08x\n", ret);
7018   out=(u_char *)BASE_ADDR;
7019   return ret == DRC_TEST_VAL;
7020 }
7021
7022 // clear the state completely, instead of just marking
7023 // things invalid like invalidate_all_pages() does
7024 void new_dynarec_clear_full()
7025 {
7026   int n;
7027   out=(u_char *)BASE_ADDR;
7028   memset(invalid_code,1,sizeof(invalid_code));
7029   memset(hash_table,0xff,sizeof(hash_table));
7030   memset(mini_ht,-1,sizeof(mini_ht));
7031   memset(restore_candidate,0,sizeof(restore_candidate));
7032   memset(shadow,0,sizeof(shadow));
7033   copy=shadow;
7034   expirep=16384; // Expiry pointer, +2 blocks
7035   pending_exception=0;
7036   literalcount=0;
7037   stop_after_jal=0;
7038   inv_code_start=inv_code_end=~0;
7039   // clear the block lookup tables
7040   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7041   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7042   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7043 }
7044
7045 void new_dynarec_init()
7046 {
7047   SysPrintf("Init new dynarec\n");
7048
7049   // allocate/prepare a buffer for translation cache
7050   // see assem_arm.h for some explanation
7051 #if   defined(BASE_ADDR_FIXED)
7052   if (mmap (translation_cache, 1 << TARGET_SIZE_2,
7053             PROT_READ | PROT_WRITE | PROT_EXEC,
7054             MAP_PRIVATE | MAP_ANONYMOUS,
7055             -1, 0) != translation_cache) {
7056     SysPrintf("mmap() failed: %s\n", strerror(errno));
7057     SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
7058     abort();
7059   }
7060 #elif defined(BASE_ADDR_DYNAMIC)
7061   #ifdef VITA
7062   sceBlock = getVMBlock();//sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
7063   if (sceBlock < 0)
7064     SysPrintf("sceKernelAllocMemBlockForVM failed\n");
7065   int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
7066   if (ret < 0)
7067     SysPrintf("sceKernelGetMemBlockBase failed\n");
7068   sceClibPrintf("translation_cache = 0x%08X\n", translation_cache);
7069   #else
7070   translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
7071             PROT_READ | PROT_WRITE | PROT_EXEC,
7072             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
7073   if (translation_cache == MAP_FAILED) {
7074     SysPrintf("mmap() failed: %s\n", strerror(errno));
7075     abort();
7076   }
7077   #endif
7078 #else
7079   #ifndef NO_WRITE_EXEC
7080   // not all systems allow execute in data segment by default
7081   if (mprotect(out, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
7082     SysPrintf("mprotect() failed: %s\n", strerror(errno));
7083   #endif
7084 #endif
7085   out=(u_char *)BASE_ADDR;
7086   cycle_multiplier=200;
7087   new_dynarec_clear_full();
7088 #ifdef HOST_IMM8
7089   // Copy this into local area so we don't have to put it in every literal pool
7090   invc_ptr=invalid_code;
7091 #endif
7092   arch_init();
7093   new_dynarec_test();
7094 #ifndef RAM_FIXED
7095   ram_offset=(u_int)rdram-0x80000000;
7096 #endif
7097   if (ram_offset!=0)
7098     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
7099 }
7100
7101 void new_dynarec_cleanup()
7102 {
7103   int n;
7104 #if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
7105   #ifdef VITA
7106   //sceKernelFreeMemBlock(sceBlock);
7107   //sceBlock = -1;
7108   #else
7109   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0)
7110     SysPrintf("munmap() failed\n");
7111   #endif
7112 #endif
7113   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7114   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7115   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7116   #ifdef ROM_COPY
7117   if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
7118   #endif
7119 }
7120
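     // Map a PSX code address to a host pointer into RAM or the BIOS image and
     // set *limit to the end of that region; returns NULL if the address is not
     // backed by memory we can compile from.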
7121 static u_int *get_source_start(u_int addr, u_int *limit)
7122 {
7123   if (addr < 0x00200000 ||
7124     (0xa0000000 <= addr && addr < 0xa0200000)) {
7125     // used for BIOS calls mostly?
7126     *limit = (addr&0xa0000000)|0x00200000;
7127     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7128   }
7129   else if (!Config.HLE && (
7130     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7131     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7132     // BIOS
7133     *limit = (addr & 0xfff00000) | 0x80000;
7134     return (u_int *)((u_int)psxR + (addr&0x7ffff));
7135   }
7136   else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
7137     *limit = (addr & 0x80600000) + 0x00200000;
7138     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7139   }
7140   return NULL;
7141 }
7142
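     // Scan forward (at most 4KB) for a "jr $ra" and return the address just
     // past its delay slot; used below when saving blocks, apparently to skip
     // entries that fall inside an already-recorded function.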
7143 static u_int scan_for_ret(u_int addr)
7144 {
7145   u_int limit = 0;
7146   u_int *mem;
7147
7148   mem = get_source_start(addr, &limit);
7149   if (mem == NULL)
7150     return addr;
7151
7152   if (limit > addr + 0x1000)
7153     limit = addr + 0x1000;
7154   for (; addr < limit; addr += 4, mem++) {
7155     if (*mem == 0x03e00008) // jr $ra
7156       return addr + 8;
7157   }
7158   return addr;
7159 }
7160
7161 struct savestate_block {
7162   uint32_t addr;
7163   uint32_t regflags;
7164 };
7165
7166 static int addr_cmp(const void *p1_, const void *p2_)
7167 {
7168   const struct savestate_block *p1 = p1_, *p2 = p2_;
7169   return p1->addr - p2->addr;
7170 }
7171
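     // Write the entry address and speculated register flags of each compiled
     // block into 'save' (for savestates); returns the number of bytes used.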
7172 int new_dynarec_save_blocks(void *save, int size)
7173 {
7174   struct savestate_block *blocks = save;
7175   int maxcount = size / sizeof(blocks[0]);
7176   struct savestate_block tmp_blocks[1024];
7177   struct ll_entry *head;
7178   int p, s, d, o, bcnt;
7179   u_int addr;
7180
7181   o = 0;
7182   for (p = 0; p < sizeof(jump_in) / sizeof(jump_in[0]); p++) {
7183     bcnt = 0;
7184     for (head = jump_in[p]; head != NULL; head = head->next) {
7185       tmp_blocks[bcnt].addr = head->vaddr;
7186       tmp_blocks[bcnt].regflags = head->reg_sv_flags;
7187       bcnt++;
7188     }
7189     if (bcnt < 1)
7190       continue;
7191     qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
7192
7193     addr = tmp_blocks[0].addr;
7194     for (s = d = 0; s < bcnt; s++) {
7195       if (tmp_blocks[s].addr < addr)
7196         continue;
7197       if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
7198         tmp_blocks[d++] = tmp_blocks[s];
7199       addr = scan_for_ret(tmp_blocks[s].addr);
7200     }
7201
7202     if (o + d > maxcount)
7203       d = maxcount - o;
7204     memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
7205     o += d;
7206   }
7207
7208   return o * sizeof(blocks[0]);
7209 }
7210
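     // Precompile the blocks recorded by new_dynarec_save_blocks, temporarily
     // seeding the GPRs so that address speculation behaves roughly as it did
     // when the blocks were first compiled.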
7211 void new_dynarec_load_blocks(const void *save, int size)
7212 {
7213   const struct savestate_block *blocks = save;
7214   int count = size / sizeof(blocks[0]);
7215   u_int regs_save[32];
7216   uint32_t f;
7217   int i, b;
7218
7219   get_addr(psxRegs.pc);
7220
7221   // change GPRs for speculation to at least partially work..
7222   memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
7223   for (i = 1; i < 32; i++)
7224     psxRegs.GPR.r[i] = 0x80000000;
7225
7226   for (b = 0; b < count; b++) {
7227     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7228       if (f & 1)
7229         psxRegs.GPR.r[i] = 0x1f800000;
7230     }
7231
7232     get_addr(blocks[b].addr);
7233
7234     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7235       if (f & 1)
7236         psxRegs.GPR.r[i] = 0x80000000;
7237     }
7238   }
7239
7240   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
7241 }
7242
7243 int new_recompile_block(int addr)
7244 {
7245   u_int pagelimit = 0;
7246   u_int state_rflags = 0;
7247   int i;
7248
7249   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7250   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7251   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7252   //if(debug)
7253   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7254   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7255   /*if(Count>=312978186) {
7256     rlist();
7257   }*/
7258   //rlist();
7259
7260   // this is just for speculation
7261   for (i = 1; i < 32; i++) {
7262     if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
7263       state_rflags |= 1 << i;
7264   }
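       // (state_rflags marks GPRs that currently point into the 0x1f80xxxx
       // scratchpad/I/O area; it is stored with the compiled block so the same
       // speculation can be reproduced when restoring a savestate.)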
7265
7266   start = (u_int)addr&~3;
7267   //assert(((u_int)addr&1)==0);
7268   new_dynarec_did_compile=1;
7269   if (Config.HLE && start == 0x80001000) // hlecall
7270   {
7271     // XXX: is this enough? Maybe check hleSoftCall?
7272     void *beginning=start_block();
7273     u_int page=get_page(start);
7274
7275     invalid_code[start>>12]=0;
7276     emit_movimm(start,0);
7277     emit_writeword(0,(int)&pcaddr);
7278     emit_jmp((int)new_dyna_leave);
7279     literal_pool(0);
7280     end_block(beginning);
7281     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
7282     return 0;
7283   }
7284
7285   source = get_source_start(start, &pagelimit);
7286   if (source == NULL) {
7287     SysPrintf("Compile at bogus memory address: %08x\n", addr);
7288     exit(1);
7289   }
7290
7291   /* Pass 1: disassemble */
7292   /* Pass 2: register dependencies, branch targets */
7293   /* Pass 3: register allocation */
7294   /* Pass 4: branch dependencies */
7295   /* Pass 5: pre-alloc */
7296   /* Pass 6: optimize clean/dirty state */
7297   /* Pass 7: flag 32-bit registers */
7298   /* Pass 8: assembly */
7299   /* Pass 9: linker */
7300   /* Pass 10: garbage collection / free memory */
7301
7302   int j;
7303   int done=0;
7304   unsigned int type,op,op2;
7305
7306   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7307
7308   /* Pass 1 disassembly */
7309
7310   for(i=0;!done;i++) {
7311     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
7312     minimum_free_regs[i]=0;
7313     opcode[i]=op=source[i]>>26;
7314     switch(op)
7315     {
7316       case 0x00: strcpy(insn[i],"special"); type=NI;
7317         op2=source[i]&0x3f;
7318         switch(op2)
7319         {
7320           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7321           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7322           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7323           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7324           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7325           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7326           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7327           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7328           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7329           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7330           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7331           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7332           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7333           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7334           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7335           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7336           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7337           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7338           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7339           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7340           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7341           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7342           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7343           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7344           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7345           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7346           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7347           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7348           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7349           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7350           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7351           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7352           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7353           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7354           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7355 #if 0
7356           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7357           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7358           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7359           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7360           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7361           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7362           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7363           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7364           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7365           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7366           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7367           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7368           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7369           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7370           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7371           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7372           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7373 #endif
7374         }
7375         break;
7376       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7377         op2=(source[i]>>16)&0x1f;
7378         switch(op2)
7379         {
7380           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7381           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7382           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7383           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7384           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7385           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7386           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7387           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7388           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7389           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7390           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7391           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7392           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7393           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7394         }
7395         break;
7396       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7397       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7398       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7399       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7400       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7401       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7402       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7403       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7404       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7405       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7406       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7407       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7408       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7409       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7410       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7411         op2=(source[i]>>21)&0x1f;
7412         switch(op2)
7413         {
7414           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7415           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7416           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7417           switch(source[i]&0x3f)
7418           {
7419             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7420             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7421             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7422             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7423             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7424             //case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7425           }
7426         }
7427         break;
7428       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7429         op2=(source[i]>>21)&0x1f;
7430         switch(op2)
7431         {
7432           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7433           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7434           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7435           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7436           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7437           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7438           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7439           switch((source[i]>>16)&0x3)
7440           {
7441             case 0x00: strcpy(insn[i],"BC1F"); break;
7442             case 0x01: strcpy(insn[i],"BC1T"); break;
7443             case 0x02: strcpy(insn[i],"BC1FL"); break;
7444             case 0x03: strcpy(insn[i],"BC1TL"); break;
7445           }
7446           break;
7447           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7448           switch(source[i]&0x3f)
7449           {
7450             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7451             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7452             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7453             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7454             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7455             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7456             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7457             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7458             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7459             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7460             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7461             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7462             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7463             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7464             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7465             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7466             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7467             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7468             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7469             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7470             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7471             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7472             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7473             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7474             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7475             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7476             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7477             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7478             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7479             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7480             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7481             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7482             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7483             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
7484             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
7485           }
7486           break;
7487           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
7488           switch(source[i]&0x3f)
7489           {
7490             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
7491             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
7492             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
7493             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
7494             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
7495             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
7496             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
7497             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
7498             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
7499             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
7500             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
7501             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
7502             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
7503             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
7504             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
7505             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
7506             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
7507             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
7508             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
7509             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
7510             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
7511             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
7512             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
7513             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
7514             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
7515             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
7516             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
7517             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
7518             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
7519             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
7520             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
7521             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
7522             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
7523             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
7524             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
7525           }
7526           break;
7527           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
7528           switch(source[i]&0x3f)
7529           {
7530             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
7531             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
7532           }
7533           break;
7534           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
7535           switch(source[i]&0x3f)
7536           {
7537             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
7538             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
7539           }
7540           break;
7541         }
7542         break;
7543 #if 0
7544       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
7545       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
7546       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
7547       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
7548       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
7549       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
7550       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
7551       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
7552 #endif
7553       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
7554       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
7555       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
7556       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
7557       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
7558       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
7559       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
7560 #if 0
7561       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
7562 #endif
7563       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
7564       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
7565       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
7566       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
7567 #if 0
7568       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
7569       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
7570 #endif
7571       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
7572       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
7573       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
7574       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
7575 #if 0
7576       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
7577       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
7578       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
7579 #endif
7580       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
7581       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
7582 #if 0
7583       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
7584       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
7585       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
7586 #endif
7587       case 0x12: strcpy(insn[i],"COP2"); type=NI;
7588         op2=(source[i]>>21)&0x1f;
7589         //if (op2 & 0x10) {
7590         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
7591           if (gte_handlers[source[i]&0x3f]!=NULL) {
7592             if (gte_regnames[source[i]&0x3f]!=NULL)
7593               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
7594             else
7595               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
7596             type=C2OP;
7597           }
7598         }
7599         else switch(op2)
7600         {
7601           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
7602           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
7603           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
7604           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
7605         }
7606         break;
7607       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
7608       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
7609       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
7610       default: strcpy(insn[i],"???"); type=NI;
7611         SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
7612         break;
7613     }
7614     itype[i]=type;
7615     opcode2[i]=op2;
7616     /* Get registers/immediates */
7617     lt1[i]=0;
7618     us1[i]=0;
7619     us2[i]=0;
7620     dep1[i]=0;
7621     dep2[i]=0;
7622     gte_rs[i]=gte_rt[i]=0;
7623     switch(type) {
7624       case LOAD:
7625         rs1[i]=(source[i]>>21)&0x1f;
7626         rs2[i]=0;
7627         rt1[i]=(source[i]>>16)&0x1f;
7628         rt2[i]=0;
7629         imm[i]=(short)source[i];
7630         break;
7631       case STORE:
7632       case STORELR:
7633         rs1[i]=(source[i]>>21)&0x1f;
7634         rs2[i]=(source[i]>>16)&0x1f;
7635         rt1[i]=0;
7636         rt2[i]=0;
7637         imm[i]=(short)source[i];
7638         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
7639         break;
7640       case LOADLR:
7641         // LWL/LWR only load part of the register,
7642         // therefore the target register must be treated as a source too
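             // (The loaded bytes are merged with the register's current value.)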
7643         rs1[i]=(source[i]>>21)&0x1f;
7644         rs2[i]=(source[i]>>16)&0x1f;
7645         rt1[i]=(source[i]>>16)&0x1f;
7646         rt2[i]=0;
7647         imm[i]=(short)source[i];
7648         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
7649         if(op==0x26) dep1[i]=rt1[i]; // LWR
7650         break;
7651       case IMM16:
7652         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
7653         else rs1[i]=(source[i]>>21)&0x1f;
7654         rs2[i]=0;
7655         rt1[i]=(source[i]>>16)&0x1f;
7656         rt2[i]=0;
7657         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
7658           imm[i]=(unsigned short)source[i];
7659         }else{
7660           imm[i]=(short)source[i];
7661         }
7662         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
7663         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
7664         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
7665         break;
7666       case UJUMP:
7667         rs1[i]=0;
7668         rs2[i]=0;
7669         rt1[i]=0;
7670         rt2[i]=0;
7671         // The JAL instruction writes to r31.
7672         if (op&1) {
7673           rt1[i]=31;
7674         }
7675         rs2[i]=CCREG;
7676         break;
7677       case RJUMP:
7678         rs1[i]=(source[i]>>21)&0x1f;
7679         rs2[i]=0;
7680         rt1[i]=0;
7681         rt2[i]=0;
7682         // The JALR instruction writes to rd.
7683         if (op2&1) {
7684           rt1[i]=(source[i]>>11)&0x1f;
7685         }
7686         rs2[i]=CCREG;
7687         break;
7688       case CJUMP:
7689         rs1[i]=(source[i]>>21)&0x1f;
7690         rs2[i]=(source[i]>>16)&0x1f;
7691         rt1[i]=0;
7692         rt2[i]=0;
7693         if(op&2) { // BGTZ/BLEZ
7694           rs2[i]=0;
7695         }
7696         us1[i]=rs1[i];
7697         us2[i]=rs2[i];
7698         likely[i]=op>>4;
7699         break;
7700       case SJUMP:
7701         rs1[i]=(source[i]>>21)&0x1f;
7702         rs2[i]=CCREG;
7703         rt1[i]=0;
7704         rt2[i]=0;
7705         us1[i]=rs1[i];
7706         if(op2&0x10) { // BxxAL
7707           rt1[i]=31;
7708           // NOTE: If the branch is not taken, r31 is still overwritten
7709         }
7710         likely[i]=(op2&2)>>1;
7711         break;
7712       case FJUMP:
7713         rs1[i]=FSREG;
7714         rs2[i]=CSREG;
7715         rt1[i]=0;
7716         rt2[i]=0;
7717         likely[i]=((source[i])>>17)&1;
7718         break;
7719       case ALU:
7720         rs1[i]=(source[i]>>21)&0x1f; // source
7721         rs2[i]=(source[i]>>16)&0x1f; // subtract amount
7722         rt1[i]=(source[i]>>11)&0x1f; // destination
7723         rt2[i]=0;
7724         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7725           us1[i]=rs1[i];us2[i]=rs2[i];
7726         }
7727         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7728           dep1[i]=rs1[i];dep2[i]=rs2[i];
7729         }
7730         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
7731           dep1[i]=rs1[i];dep2[i]=rs2[i];
7732         }
7733         break;
7734       case MULTDIV:
7735         rs1[i]=(source[i]>>21)&0x1f; // source
7736         rs2[i]=(source[i]>>16)&0x1f; // divisor
7737         rt1[i]=HIREG;
7738         rt2[i]=LOREG;
7739         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7740           us1[i]=rs1[i];us2[i]=rs2[i];
7741         }
7742         break;
7743       case MOV:
7744         rs1[i]=0;
7745         rs2[i]=0;
7746         rt1[i]=0;
7747         rt2[i]=0;
7748         if(op2==0x10) rs1[i]=HIREG; // MFHI
7749         if(op2==0x11) rt1[i]=HIREG; // MTHI
7750         if(op2==0x12) rs1[i]=LOREG; // MFLO
7751         if(op2==0x13) rt1[i]=LOREG; // MTLO
7752         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
7753         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
7754         dep1[i]=rs1[i];
7755         break;
7756       case SHIFT:
7757         rs1[i]=(source[i]>>16)&0x1f; // target of shift
7758         rs2[i]=(source[i]>>21)&0x1f; // shift amount
7759         rt1[i]=(source[i]>>11)&0x1f; // destination
7760         rt2[i]=0;
7761         // DSLLV/DSRLV/DSRAV are 64-bit
7762         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
7763         break;
7764       case SHIFTIMM:
7765         rs1[i]=(source[i]>>16)&0x1f;
7766         rs2[i]=0;
7767         rt1[i]=(source[i]>>11)&0x1f;
7768         rt2[i]=0;
7769         imm[i]=(source[i]>>6)&0x1f;
7770         // DSxx32 instructions
7771         if(op2>=0x3c) imm[i]|=0x20;
7772         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
7773         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
7774         break;
7775       case COP0:
7776         rs1[i]=0;
7777         rs2[i]=0;
7778         rt1[i]=0;
7779         rt2[i]=0;
7780         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
7781         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
7782         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
7783         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
7784         break;
7785       case COP1:
7786         rs1[i]=0;
7787         rs2[i]=0;
7788         rt1[i]=0;
7789         rt2[i]=0;
7790         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
7791         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
7792         if(op2==5) us1[i]=rs1[i]; // DMTC1
7793         rs2[i]=CSREG;
7794         break;
7795       case COP2:
7796         rs1[i]=0;
7797         rs2[i]=0;
7798         rt1[i]=0;
7799         rt2[i]=0;
7800         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
7801         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
7802         rs2[i]=CSREG;
7803         int gr=(source[i]>>11)&0x1F;
7804         switch(op2)
7805         {
7806           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
7807           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
7808           case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
7809           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
7810         }
7811         break;
7812       case C1LS:
7813         rs1[i]=(source[i]>>21)&0x1F;
7814         rs2[i]=CSREG;
7815         rt1[i]=0;
7816         rt2[i]=0;
7817         imm[i]=(short)source[i];
7818         break;
7819       case C2LS:
7820         rs1[i]=(source[i]>>21)&0x1F;
7821         rs2[i]=0;
7822         rt1[i]=0;
7823         rt2[i]=0;
7824         imm[i]=(short)source[i];
7825         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
7826         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
7827         break;
7828       case C2OP:
7829         rs1[i]=0;
7830         rs2[i]=0;
7831         rt1[i]=0;
7832         rt2[i]=0;
7833         gte_rs[i]=gte_reg_reads[source[i]&0x3f];
7834         gte_rt[i]=gte_reg_writes[source[i]&0x3f];
7835         gte_rt[i]|=1ll<<63; // every op changes flags
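             // (Bit 63 corresponds to GTE control reg 31, the FLAG register.)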
7836         if((source[i]&0x3f)==GTE_MVMVA) {
7837           int v = (source[i] >> 15) & 3;
7838           gte_rs[i]&=~0xe3fll;
7839           if(v==3) gte_rs[i]|=0xe00ll;
7840           else gte_rs[i]|=3ll<<(v*2);
7841         }
7842         break;
7843       case FLOAT:
7844       case FCONV:
7845         rs1[i]=0;
7846         rs2[i]=CSREG;
7847         rt1[i]=0;
7848         rt2[i]=0;
7849         break;
7850       case FCOMP:
7851         rs1[i]=FSREG;
7852         rs2[i]=CSREG;
7853         rt1[i]=FSREG;
7854         rt2[i]=0;
7855         break;
7856       case SYSCALL:
7857       case HLECALL:
7858       case INTCALL:
7859         rs1[i]=CCREG;
7860         rs2[i]=0;
7861         rt1[i]=0;
7862         rt2[i]=0;
7863         break;
7864       default:
7865         rs1[i]=0;
7866         rs2[i]=0;
7867         rt1[i]=0;
7868         rt2[i]=0;
7869     }
7870     /* Calculate branch target addresses */
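         // J/JAL: target = top 4 bits of the delay-slot PC | (26-bit index << 2).
         // Conditional branches: delay-slot PC + sign-extended 16-bit offset * 4.
         // Branches that can never be taken (BNE r,r / BLTZ $zero) just get the
         // fall-through address (PC+8).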
7871     if(type==UJUMP)
7872       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
7873     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
7874       ba[i]=start+i*4+8; // Ignore never taken branch
7875     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
7876       ba[i]=start+i*4+8; // Ignore never taken branch
7877     else if(type==CJUMP||type==SJUMP||type==FJUMP)
7878       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
7879     else ba[i]=-1;
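         // If the previous instruction was a branch, this one is its delay slot.
         // A few awkward patterns - a branch in the delay slot, a load whose
         // delayed result the branch target reads, or a suspicious v0 overwrite -
         // are handed to the interpreter by turning the branch into an INTCALL.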
7880     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
7881       int do_in_intrp=0;
7882       // branch in delay slot?
7883       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7884         // can't handle a branch inside a delay slot; call the interpreter if this is hit
7885         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
7886         do_in_intrp=1;
7887       }
7888       // basic load delay detection
7889       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
7890         int t=(ba[i-1]-start)/4;
7891         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
7892           // jump target wants DS result - potential load delay effect
7893           SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
7894           do_in_intrp=1;
7895           bt[t+1]=1; // expected return from interpreter
7896         }
7897         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
7898               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
7899           // v0 overwrite like this is a sign of trouble, bail out
7900           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
7901           do_in_intrp=1;
7902         }
7903       }
7904       if(do_in_intrp) {
7905         rs1[i-1]=CCREG;
7906         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
7907         ba[i-1]=-1;
7908         itype[i-1]=INTCALL;
7909         done=2;
7910         i--; // don't compile the DS
7911       }
7912     }
7913     /* Is this the end of the block? */
7914     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
7915       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
7916         done=2;
7917       }
7918       else {
7919         if(stop_after_jal) done=1;
7920         // Stop on BREAK
7921         if((source[i+1]&0xfc00003f)==0x0d) done=1;
7922       }
7923       // Don't recompile stuff that's already compiled
7924       if(check_addr(start+i*4+4)) done=1;
7925       // Don't get too close to the limit
7926       if(i>MAXBLOCK/2) done=1;
7927     }
7928     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
7929     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
7930     if(done==2) {
7931       // Does the block continue due to a branch?
7932       for(j=i-1;j>=0;j--)
7933       {
7934         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
7935         if(ba[j]==start+i*4+4) done=j=0;
7936         if(ba[j]==start+i*4+8) done=j=0;
7937       }
7938     }
7939     //assert(i<MAXBLOCK-1);
7940     if(start+i*4==pagelimit-4) done=1;
7941     assert(start+i*4<pagelimit);
7942     if (i==MAXBLOCK-1) done=1;
7943     // Stop if we're compiling junk
7944     if(itype[i]==NI&&opcode[i]==0x11) {
7945       done=stop_after_jal=1;
7946       SysPrintf("Disabled speculative precompilation\n");
7947     }
7948   }
7949   slen=i;
7950   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
7951     if(start+i*4==pagelimit) {
7952       itype[i-1]=SPAN;
7953     }
7954   }
7955   assert(slen>0);
7956
7957   /* Pass 2 - Register dependencies and branch targets */
7958
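       // Fills unneeded_reg[] / unneeded_reg_upper[]: bitmasks of guest registers
       // whose values are dead at each instruction, used below to skip loads and
       // dirty writebacks for them.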
7959   unneeded_registers(0,slen-1,0);
7960
7961   /* Pass 3 - Register allocation */
7962
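       // Forward pass over the block: 'current' tracks the live host<->guest
       // register mapping plus dirty/constant/width state; regmap_pre[i] gets a
       // snapshot of the mapping on entry to instruction i and regs[i] the state
       // after its allocation.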
7963   struct regstat current; // Current register allocations/status
7964   current.is32=1;
7965   current.dirty=0;
7966   current.u=unneeded_reg[0];
7967   current.uu=unneeded_reg_upper[0];
7968   clear_all_regs(current.regmap);
7969   alloc_reg(&current,0,CCREG);
7970   dirty_reg(&current,CCREG);
7971   current.isconst=0;
7972   current.wasconst=0;
7973   current.waswritten=0;
7974   int ds=0;
7975   int cc=0;
7976   int hr=-1;
7977
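       // A block address with bit 0 set marks a block that begins inside a branch
       // delay slot; the branch target is expected to arrive in BTREG.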
7978   if((u_int)addr&1) {
7979     // First instruction is delay slot
7980     cc=-1;
7981     bt[1]=1;
7982     ds=1;
7983     unneeded_reg[0]=1;
7984     unneeded_reg_upper[0]=1;
7985     current.regmap[HOST_BTREG]=BTREG;
7986   }
7987
7988   for(i=0;i<slen;i++)
7989   {
7990     if(bt[i])
7991     {
7992       int hr;
7993       for(hr=0;hr<HOST_REGS;hr++)
7994       {
7995         // Is this really necessary?
7996         if(current.regmap[hr]==0) current.regmap[hr]=-1;
7997       }
7998       current.isconst=0;
7999       current.waswritten=0;
8000     }
8001     if(i>1)
8002     {
8003       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8004       {
8005         if(rs1[i-2]==0||rs2[i-2]==0)
8006         {
8007           if(rs1[i-2]) {
8008             current.is32|=1LL<<rs1[i-2];
8009             int hr=get_reg(current.regmap,rs1[i-2]|64);
8010             if(hr>=0) current.regmap[hr]=-1;
8011           }
8012           if(rs2[i-2]) {
8013             current.is32|=1LL<<rs2[i-2];
8014             int hr=get_reg(current.regmap,rs2[i-2]|64);
8015             if(hr>=0) current.regmap[hr]=-1;
8016           }
8017         }
8018       }
8019     }
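         // Force all registers to be tracked as 32-bit here; the PSX CPU (R3000A)
         // has no 64-bit GPRs.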
8020     current.is32=-1LL;
8021
8022     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8023     regs[i].wasconst=current.isconst;
8024     regs[i].was32=current.is32;
8025     regs[i].wasdirty=current.dirty;
8026     regs[i].loadedconst=0;
8027     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8028       if(i+1<slen) {
8029         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8030         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8031         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8032         current.u|=1;
8033         current.uu|=1;
8034       } else {
8035         current.u=1;
8036         current.uu=1;
8037       }
8038     } else {
8039       if(i+1<slen) {
8040         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8041         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8042         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8043         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8044         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8045         current.u|=1;
8046         current.uu|=1;
8047       } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
8048     }
8049     is_ds[i]=ds;
8050     if(ds) {
8051       ds=0; // Skip delay slot, already allocated as part of branch
8052       // ...but we need to alloc it in case something jumps here
8053       if(i+1<slen) {
8054         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8055         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8056       }else{
8057         current.u=branch_unneeded_reg[i-1];
8058         current.uu=branch_unneeded_reg_upper[i-1];
8059       }
8060       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8061       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8062       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8063       current.u|=1;
8064       current.uu|=1;
8065       struct regstat temp;
8066       memcpy(&temp,&current,sizeof(current));
8067       temp.wasdirty=temp.dirty;
8068       temp.was32=temp.is32;
8069       // TODO: Take into account unconditional branches, as below
8070       delayslot_alloc(&temp,i);
8071       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8072       regs[i].wasdirty=temp.wasdirty;
8073       regs[i].was32=temp.was32;
8074       regs[i].dirty=temp.dirty;
8075       regs[i].is32=temp.is32;
8076       regs[i].isconst=0;
8077       regs[i].wasconst=0;
8078       current.isconst=0;
8079       // Create entry (branch target) regmap
8080       for(hr=0;hr<HOST_REGS;hr++)
8081       {
8082         int r=temp.regmap[hr];
8083         if(r>=0) {
8084           if(r!=regmap_pre[i][hr]) {
8085             regs[i].regmap_entry[hr]=-1;
8086           }
8087           else
8088           {
8089             if(r<64){
8090               if((current.u>>r)&1) {
8091                 regs[i].regmap_entry[hr]=-1;
8092                 regs[i].regmap[hr]=-1;
8093                 // Don't clear regs in the delay slot as the branch might need them
8094                 //current.regmap[hr]=-1;
8095               }else
8096                 regs[i].regmap_entry[hr]=r;
8097             }
8098             else {
8099               if((current.uu>>(r&63))&1) {
8100                 regs[i].regmap_entry[hr]=-1;
8101                 regs[i].regmap[hr]=-1;
8102                 // Don't clear regs in the delay slot as the branch might need them
8103                 //current.regmap[hr]=-1;
8104               }else
8105                 regs[i].regmap_entry[hr]=r;
8106             }
8107           }
8108         } else {
8109           // First instruction expects CCREG to be allocated
8110           if(i==0&&hr==HOST_CCREG)
8111             regs[i].regmap_entry[hr]=CCREG;
8112           else
8113             regs[i].regmap_entry[hr]=-1;
8114         }
8115       }
8116     }
8117     else { // Not delay slot
8118       switch(itype[i]) {
8119         case UJUMP:
8120           //current.isconst=0; // DEBUG
8121           //current.wasconst=0; // DEBUG
8122           //regs[i].wasconst=0; // DEBUG
8123           clear_const(&current,rt1[i]);
8124           alloc_cc(&current,i);
8125           dirty_reg(&current,CCREG);
8126           if (rt1[i]==31) {
8127             alloc_reg(&current,i,31);
8128             dirty_reg(&current,31);
8129             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8130             //assert(rt1[i+1]!=rt1[i]);
8131             #ifdef REG_PREFETCH
8132             alloc_reg(&current,i,PTEMP);
8133             #endif
8134             //current.is32|=1LL<<rt1[i];
8135           }
8136           ooo[i]=1;
8137           delayslot_alloc(&current,i+1);
8138           //current.isconst=0; // DEBUG
8139           ds=1;
8140           //printf("i=%d, isconst=%x\n",i,current.isconst);
8141           break;
8142         case RJUMP:
8143           //current.isconst=0;
8144           //current.wasconst=0;
8145           //regs[i].wasconst=0;
8146           clear_const(&current,rs1[i]);
8147           clear_const(&current,rt1[i]);
8148           alloc_cc(&current,i);
8149           dirty_reg(&current,CCREG);
8150           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8151             alloc_reg(&current,i,rs1[i]);
8152             if (rt1[i]!=0) {
8153               alloc_reg(&current,i,rt1[i]);
8154               dirty_reg(&current,rt1[i]);
8155               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
8156               assert(rt1[i+1]!=rt1[i]);
8157               #ifdef REG_PREFETCH
8158               alloc_reg(&current,i,PTEMP);
8159               #endif
8160             }
8161             #ifdef USE_MINI_HT
8162             if(rs1[i]==31) { // JALR
8163               alloc_reg(&current,i,RHASH);
8164               #ifndef HOST_IMM_ADDR32
8165               alloc_reg(&current,i,RHTBL);
8166               #endif
8167             }
8168             #endif
8169             delayslot_alloc(&current,i+1);
8170           } else {
8171             // The delay slot overwrites our source register,
8172             // allocate a temporary register to hold the old value.
8173             current.isconst=0;
8174             current.wasconst=0;
8175             regs[i].wasconst=0;
8176             delayslot_alloc(&current,i+1);
8177             current.isconst=0;
8178             alloc_reg(&current,i,RTEMP);
8179           }
8180           //current.isconst=0; // DEBUG
8181           ooo[i]=1;
8182           ds=1;
8183           break;
8184         case CJUMP:
8185           //current.isconst=0;
8186           //current.wasconst=0;
8187           //regs[i].wasconst=0;
8188           clear_const(&current,rs1[i]);
8189           clear_const(&current,rs2[i]);
8190           if((opcode[i]&0x3E)==4) // BEQ/BNE
8191           {
8192             alloc_cc(&current,i);
8193             dirty_reg(&current,CCREG);
8194             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8195             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8196             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8197             {
8198               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8199               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8200             }
8201             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8202                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8203               // The delay slot overwrites one of our conditions.
8204               // Allocate the branch condition registers instead.
8205               current.isconst=0;
8206               current.wasconst=0;
8207               regs[i].wasconst=0;
8208               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8209               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8210               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8211               {
8212                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8213                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8214               }
8215             }
8216             else
8217             {
8218               ooo[i]=1;
8219               delayslot_alloc(&current,i+1);
8220             }
8221           }
8222           else
8223           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8224           {
8225             alloc_cc(&current,i);
8226             dirty_reg(&current,CCREG);
8227             alloc_reg(&current,i,rs1[i]);
8228             if(!(current.is32>>rs1[i]&1))
8229             {
8230               alloc_reg64(&current,i,rs1[i]);
8231             }
8232             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8233               // The delay slot overwrites one of our conditions.
8234               // Allocate the branch condition registers instead.
8235               current.isconst=0;
8236               current.wasconst=0;
8237               regs[i].wasconst=0;
8238               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8239               if(!((current.is32>>rs1[i])&1))
8240               {
8241                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8242               }
8243             }
8244             else
8245             {
8246               ooo[i]=1;
8247               delayslot_alloc(&current,i+1);
8248             }
8249           }
8250           else
8251           // Don't alloc the delay slot yet because we might not execute it
8252           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8253           {
8254             current.isconst=0;
8255             current.wasconst=0;
8256             regs[i].wasconst=0;
8257             alloc_cc(&current,i);
8258             dirty_reg(&current,CCREG);
8259             alloc_reg(&current,i,rs1[i]);
8260             alloc_reg(&current,i,rs2[i]);
8261             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8262             {
8263               alloc_reg64(&current,i,rs1[i]);
8264               alloc_reg64(&current,i,rs2[i]);
8265             }
8266           }
8267           else
8268           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8269           {
8270             current.isconst=0;
8271             current.wasconst=0;
8272             regs[i].wasconst=0;
8273             alloc_cc(&current,i);
8274             dirty_reg(&current,CCREG);
8275             alloc_reg(&current,i,rs1[i]);
8276             if(!(current.is32>>rs1[i]&1))
8277             {
8278               alloc_reg64(&current,i,rs1[i]);
8279             }
8280           }
8281           ds=1;
8282           //current.isconst=0;
8283           break;
8284         case SJUMP:
8285           //current.isconst=0;
8286           //current.wasconst=0;
8287           //regs[i].wasconst=0;
8288           clear_const(&current,rs1[i]);
8289           clear_const(&current,rt1[i]);
8290           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8291           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8292           {
8293             alloc_cc(&current,i);
8294             dirty_reg(&current,CCREG);
8295             alloc_reg(&current,i,rs1[i]);
8296             if(!(current.is32>>rs1[i]&1))
8297             {
8298               alloc_reg64(&current,i,rs1[i]);
8299             }
8300             if (rt1[i]==31) { // BLTZAL/BGEZAL
8301               alloc_reg(&current,i,31);
8302               dirty_reg(&current,31);
8303               //#ifdef REG_PREFETCH
8304               //alloc_reg(&current,i,PTEMP);
8305               //#endif
8306               //current.is32|=1LL<<rt1[i];
8307             }
8308             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
8309                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
8310               // Allocate the branch condition registers instead.
8311               current.isconst=0;
8312               current.wasconst=0;
8313               regs[i].wasconst=0;
8314               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8315               if(!((current.is32>>rs1[i])&1))
8316               {
8317                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8318               }
8319             }
8320             else
8321             {
8322               ooo[i]=1;
8323               delayslot_alloc(&current,i+1);
8324             }
8325           }
8326           else
8327           // Don't alloc the delay slot yet because we might not execute it
8328           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8329           {
8330             current.isconst=0;
8331             current.wasconst=0;
8332             regs[i].wasconst=0;
8333             alloc_cc(&current,i);
8334             dirty_reg(&current,CCREG);
8335             alloc_reg(&current,i,rs1[i]);
8336             if(!(current.is32>>rs1[i]&1))
8337             {
8338               alloc_reg64(&current,i,rs1[i]);
8339             }
8340           }
8341           ds=1;
8342           //current.isconst=0;
8343           break;
8344         case FJUMP:
8345           current.isconst=0;
8346           current.wasconst=0;
8347           regs[i].wasconst=0;
8348           if(likely[i]==0) // BC1F/BC1T
8349           {
8350             // TODO: Theoretically we can run out of registers here on x86.
8351             // The delay slot can allocate up to six, and we need to check
8352             // CSREG before executing the delay slot.  Possibly we can drop
8353             // the cycle count and then reload it after checking that the
8354             // FPU is in a usable state, or don't do out-of-order execution.
8355             alloc_cc(&current,i);
8356             dirty_reg(&current,CCREG);
8357             alloc_reg(&current,i,FSREG);
8358             alloc_reg(&current,i,CSREG);
8359             if(itype[i+1]==FCOMP) {
8360               // The delay slot overwrites the branch condition.
8361               // Allocate the branch condition registers instead.
8362               alloc_cc(&current,i);
8363               dirty_reg(&current,CCREG);
8364               alloc_reg(&current,i,CSREG);
8365               alloc_reg(&current,i,FSREG);
8366             }
8367             else {
8368               ooo[i]=1;
8369               delayslot_alloc(&current,i+1);
8370               alloc_reg(&current,i+1,CSREG);
8371             }
8372           }
8373           else
8374           // Don't alloc the delay slot yet because we might not execute it
8375           if(likely[i]) // BC1FL/BC1TL
8376           {
8377             alloc_cc(&current,i);
8378             dirty_reg(&current,CCREG);
8379             alloc_reg(&current,i,CSREG);
8380             alloc_reg(&current,i,FSREG);
8381           }
8382           ds=1;
8383           current.isconst=0;
8384           break;
8385         case IMM16:
8386           imm16_alloc(&current,i);
8387           break;
8388         case LOAD:
8389         case LOADLR:
8390           load_alloc(&current,i);
8391           break;
8392         case STORE:
8393         case STORELR:
8394           store_alloc(&current,i);
8395           break;
8396         case ALU:
8397           alu_alloc(&current,i);
8398           break;
8399         case SHIFT:
8400           shift_alloc(&current,i);
8401           break;
8402         case MULTDIV:
8403           multdiv_alloc(&current,i);
8404           break;
8405         case SHIFTIMM:
8406           shiftimm_alloc(&current,i);
8407           break;
8408         case MOV:
8409           mov_alloc(&current,i);
8410           break;
8411         case COP0:
8412           cop0_alloc(&current,i);
8413           break;
8414         case COP1:
8415         case COP2:
8416           cop1_alloc(&current,i);
8417           break;
8418         case C1LS:
8419           c1ls_alloc(&current,i);
8420           break;
8421         case C2LS:
8422           c2ls_alloc(&current,i);
8423           break;
8424         case C2OP:
8425           c2op_alloc(&current,i);
8426           break;
8427         case FCONV:
8428           fconv_alloc(&current,i);
8429           break;
8430         case FLOAT:
8431           float_alloc(&current,i);
8432           break;
8433         case FCOMP:
8434           fcomp_alloc(&current,i);
8435           break;
8436         case SYSCALL:
8437         case HLECALL:
8438         case INTCALL:
8439           syscall_alloc(&current,i);
8440           break;
8441         case SPAN:
8442           pagespan_alloc(&current,i);
8443           break;
8444       }
8445
8446       // Drop the upper half of registers that have become 32-bit
8447       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8448       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8449         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8450         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8451         current.uu|=1;
8452       } else {
8453         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
8454         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8455         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8456         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8457         current.uu|=1;
8458       }
8459
8460       // Create entry (branch target) regmap
8461       for(hr=0;hr<HOST_REGS;hr++)
8462       {
8463         int r,or;
8464         r=current.regmap[hr];
8465         if(r>=0) {
8466           if(r!=regmap_pre[i][hr]) {
8467             // TODO: delay slot (?)
8468             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
8469             if(or<0||(r&63)>=TEMPREG){
8470               regs[i].regmap_entry[hr]=-1;
8471             }
8472             else
8473             {
8474               // Just move it to a different register
8475               regs[i].regmap_entry[hr]=r;
8476               // If it was dirty before, it's still dirty
8477               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
8478             }
8479           }
8480           else
8481           {
8482             // Unneeded
8483             if(r==0){
8484               regs[i].regmap_entry[hr]=0;
8485             }
8486             else
8487             if(r<64){
8488               if((current.u>>r)&1) {
8489                 regs[i].regmap_entry[hr]=-1;
8490                 //regs[i].regmap[hr]=-1;
8491                 current.regmap[hr]=-1;
8492               }else
8493                 regs[i].regmap_entry[hr]=r;
8494             }
8495             else {
8496               if((current.uu>>(r&63))&1) {
8497                 regs[i].regmap_entry[hr]=-1;
8498                 //regs[i].regmap[hr]=-1;
8499                 current.regmap[hr]=-1;
8500               }else
8501                 regs[i].regmap_entry[hr]=r;
8502             }
8503           }
8504         } else {
8505           // Branches expect CCREG to be allocated at the target
8506           if(regmap_pre[i][hr]==CCREG)
8507             regs[i].regmap_entry[hr]=CCREG;
8508           else
8509             regs[i].regmap_entry[hr]=-1;
8510         }
8511       }
8512       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
8513     }
8514
8515     if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
8516       current.waswritten|=1<<rs1[i-1];
8517     current.waswritten&=~(1<<rt1[i]);
8518     current.waswritten&=~(1<<rt2[i]);
8519     if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
8520       current.waswritten&=~(1<<rs1[i]);
8521
8522     /* Branch post-alloc */
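         // If instruction i-1 was a branch, build branch_regs[i-1]: the register
         // state associated with the branch itself (used for its taken path),
         // kept separate from the fall-through state in 'current'.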
8523     if(i>0)
8524     {
8525       current.was32=current.is32;
8526       current.wasdirty=current.dirty;
8527       switch(itype[i-1]) {
8528         case UJUMP:
8529           memcpy(&branch_regs[i-1],&current,sizeof(current));
8530           branch_regs[i-1].isconst=0;
8531           branch_regs[i-1].wasconst=0;
8532           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8533           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8534           alloc_cc(&branch_regs[i-1],i-1);
8535           dirty_reg(&branch_regs[i-1],CCREG);
8536           if(rt1[i-1]==31) { // JAL
8537             alloc_reg(&branch_regs[i-1],i-1,31);
8538             dirty_reg(&branch_regs[i-1],31);
8539             branch_regs[i-1].is32|=1LL<<31;
8540           }
8541           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8542           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8543           break;
8544         case RJUMP:
8545           memcpy(&branch_regs[i-1],&current,sizeof(current));
8546           branch_regs[i-1].isconst=0;
8547           branch_regs[i-1].wasconst=0;
8548           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8549           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8550           alloc_cc(&branch_regs[i-1],i-1);
8551           dirty_reg(&branch_regs[i-1],CCREG);
8552           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
8553           if(rt1[i-1]!=0) { // JALR
8554             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
8555             dirty_reg(&branch_regs[i-1],rt1[i-1]);
8556             branch_regs[i-1].is32|=1LL<<rt1[i-1];
8557           }
8558           #ifdef USE_MINI_HT
8559           if(rs1[i-1]==31) { // JALR
8560             alloc_reg(&branch_regs[i-1],i-1,RHASH);
8561             #ifndef HOST_IMM_ADDR32
8562             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
8563             #endif
8564           }
8565           #endif
8566           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8567           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8568           break;
8569         case CJUMP:
8570           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
8571           {
8572             alloc_cc(&current,i-1);
8573             dirty_reg(&current,CCREG);
8574             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
8575                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
8576               // The delay slot overwrote one of our conditions
8577               // Delay slot goes after the test (in order)
8578               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8579               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8580               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8581               current.u|=1;
8582               current.uu|=1;
8583               delayslot_alloc(&current,i);
8584               current.isconst=0;
8585             }
8586             else
8587             {
8588               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8589               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8590               // Alloc the branch condition registers
8591               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
8592               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
8593               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
8594               {
8595                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
8596                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
8597               }
8598             }
8599             memcpy(&branch_regs[i-1],&current,sizeof(current));
8600             branch_regs[i-1].isconst=0;
8601             branch_regs[i-1].wasconst=0;
8602             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8603             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8604           }
8605           else
8606           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
8607           {
8608             alloc_cc(&current,i-1);
8609             dirty_reg(&current,CCREG);
8610             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8611               // The delay slot overwrote the branch condition
8612               // Delay slot goes after the test (in order)
8613               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8614               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8615               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8616               current.u|=1;
8617               current.uu|=1;
8618               delayslot_alloc(&current,i);
8619               current.isconst=0;
8620             }
8621             else
8622             {
8623               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8624               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8625               // Alloc the branch condition register
8626               alloc_reg(&current,i-1,rs1[i-1]);
8627               if(!(current.is32>>rs1[i-1]&1))
8628               {
8629                 alloc_reg64(&current,i-1,rs1[i-1]);
8630               }
8631             }
8632             memcpy(&branch_regs[i-1],&current,sizeof(current));
8633             branch_regs[i-1].isconst=0;
8634             branch_regs[i-1].wasconst=0;
8635             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8636             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8637           }
8638           else
8639           // Alloc the delay slot in case the branch is taken
8640           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
8641           {
8642             memcpy(&branch_regs[i-1],&current,sizeof(current));
8643             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8644             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8645             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8646             alloc_cc(&branch_regs[i-1],i);
8647             dirty_reg(&branch_regs[i-1],CCREG);
8648             delayslot_alloc(&branch_regs[i-1],i);
8649             branch_regs[i-1].isconst=0;
8650             alloc_reg(&current,i,CCREG); // Not taken path
8651             dirty_reg(&current,CCREG);
8652             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8653           }
8654           else
8655           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
8656           {
8657             memcpy(&branch_regs[i-1],&current,sizeof(current));
8658             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8659             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8660             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8661             alloc_cc(&branch_regs[i-1],i);
8662             dirty_reg(&branch_regs[i-1],CCREG);
8663             delayslot_alloc(&branch_regs[i-1],i);
8664             branch_regs[i-1].isconst=0;
8665             alloc_reg(&current,i,CCREG); // Not taken path
8666             dirty_reg(&current,CCREG);
8667             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8668           }
8669           break;
8670         case SJUMP:
8671           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
8672           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
8673           {
8674             alloc_cc(&current,i-1);
8675             dirty_reg(&current,CCREG);
8676             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8677               // The delay slot overwrote the branch condition
8678               // Delay slot goes after the test (in order)
8679               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8680               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8681               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8682               current.u|=1;
8683               current.uu|=1;
8684               delayslot_alloc(&current,i);
8685               current.isconst=0;
8686             }
8687             else
8688             {
8689               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8690               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8691               // Alloc the branch condition register
8692               alloc_reg(&current,i-1,rs1[i-1]);
8693               if(!(current.is32>>rs1[i-1]&1))
8694               {
8695                 alloc_reg64(&current,i-1,rs1[i-1]);
8696               }
8697             }
8698             memcpy(&branch_regs[i-1],&current,sizeof(current));
8699             branch_regs[i-1].isconst=0;
8700             branch_regs[i-1].wasconst=0;
8701             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8702             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8703           }
8704           else
8705           // Alloc the delay slot in case the branch is taken
8706           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
8707           {
8708             memcpy(&branch_regs[i-1],&current,sizeof(current));
8709             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8710             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8711             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8712             alloc_cc(&branch_regs[i-1],i);
8713             dirty_reg(&branch_regs[i-1],CCREG);
8714             delayslot_alloc(&branch_regs[i-1],i);
8715             branch_regs[i-1].isconst=0;
8716             alloc_reg(&current,i,CCREG); // Not taken path
8717             dirty_reg(&current,CCREG);
8718             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8719           }
8720           // FIXME: BLTZAL/BGEZAL
8721           if(opcode2[i-1]&0x10) { // BxxZAL
8722             alloc_reg(&branch_regs[i-1],i-1,31);
8723             dirty_reg(&branch_regs[i-1],31);
8724             branch_regs[i-1].is32|=1LL<<31;
8725           }
8726           break;
8727         case FJUMP:
8728           if(likely[i-1]==0) // BC1F/BC1T
8729           {
8730             alloc_cc(&current,i-1);
8731             dirty_reg(&current,CCREG);
8732             if(itype[i]==FCOMP) {
8733               // The delay slot overwrote the branch condition
8734               // Delay slot goes after the test (in order)
8735               delayslot_alloc(&current,i);
8736               current.isconst=0;
8737             }
8738             else
8739             {
8740               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8741               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8742               // Alloc the branch condition register
8743               alloc_reg(&current,i-1,FSREG);
8744             }
8745             memcpy(&branch_regs[i-1],&current,sizeof(current));
8746             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8747           }
8748           else // BC1FL/BC1TL
8749           {
8750             // Alloc the delay slot in case the branch is taken
8751             memcpy(&branch_regs[i-1],&current,sizeof(current));
8752             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8753             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8754             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8755             alloc_cc(&branch_regs[i-1],i);
8756             dirty_reg(&branch_regs[i-1],CCREG);
8757             delayslot_alloc(&branch_regs[i-1],i);
8758             branch_regs[i-1].isconst=0;
8759             alloc_reg(&current,i,CCREG); // Not taken path
8760             dirty_reg(&current,CCREG);
8761             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8762           }
8763           break;
8764       }
8765
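           // (source[i-1]>>16)==0x1000 matches "beq $zero,$zero", an unconditional
           // branch.  After an unconditional jump or branch the following code can
           // only be reached as a call return or an internal branch target, so the
           // allocation state is re-seeded here.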
8766       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
8767       {
8768         if(rt1[i-1]==31) // JAL/JALR
8769         {
8770           // Subroutine call will return here, don't alloc any registers
8771           current.is32=1;
8772           current.dirty=0;
8773           clear_all_regs(current.regmap);
8774           alloc_reg(&current,i,CCREG);
8775           dirty_reg(&current,CCREG);
8776         }
8777         else if(i+1<slen)
8778         {
8779           // Internal branch will jump here, match registers to caller
8780           current.is32=0x3FFFFFFFFLL;
8781           current.dirty=0;
8782           clear_all_regs(current.regmap);
8783           alloc_reg(&current,i,CCREG);
8784           dirty_reg(&current,CCREG);
8785           for(j=i-1;j>=0;j--)
8786           {
8787             if(ba[j]==start+i*4+4) {
8788               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
8789               current.is32=branch_regs[j].is32;
8790               current.dirty=branch_regs[j].dirty;
8791               break;
8792             }
8793           }
8794           while(j>=0) {
8795             if(ba[j]==start+i*4+4) {
8796               for(hr=0;hr<HOST_REGS;hr++) {
8797                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
8798                   current.regmap[hr]=-1;
8799                 }
8800                 current.is32&=branch_regs[j].is32;
8801                 current.dirty&=branch_regs[j].dirty;
8802               }
8803             }
8804             j--;
8805           }
8806         }
8807       }
8808     }
8809
8810     // Count cycles in between branches
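         // ccadj[i] = cycles accumulated since the last branch; cc is reset to 0
         // right after branches/syscalls and otherwise grows per instruction, with
         // extra cycles charged for GTE ops, coprocessor loads/stores and runs of
         // stores.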
8811     ccadj[i]=cc;
8812     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
8813     {
8814       cc=0;
8815     }
8816 #if !defined(DRC_DBG)
8817     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
8818     {
8819       // GTE runs in parallel until accessed, divide by 2 for a rough guess
8820       cc+=gte_cycletab[source[i]&0x3f]/2;
8821     }
8822     else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // load,store causes weird timing issues
8823     {
8824       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
8825     }
8826     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
8827     {
8828       cc+=4;
8829     }
8830     else if(itype[i]==C2LS)
8831     {
8832       cc+=4;
8833     }
8834 #endif
8835     else
8836     {
8837       cc++;
8838     }
8839
8840     flush_dirty_uppers(&current);
8841     if(!is_ds[i]) {
8842       regs[i].is32=current.is32;
8843       regs[i].dirty=current.dirty;
8844       regs[i].isconst=current.isconst;
8845       memcpy(constmap[i],current_constmap,sizeof(current_constmap));
8846     }
8847     for(hr=0;hr<HOST_REGS;hr++) {
8848       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
8849         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
8850           regs[i].wasconst&=~(1<<hr);
8851         }
8852       }
8853     }
8854     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
8855     regs[i].waswritten=current.waswritten;
8856   }
8857
8858   /* Pass 4 - Cull unused host registers */
8859
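       // Backward scan computing needed_reg[i]: a bitmask of host registers whose
       // contents are still required at or after instruction i.  Mappings that are
       // not needed are dropped so they don't cause pointless loads and stores.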
8860   uint64_t nr=0;
8861
8862   for (i=slen-1;i>=0;i--)
8863   {
8864     int hr;
8865     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
8866     {
8867       if(ba[i]<start || ba[i]>=(start+slen*4))
8868       {
8869         // Branch out of this block, don't need anything
8870         nr=0;
8871       }
8872       else
8873       {
8874         // Internal branch
8875         // Need whatever matches the target
8876         nr=0;
8877         int t=(ba[i]-start)>>2;
8878         for(hr=0;hr<HOST_REGS;hr++)
8879         {
8880           if(regs[i].regmap_entry[hr]>=0) {
8881             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
8882           }
8883         }
8884       }
8885       // Conditional branch may need registers for following instructions
8886       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
8887       {
8888         if(i<slen-2) {
8889           nr|=needed_reg[i+2];
8890           for(hr=0;hr<HOST_REGS;hr++)
8891           {
8892             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
8893             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
8894           }
8895         }
8896       }
8897       // Don't need stuff which is overwritten
8898       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8899       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8900       // Merge in delay slot
8901       for(hr=0;hr<HOST_REGS;hr++)
8902       {
8903         if(!likely[i]) {
8904           // These are overwritten unless the branch is "likely",
8905           // in which case the delay slot is nullified when it is not taken
8906           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8907           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8908         }
8909         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8910         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8911         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8912         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8913         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8914         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8915         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8916         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8917         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
8918           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8919           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8920         }
8921         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
8922           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8923           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8924         }
8925         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
8926           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8927           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8928         }
8929       }
8930     }
8931     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
8932     {
8933       // SYSCALL instruction (software interrupt)
8934       nr=0;
8935     }
8936     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
8937     {
8938       // ERET instruction (return from interrupt)
8939       nr=0;
8940     }
8941     else // Non-branch
8942     {
8943       if(i<slen-1) {
8944         for(hr=0;hr<HOST_REGS;hr++) {
8945           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
8946           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
8947           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8948           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8949         }
8950       }
8951     }
8952     for(hr=0;hr<HOST_REGS;hr++)
8953     {
8954       // Overwritten registers are not needed
8955       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8956       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8957       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8958       // Source registers are needed
8959       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8960       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8961       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
8962       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
8963       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8964       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8965       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8966       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8967       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
8968         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8969         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8970       }
8971       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
8972         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8973         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8974       }
8975       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
8976         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8977         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8978       }
8979       // Don't store a register immediately after writing it,
8980       // as that may prevent dual-issue.
8981       // But do so if this is a branch target, otherwise we
8982       // might have to load the register before the branch.
8983       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
8984         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
8985            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
8986           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8987           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8988         }
8989         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
8990            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
8991           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8992           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8993         }
8994       }
8995     }
8996     // Cycle count is needed at branches.  Assume it is needed at the target too.
8997     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
8998       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
8999       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9000     }
9001     // Save it
9002     needed_reg[i]=nr;
9003
9004     // Deallocate unneeded registers
9005     for(hr=0;hr<HOST_REGS;hr++)
9006     {
9007       if(!((nr>>hr)&1)) {
9008         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9009         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9010            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9011            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9012         {
9013           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9014           {
9015             if(likely[i]) {
9016               regs[i].regmap[hr]=-1;
9017               regs[i].isconst&=~(1<<hr);
9018               if(i<slen-2) {
9019                 regmap_pre[i+2][hr]=-1;
9020                 regs[i+2].wasconst&=~(1<<hr);
9021               }
9022             }
9023           }
9024         }
9025         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9026         {
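               // Branch: don't free anything the branch or its delay slot still
               // uses.  d1/d2 are the delay slot's upper-half dependencies, map is
               // INVCP when the delay slot is a store (code-invalidation check),
               // temp is FTEMP for unaligned/coprocessor loads and stores.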
9027           int d1=0,d2=0,map=0,temp=0;
9028           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9029           {
9030             d1=dep1[i+1];
9031             d2=dep2[i+1];
9032           }
9033           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9034              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9035             map=INVCP;
9036           }
9037           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9038              itype[i+1]==C1LS || itype[i+1]==C2LS)
9039             temp=FTEMP;
9040           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9041              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9042              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9043              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9044              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9045              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9046              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9047              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9048              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9049              regs[i].regmap[hr]!=map )
9050           {
9051             regs[i].regmap[hr]=-1;
9052             regs[i].isconst&=~(1<<hr);
9053             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9054                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9055                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9056                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9057                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9058                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9059                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9060                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9061                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9062                branch_regs[i].regmap[hr]!=map)
9063             {
9064               branch_regs[i].regmap[hr]=-1;
9065               branch_regs[i].regmap_entry[hr]=-1;
9066               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9067               {
9068                 if(!likely[i]&&i<slen-2) {
9069                   regmap_pre[i+2][hr]=-1;
9070                   regs[i+2].wasconst&=~(1<<hr);
9071                 }
9072               }
9073             }
9074           }
9075         }
9076         else
9077         {
9078           // Non-branch
9079           if(i>0)
9080           {
9081             int d1=0,d2=0,map=-1,temp=-1;
9082             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9083             {
9084               d1=dep1[i];
9085               d2=dep2[i];
9086             }
9087             if(itype[i]==STORE || itype[i]==STORELR ||
9088                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9089               map=INVCP;
9090             }
9091             if(itype[i]==LOADLR || itype[i]==STORELR ||
9092                itype[i]==C1LS || itype[i]==C2LS)
9093               temp=FTEMP;
9094             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9095                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9096                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9097                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9098                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9099                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9100             {
9101               if(i<slen-1&&!is_ds[i]) {
9102                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9103                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9104                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9105                 {
9106                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9107                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9108                 }
9109                 regmap_pre[i+1][hr]=-1;
9110                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9111                 regs[i+1].wasconst&=~(1<<hr);
9112               }
9113               regs[i].regmap[hr]=-1;
9114               regs[i].isconst&=~(1<<hr);
9115             }
9116           }
9117         }
9118       }
9119     }
9120   }
9121
9122   /* Pass 5 - Pre-allocate registers */
9123
9124   // If a register is allocated during a loop, try to allocate it for the
9125   // entire loop, if possible.  This avoids loading/storing registers
9126   // inside of the loop.
9127
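       // f_regmap[] holds the candidate host->guest mapping we would like to keep
       // live across an entire loop body (from the backward-branch target up to
       // the branch itself).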
9128   signed char f_regmap[HOST_REGS];
9129   clear_all_regs(f_regmap);
9130   for(i=0;i<slen-1;i++)
9131   {
9132     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9133     {
9134       if(ba[i]>=start && ba[i]<(start+i*4))
9135       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9136       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9137       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9138       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9139       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9140       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9141       {
9142         int t=(ba[i]-start)>>2;
9143         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9144         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
9145         for(hr=0;hr<HOST_REGS;hr++)
9146         {
9147           if(regs[i].regmap[hr]>64) {
9148             if(!((regs[i].dirty>>hr)&1))
9149               f_regmap[hr]=regs[i].regmap[hr];
9150             else f_regmap[hr]=-1;
9151           }
9152           else if(regs[i].regmap[hr]>=0) {
9153             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9154               // dealloc old register
9155               int n;
9156               for(n=0;n<HOST_REGS;n++)
9157               {
9158                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9159               }
9160               // and alloc new one
9161               f_regmap[hr]=regs[i].regmap[hr];
9162             }
9163           }
9164           if(branch_regs[i].regmap[hr]>64) {
9165             if(!((branch_regs[i].dirty>>hr)&1))
9166               f_regmap[hr]=branch_regs[i].regmap[hr];
9167             else f_regmap[hr]=-1;
9168           }
9169           else if(branch_regs[i].regmap[hr]>=0) {
9170             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9171               // dealloc old register
9172               int n;
9173               for(n=0;n<HOST_REGS;n++)
9174               {
9175                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9176               }
9177               // and alloc new one
9178               f_regmap[hr]=branch_regs[i].regmap[hr];
9179             }
9180           }
9181           if(ooo[i]) {
9182             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
9183               f_regmap[hr]=branch_regs[i].regmap[hr];
9184           }else{
9185             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
9186               f_regmap[hr]=branch_regs[i].regmap[hr];
9187           }
9188           // Avoid dirty->clean transition
9189           #ifdef DESTRUCTIVE_WRITEBACK
9190           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9191           #endif
9192           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
9193           // case above, but it's always a good idea.  We can't hoist the
9194           // load if the register was already allocated, so there's no point
9195           // wasting time analyzing most of these cases.  It only "succeeds"
9196           // when the mapping was different and the load can be replaced with
9197           // a mov, which is of negligible benefit.  So such cases are
9198           // skipped below.
9199           if(f_regmap[hr]>0) {
9200             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
9201               int r=f_regmap[hr];
9202               for(j=t;j<=i;j++)
9203               {
9204                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9205                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9206                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9207                 if(r>63) {
9208                   // NB This can exclude the case where the upper-half
9209                   // register is lower numbered than the lower-half
9210                   // register.  Not sure if it's worth fixing...
9211                   if(get_reg(regs[j].regmap,r&63)<0) break;
9212                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
9213                   if(regs[j].is32&(1LL<<(r&63))) break;
9214                 }
9215                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9216                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9217                   int k;
9218                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9219                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9220                     if(r>63) {
9221                       if(get_reg(regs[i].regmap,r&63)<0) break;
9222                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9223                     }
9224                     k=i;
9225                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9226                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9227                         //printf("no free regs for store %x\n",start+(k-1)*4);
9228                         break;
9229                       }
9230                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9231                         //printf("no-match due to different register\n");
9232                         break;
9233                       }
9234                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9235                         //printf("no-match due to branch\n");
9236                         break;
9237                       }
9238                       // call/ret fast path assumes no registers allocated
9239                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
9240                         break;
9241                       }
9242                       if(r>63) {
9243                         // NB This can exclude the case where the upper-half
9244                         // register is lower numbered than the lower-half
9245                         // register.  Not sure if it's worth fixing...
9246                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9247                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9248                       }
9249                       k--;
9250                     }
9251                     if(i<slen-1) {
9252                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9253                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9254                         //printf("bad match after branch\n");
9255                         break;
9256                       }
9257                     }
9258                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9259                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9260                       while(k<i) {
9261                         regs[k].regmap_entry[hr]=f_regmap[hr];
9262                         regs[k].regmap[hr]=f_regmap[hr];
9263                         regmap_pre[k+1][hr]=f_regmap[hr];
9264                         regs[k].wasdirty&=~(1<<hr);
9265                         regs[k].dirty&=~(1<<hr);
9266                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9267                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9268                         regs[k].wasconst&=~(1<<hr);
9269                         regs[k].isconst&=~(1<<hr);
9270                         k++;
9271                       }
9272                     }
9273                     else {
9274                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9275                       break;
9276                     }
9277                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9278                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9279                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9280                       regs[i].regmap_entry[hr]=f_regmap[hr];
9281                       regs[i].regmap[hr]=f_regmap[hr];
9282                       regs[i].wasdirty&=~(1<<hr);
9283                       regs[i].dirty&=~(1<<hr);
9284                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9285                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9286                       regs[i].wasconst&=~(1<<hr);
9287                       regs[i].isconst&=~(1<<hr);
9288                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9289                       branch_regs[i].wasdirty&=~(1<<hr);
9290                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9291                       branch_regs[i].regmap[hr]=f_regmap[hr];
9292                       branch_regs[i].dirty&=~(1<<hr);
9293                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9294                       branch_regs[i].wasconst&=~(1<<hr);
9295                       branch_regs[i].isconst&=~(1<<hr);
9296                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9297                         regmap_pre[i+2][hr]=f_regmap[hr];
9298                         regs[i+2].wasdirty&=~(1<<hr);
9299                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9300                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9301                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9302                       }
9303                     }
9304                   }
9305                   for(k=t;k<j;k++) {
9306                     // Alloc register clean at beginning of loop,
9307                     // but may dirty it in pass 6
9308                     regs[k].regmap_entry[hr]=f_regmap[hr];
9309                     regs[k].regmap[hr]=f_regmap[hr];
9310                     regs[k].dirty&=~(1<<hr);
9311                     regs[k].wasconst&=~(1<<hr);
9312                     regs[k].isconst&=~(1<<hr);
9313                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
9314                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
9315                       branch_regs[k].regmap[hr]=f_regmap[hr];
9316                       branch_regs[k].dirty&=~(1<<hr);
9317                       branch_regs[k].wasconst&=~(1<<hr);
9318                       branch_regs[k].isconst&=~(1<<hr);
9319                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
9320                         regmap_pre[k+2][hr]=f_regmap[hr];
9321                         regs[k+2].wasdirty&=~(1<<hr);
9322                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
9323                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
9324                       }
9325                     }
9326                     else
9327                     {
9328                       regmap_pre[k+1][hr]=f_regmap[hr];
9329                       regs[k+1].wasdirty&=~(1<<hr);
9330                     }
9331                   }
9332                   if(regs[j].regmap[hr]==f_regmap[hr])
9333                     regs[j].regmap_entry[hr]=f_regmap[hr];
9334                   break;
9335                 }
9336                 if(j==i) break;
9337                 if(regs[j].regmap[hr]>=0)
9338                   break;
9339                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9340                   //printf("no-match due to different register\n");
9341                   break;
9342                 }
9343                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9344                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9345                   break;
9346                 }
9347                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9348                 {
9349                   // Stop on unconditional branch
9350                   break;
9351                 }
9352                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
9353                 {
9354                   if(ooo[j]) {
9355                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
9356                       break;
9357                   }else{
9358                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
9359                       break;
9360                   }
9361                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
9362                     //printf("no-match due to different register (branch)\n");
9363                     break;
9364                   }
9365                 }
9366                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9367                   //printf("No free regs for store %x\n",start+j*4);
9368                   break;
9369                 }
9370                 if(f_regmap[hr]>=64) {
9371                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9372                     break;
9373                   }
9374                   else
9375                   {
9376                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9377                       break;
9378                     }
9379                   }
9380                 }
9381               }
9382             }
9383           }
9384         }
9385       }
9386     }else{
9387       // Non branch or undetermined branch target
9388       for(hr=0;hr<HOST_REGS;hr++)
9389       {
9390         if(hr!=EXCLUDE_REG) {
9391           if(regs[i].regmap[hr]>64) {
9392             if(!((regs[i].dirty>>hr)&1))
9393               f_regmap[hr]=regs[i].regmap[hr];
9394           }
9395           else if(regs[i].regmap[hr]>=0) {
9396             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9397               // dealloc old register
9398               int n;
9399               for(n=0;n<HOST_REGS;n++)
9400               {
9401                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9402               }
9403               // and alloc new one
9404               f_regmap[hr]=regs[i].regmap[hr];
9405             }
9406           }
9407         }
9408       }
9409       // Try to restore cycle count at branch targets
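      // The cycle counter (CCREG) normally lives in HOST_CCREG; if a later
      // instruction already has it mapped there, extend that mapping forward
      // from this branch target (and, below, backwards from it) so the count
      // does not get spilled and reloaded around the target.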
9410       if(bt[i]) {
9411         for(j=i;j<slen-1;j++) {
9412           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9413           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9414             //printf("no free regs for store %x\n",start+j*4);
9415             break;
9416           }
9417         }
9418         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9419           int k=i;
9420           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9421           while(k<j) {
9422             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9423             regs[k].regmap[HOST_CCREG]=CCREG;
9424             regmap_pre[k+1][HOST_CCREG]=CCREG;
9425             regs[k+1].wasdirty|=1<<HOST_CCREG;
9426             regs[k].dirty|=1<<HOST_CCREG;
9427             regs[k].wasconst&=~(1<<HOST_CCREG);
9428             regs[k].isconst&=~(1<<HOST_CCREG);
9429             k++;
9430           }
9431           regs[j].regmap_entry[HOST_CCREG]=CCREG;
9432         }
9433         // Work backwards from the branch target
9434         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9435         {
9436           //printf("Extend backwards\n");
9437           int k;
9438           k=i;
9439           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9440             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9441               //printf("no free regs for store %x\n",start+(k-1)*4);
9442               break;
9443             }
9444             k--;
9445           }
9446           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9447             //printf("Extend CC, %x ->\n",start+k*4);
9448             while(k<=i) {
9449               regs[k].regmap_entry[HOST_CCREG]=CCREG;
9450               regs[k].regmap[HOST_CCREG]=CCREG;
9451               regmap_pre[k+1][HOST_CCREG]=CCREG;
9452               regs[k+1].wasdirty|=1<<HOST_CCREG;
9453               regs[k].dirty|=1<<HOST_CCREG;
9454               regs[k].wasconst&=~(1<<HOST_CCREG);
9455               regs[k].isconst&=~(1<<HOST_CCREG);
9456               k++;
9457             }
9458           }
9459           else {
9460             //printf("Fail Extend CC, %x ->\n",start+k*4);
9461           }
9462         }
9463       }
9464       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9465          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9466          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9467          itype[i]!=FCONV&&itype[i]!=FCOMP)
9468       {
9469         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9470       }
9471     }
9472   }
9473
9474   // Cache memory offset or tlb map pointer if a register is available
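  // A simple scoring pass: for each host register that is still free, count
  // how many upcoming loads/stores could use a cached RAM offset (ROREG)
  // before the register is needed for something else, then map ROREG into
  // the highest-scoring register over that range, preferably starting at a
  // loop head so the whole loop body gets it.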
9475   #ifndef HOST_IMM_ADDR32
9476   #ifndef RAM_OFFSET
9477   if(0)
9478   #endif
9479   {
9480     int earliest_available[HOST_REGS];
9481     int loop_start[HOST_REGS];
9482     int score[HOST_REGS];
9483     int end[HOST_REGS];
9484     int reg=ROREG;
9485
9486     // Init
9487     for(hr=0;hr<HOST_REGS;hr++) {
9488       score[hr]=0;earliest_available[hr]=0;
9489       loop_start[hr]=MAXBLOCK;
9490     }
9491     for(i=0;i<slen-1;i++)
9492     {
9493       // Can't do anything if no registers are available
9494       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
9495         for(hr=0;hr<HOST_REGS;hr++) {
9496           score[hr]=0;earliest_available[hr]=i+1;
9497           loop_start[hr]=MAXBLOCK;
9498         }
9499       }
9500       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9501         if(!ooo[i]) {
9502           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
9503             for(hr=0;hr<HOST_REGS;hr++) {
9504               score[hr]=0;earliest_available[hr]=i+1;
9505               loop_start[hr]=MAXBLOCK;
9506             }
9507           }
9508         }else{
9509           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
9510             for(hr=0;hr<HOST_REGS;hr++) {
9511               score[hr]=0;earliest_available[hr]=i+1;
9512               loop_start[hr]=MAXBLOCK;
9513             }
9514           }
9515         }
9516       }
9517       // Mark unavailable registers
9518       for(hr=0;hr<HOST_REGS;hr++) {
9519         if(regs[i].regmap[hr]>=0) {
9520           score[hr]=0;earliest_available[hr]=i+1;
9521           loop_start[hr]=MAXBLOCK;
9522         }
9523         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9524           if(branch_regs[i].regmap[hr]>=0) {
9525             score[hr]=0;earliest_available[hr]=i+2;
9526             loop_start[hr]=MAXBLOCK;
9527           }
9528         }
9529       }
9530       // No register allocations after unconditional jumps
9531       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
9532       {
9533         for(hr=0;hr<HOST_REGS;hr++) {
9534           score[hr]=0;earliest_available[hr]=i+2;
9535           loop_start[hr]=MAXBLOCK;
9536         }
9537         i++; // Skip delay slot too
9538         //printf("skip delay slot: %x\n",start+i*4);
9539       }
9540       else
9541       // Possible match
9542       if(itype[i]==LOAD||itype[i]==LOADLR||
9543          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
9544         for(hr=0;hr<HOST_REGS;hr++) {
9545           if(hr!=EXCLUDE_REG) {
9546             end[hr]=i-1;
9547             for(j=i;j<slen-1;j++) {
9548               if(regs[j].regmap[hr]>=0) break;
9549               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9550                 if(branch_regs[j].regmap[hr]>=0) break;
9551                 if(ooo[j]) {
9552                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
9553                 }else{
9554                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
9555                 }
9556               }
9557               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
9558               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9559                 int t=(ba[j]-start)>>2;
9560                 if(t<j&&t>=earliest_available[hr]) {
9561                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
9562                     // Score a point for hoisting loop invariant
9563                     if(t<loop_start[hr]) loop_start[hr]=t;
9564                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
9565                     score[hr]++;
9566                     end[hr]=j;
9567                   }
9568                 }
9569                 else if(t<j) {
9570                   if(regs[t].regmap[hr]==reg) {
9571                     // Score a point if the branch target matches this register
9572                     score[hr]++;
9573                     end[hr]=j;
9574                   }
9575                 }
9576                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
9577                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
9578                   score[hr]++;
9579                   end[hr]=j;
9580                 }
9581               }
9582               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9583               {
9584                 // Stop on unconditional branch
9585                 break;
9586               }
9587               else
9588               if(itype[j]==LOAD||itype[j]==LOADLR||
9589                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
9590                 score[hr]++;
9591                 end[hr]=j;
9592               }
9593             }
9594           }
9595         }
9596         // Find highest score and allocate that register
9597         int maxscore=0;
9598         for(hr=0;hr<HOST_REGS;hr++) {
9599           if(hr!=EXCLUDE_REG) {
9600             if(score[hr]>score[maxscore]) {
9601               maxscore=hr;
9602               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
9603             }
9604           }
9605         }
9606         if(score[maxscore]>1)
9607         {
9608           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
9609           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
9610             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
9611             assert(regs[j].regmap[maxscore]<0);
9612             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
9613             regs[j].regmap[maxscore]=reg;
9614             regs[j].dirty&=~(1<<maxscore);
9615             regs[j].wasconst&=~(1<<maxscore);
9616             regs[j].isconst&=~(1<<maxscore);
9617             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9618               branch_regs[j].regmap[maxscore]=reg;
9619               branch_regs[j].wasdirty&=~(1<<maxscore);
9620               branch_regs[j].dirty&=~(1<<maxscore);
9621               branch_regs[j].wasconst&=~(1<<maxscore);
9622               branch_regs[j].isconst&=~(1<<maxscore);
9623               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
9624                 regmap_pre[j+2][maxscore]=reg;
9625                 regs[j+2].wasdirty&=~(1<<maxscore);
9626               }
9627               // loop optimization (loop_preload)
9628               int t=(ba[j]-start)>>2;
9629               if(t==loop_start[maxscore]) {
9630                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
9631                   regs[t].regmap_entry[maxscore]=reg;
9632               }
9633             }
9634             else
9635             {
9636               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
9637                 regmap_pre[j+1][maxscore]=reg;
9638                 regs[j+1].wasdirty&=~(1<<maxscore);
9639               }
9640             }
9641           }
9642           i=j-1;
9643           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
9644           for(hr=0;hr<HOST_REGS;hr++) {
9645             score[hr]=0;earliest_available[hr]=i+1;
9646             loop_start[hr]=MAXBLOCK;
9647           }
9648         }
9649       }
9650     }
9651   }
9652   #endif
9653
9654   // This allocates registers (if possible) one instruction prior
9655   // to use, which can avoid a load-use penalty on certain CPUs.
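  // e.g. if the next instruction reads a guest register that is not yet
  // cached, map it into the host register it will occupy there and perform
  // the load during this (independent) instruction instead, so the value is
  // already available when the next instruction's code executes.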
9656   for(i=0;i<slen-1;i++)
9657   {
9658     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9659     {
9660       if(!bt[i+1])
9661       {
9662         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9663            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9664         {
9665           if(rs1[i+1]) {
9666             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9667             {
9668               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9669               {
9670                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9671                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9672                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9673                 regs[i].isconst&=~(1<<hr);
9674                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9675                 constmap[i][hr]=constmap[i+1][hr];
9676                 regs[i+1].wasdirty&=~(1<<hr);
9677                 regs[i].dirty&=~(1<<hr);
9678               }
9679             }
9680           }
9681           if(rs2[i+1]) {
9682             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9683             {
9684               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9685               {
9686                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9687                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9688                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9689                 regs[i].isconst&=~(1<<hr);
9690                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9691                 constmap[i][hr]=constmap[i+1][hr];
9692                 regs[i+1].wasdirty&=~(1<<hr);
9693                 regs[i].dirty&=~(1<<hr);
9694               }
9695             }
9696           }
9697           // Preload target address for load instruction (non-constant)
9698           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9699             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9700             {
9701               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9702               {
9703                 regs[i].regmap[hr]=rs1[i+1];
9704                 regmap_pre[i+1][hr]=rs1[i+1];
9705                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9706                 regs[i].isconst&=~(1<<hr);
9707                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9708                 constmap[i][hr]=constmap[i+1][hr];
9709                 regs[i+1].wasdirty&=~(1<<hr);
9710                 regs[i].dirty&=~(1<<hr);
9711               }
9712             }
9713           }
9714           // Load source into target register
9715           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9716             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9717             {
9718               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9719               {
9720                 regs[i].regmap[hr]=rs1[i+1];
9721                 regmap_pre[i+1][hr]=rs1[i+1];
9722                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9723                 regs[i].isconst&=~(1<<hr);
9724                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9725                 constmap[i][hr]=constmap[i+1][hr];
9726                 regs[i+1].wasdirty&=~(1<<hr);
9727                 regs[i].dirty&=~(1<<hr);
9728               }
9729             }
9730           }
9731           // Address for store instruction (non-constant)
9732           if(itype[i+1]==STORE||itype[i+1]==STORELR
9733              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
9734             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9735               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
9736               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9737               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
9738               assert(hr>=0);
9739               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9740               {
9741                 regs[i].regmap[hr]=rs1[i+1];
9742                 regmap_pre[i+1][hr]=rs1[i+1];
9743                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9744                 regs[i].isconst&=~(1<<hr);
9745                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9746                 constmap[i][hr]=constmap[i+1][hr];
9747                 regs[i+1].wasdirty&=~(1<<hr);
9748                 regs[i].dirty&=~(1<<hr);
9749               }
9750             }
9751           }
9752           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
9753             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9754               int nr;
9755               hr=get_reg(regs[i+1].regmap,FTEMP);
9756               assert(hr>=0);
9757               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9758               {
9759                 regs[i].regmap[hr]=rs1[i+1];
9760                 regmap_pre[i+1][hr]=rs1[i+1];
9761                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9762                 regs[i].isconst&=~(1<<hr);
9763                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9764                 constmap[i][hr]=constmap[i+1][hr];
9765                 regs[i+1].wasdirty&=~(1<<hr);
9766                 regs[i].dirty&=~(1<<hr);
9767               }
9768               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
9769               {
9770                 // move it to another register
9771                 regs[i+1].regmap[hr]=-1;
9772                 regmap_pre[i+2][hr]=-1;
9773                 regs[i+1].regmap[nr]=FTEMP;
9774                 regmap_pre[i+2][nr]=FTEMP;
9775                 regs[i].regmap[nr]=rs1[i+1];
9776                 regmap_pre[i+1][nr]=rs1[i+1];
9777                 regs[i+1].regmap_entry[nr]=rs1[i+1];
9778                 regs[i].isconst&=~(1<<nr);
9779                 regs[i+1].isconst&=~(1<<nr);
9780                 regs[i].dirty&=~(1<<nr);
9781                 regs[i+1].wasdirty&=~(1<<nr);
9782                 regs[i+1].dirty&=~(1<<nr);
9783                 regs[i+2].wasdirty&=~(1<<nr);
9784               }
9785             }
9786           }
9787           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
9788             if(itype[i+1]==LOAD)
9789               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
9790             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
9791               hr=get_reg(regs[i+1].regmap,FTEMP);
9792             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
9793               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
9794               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9795             }
9796             if(hr>=0&&regs[i].regmap[hr]<0) {
9797               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
9798               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
9799                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
9800                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
9801                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
9802                 regs[i].isconst&=~(1<<hr);
9803                 regs[i+1].wasdirty&=~(1<<hr);
9804                 regs[i].dirty&=~(1<<hr);
9805               }
9806             }
9807           }
9808         }
9809       }
9810     }
9811   }
9812
9813   /* Pass 6 - Optimize clean/dirty state */
9814   clean_registers(0,slen-1,1);
9815
9816   /* Pass 7 - Identify 32-bit registers */
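  // On the PSX's 32-bit R3000A there is no 64-bit register state to track,
  // so all that remains of this pass is marking the instruction after each
  // conditional branch as a branch target, since execution may resume there
  // when returning from an exception or interrupt.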
9817   for (i=slen-1;i>=0;i--)
9818   {
9819     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9820     {
9821       // Conditional branch
9822       if((source[i]>>16)!=0x1000&&i<slen-2) {
9823         // Mark this address as a branch target since it may be called
9824         // upon return from interrupt
9825         bt[i+2]=1;
9826       }
9827     }
9828   }
9829
9830   if(itype[slen-1]==SPAN) {
9831     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
9832   }
9833
9834 #ifdef DISASM
9835   /* Debug/disassembly */
9836   for(i=0;i<slen;i++)
9837   {
9838     printf("U:");
9839     int r;
9840     for(r=1;r<=CCREG;r++) {
9841       if((unneeded_reg[i]>>r)&1) {
9842         if(r==HIREG) printf(" HI");
9843         else if(r==LOREG) printf(" LO");
9844         else printf(" r%d",r);
9845       }
9846     }
9847     printf("\n");
9848     #if defined(__i386__) || defined(__x86_64__)
9849     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
9850     #endif
9851     #ifdef __arm__
9852     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
9853     #endif
9854     printf("needs: ");
9855     if(needed_reg[i]&1) printf("eax ");
9856     if((needed_reg[i]>>1)&1) printf("ecx ");
9857     if((needed_reg[i]>>2)&1) printf("edx ");
9858     if((needed_reg[i]>>3)&1) printf("ebx ");
9859     if((needed_reg[i]>>5)&1) printf("ebp ");
9860     if((needed_reg[i]>>6)&1) printf("esi ");
9861     if((needed_reg[i]>>7)&1) printf("edi ");
9862     printf("\n");
9863     #if defined(__i386__) || defined(__x86_64__)
9864     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
9865     printf("dirty: ");
9866     if(regs[i].wasdirty&1) printf("eax ");
9867     if((regs[i].wasdirty>>1)&1) printf("ecx ");
9868     if((regs[i].wasdirty>>2)&1) printf("edx ");
9869     if((regs[i].wasdirty>>3)&1) printf("ebx ");
9870     if((regs[i].wasdirty>>5)&1) printf("ebp ");
9871     if((regs[i].wasdirty>>6)&1) printf("esi ");
9872     if((regs[i].wasdirty>>7)&1) printf("edi ");
9873     #endif
9874     #ifdef __arm__
9875     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
9876     printf("dirty: ");
9877     if(regs[i].wasdirty&1) printf("r0 ");
9878     if((regs[i].wasdirty>>1)&1) printf("r1 ");
9879     if((regs[i].wasdirty>>2)&1) printf("r2 ");
9880     if((regs[i].wasdirty>>3)&1) printf("r3 ");
9881     if((regs[i].wasdirty>>4)&1) printf("r4 ");
9882     if((regs[i].wasdirty>>5)&1) printf("r5 ");
9883     if((regs[i].wasdirty>>6)&1) printf("r6 ");
9884     if((regs[i].wasdirty>>7)&1) printf("r7 ");
9885     if((regs[i].wasdirty>>8)&1) printf("r8 ");
9886     if((regs[i].wasdirty>>9)&1) printf("r9 ");
9887     if((regs[i].wasdirty>>10)&1) printf("r10 ");
9888     if((regs[i].wasdirty>>12)&1) printf("r12 ");
9889     #endif
9890     printf("\n");
9891     disassemble_inst(i);
9892     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
9893     #if defined(__i386__) || defined(__x86_64__)
9894     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
9895     if(regs[i].dirty&1) printf("eax ");
9896     if((regs[i].dirty>>1)&1) printf("ecx ");
9897     if((regs[i].dirty>>2)&1) printf("edx ");
9898     if((regs[i].dirty>>3)&1) printf("ebx ");
9899     if((regs[i].dirty>>5)&1) printf("ebp ");
9900     if((regs[i].dirty>>6)&1) printf("esi ");
9901     if((regs[i].dirty>>7)&1) printf("edi ");
9902     #endif
9903     #ifdef __arm__
9904     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
9905     if(regs[i].dirty&1) printf("r0 ");
9906     if((regs[i].dirty>>1)&1) printf("r1 ");
9907     if((regs[i].dirty>>2)&1) printf("r2 ");
9908     if((regs[i].dirty>>3)&1) printf("r3 ");
9909     if((regs[i].dirty>>4)&1) printf("r4 ");
9910     if((regs[i].dirty>>5)&1) printf("r5 ");
9911     if((regs[i].dirty>>6)&1) printf("r6 ");
9912     if((regs[i].dirty>>7)&1) printf("r7 ");
9913     if((regs[i].dirty>>8)&1) printf("r8 ");
9914     if((regs[i].dirty>>9)&1) printf("r9 ");
9915     if((regs[i].dirty>>10)&1) printf("r10 ");
9916     if((regs[i].dirty>>12)&1) printf("r12 ");
9917     #endif
9918     printf("\n");
9919     if(regs[i].isconst) {
9920       printf("constants: ");
9921       #if defined(__i386__) || defined(__x86_64__)
9922       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
9923       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
9924       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
9925       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
9926       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
9927       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
9928       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
9929       #endif
9930       #ifdef __arm__
9931       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
9932       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
9933       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
9934       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
9935       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
9936       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
9937       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
9938       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
9939       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
9940       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
9941       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
9942       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
9943       #endif
9944       printf("\n");
9945     }
9946     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9947       #if defined(__i386__) || defined(__x86_64__)
9948       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
9949       if(branch_regs[i].dirty&1) printf("eax ");
9950       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
9951       if((branch_regs[i].dirty>>2)&1) printf("edx ");
9952       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
9953       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
9954       if((branch_regs[i].dirty>>6)&1) printf("esi ");
9955       if((branch_regs[i].dirty>>7)&1) printf("edi ");
9956       #endif
9957       #ifdef __arm__
9958       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
9959       if(branch_regs[i].dirty&1) printf("r0 ");
9960       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
9961       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
9962       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
9963       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
9964       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
9965       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
9966       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
9967       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
9968       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
9969       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
9970       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
9971       #endif
9972     }
9973   }
9974 #endif // DISASM
9975
9976   /* Pass 8 - Assembly */
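  // Walk the block and emit native code for each instruction: registers are
  // written back or reloaded as needed so the state matches
  // regs[i].regmap_entry at each entry point, then the per-itype assembler
  // emits the instruction body.  Branches record themselves through
  // add_to_linker(), and slow-path memory accesses queue stubs which are
  // emitted after the main loop.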
9977   linkcount=0;stubcount=0;
9978   ds=0;is_delayslot=0;
9979   cop1_usable=0;
9980   uint64_t is32_pre=0;
9981   u_int dirty_pre=0;
9982   void *beginning=start_block();
9983   if((u_int)addr&1) {
9984     ds=1;
9985     pagespan_ds();
9986   }
9987   u_int instr_addr0_override=0;
9988
9989   if (start == 0x80030000) {
9990     // nasty hack for fastbios thing
9991     // override block entry to this code
9992     instr_addr0_override=(u_int)out;
9993     emit_movimm(start,0);
9994     // abuse io address var as a flag that we
9995     // have already returned here once
9996     emit_readword((int)&address,1);
9997     emit_writeword(0,(int)&pcaddr);
9998     emit_writeword(0,(int)&address);
9999     emit_cmp(0,1);
10000     emit_jne((int)new_dyna_leave);
10001   }
10002   for(i=0;i<slen;i++)
10003   {
10004     //if(ds) printf("ds: ");
10005     disassemble_inst(i);
10006     if(ds) {
10007       ds=0; // Skip delay slot
10008       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10009       instr_addr[i]=0;
10010     } else {
10011       speculate_register_values(i);
10012       #ifndef DESTRUCTIVE_WRITEBACK
10013       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10014       {
10015         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10016               unneeded_reg[i],unneeded_reg_upper[i]);
10017       }
10018       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
10019         is32_pre=branch_regs[i].is32;
10020         dirty_pre=branch_regs[i].dirty;
10021       }else{
10022         is32_pre=regs[i].is32;
10023         dirty_pre=regs[i].dirty;
10024       }
10025       #endif
10026       // write back
10027       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10028       {
10029         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10030                       unneeded_reg[i],unneeded_reg_upper[i]);
10031         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10032       }
10033       // branch target entry point
10034       instr_addr[i]=(u_int)out;
10035       assem_debug("<->\n");
10036       // load regs
10037       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10038         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10039       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10040       address_generation(i,&regs[i],regs[i].regmap_entry);
10041       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10042       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10043       {
10044         // Load the delay slot registers if necessary
10045         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
10046           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10047         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
10048           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10049         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10050           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10051       }
10052       else if(i+1<slen)
10053       {
10054         // Preload registers for following instruction
10055         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10056           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10057             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10058         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10059           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10060             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10061       }
10062       // TODO: if(is_ooo(i)) address_generation(i+1);
10063       if(itype[i]==CJUMP||itype[i]==FJUMP)
10064         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10065       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10066         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10067       if(bt[i]) cop1_usable=0;
10068       // assemble
10069       switch(itype[i]) {
10070         case ALU:
10071           alu_assemble(i,&regs[i]);break;
10072         case IMM16:
10073           imm16_assemble(i,&regs[i]);break;
10074         case SHIFT:
10075           shift_assemble(i,&regs[i]);break;
10076         case SHIFTIMM:
10077           shiftimm_assemble(i,&regs[i]);break;
10078         case LOAD:
10079           load_assemble(i,&regs[i]);break;
10080         case LOADLR:
10081           loadlr_assemble(i,&regs[i]);break;
10082         case STORE:
10083           store_assemble(i,&regs[i]);break;
10084         case STORELR:
10085           storelr_assemble(i,&regs[i]);break;
10086         case COP0:
10087           cop0_assemble(i,&regs[i]);break;
10088         case COP1:
10089           cop1_assemble(i,&regs[i]);break;
10090         case C1LS:
10091           c1ls_assemble(i,&regs[i]);break;
10092         case COP2:
10093           cop2_assemble(i,&regs[i]);break;
10094         case C2LS:
10095           c2ls_assemble(i,&regs[i]);break;
10096         case C2OP:
10097           c2op_assemble(i,&regs[i]);break;
10098         case FCONV:
10099           fconv_assemble(i,&regs[i]);break;
10100         case FLOAT:
10101           float_assemble(i,&regs[i]);break;
10102         case FCOMP:
10103           fcomp_assemble(i,&regs[i]);break;
10104         case MULTDIV:
10105           multdiv_assemble(i,&regs[i]);break;
10106         case MOV:
10107           mov_assemble(i,&regs[i]);break;
10108         case SYSCALL:
10109           syscall_assemble(i,&regs[i]);break;
10110         case HLECALL:
10111           hlecall_assemble(i,&regs[i]);break;
10112         case INTCALL:
10113           intcall_assemble(i,&regs[i]);break;
10114         case UJUMP:
10115           ujump_assemble(i,&regs[i]);ds=1;break;
10116         case RJUMP:
10117           rjump_assemble(i,&regs[i]);ds=1;break;
10118         case CJUMP:
10119           cjump_assemble(i,&regs[i]);ds=1;break;
10120         case SJUMP:
10121           sjump_assemble(i,&regs[i]);ds=1;break;
10122         case FJUMP:
10123           fjump_assemble(i,&regs[i]);ds=1;break;
10124         case SPAN:
10125           pagespan_assemble(i,&regs[i]);break;
10126       }
10127       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10128         literal_pool(1024);
10129       else
10130         literal_pool_jumpover(256);
10131     }
10132   }
10133   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10134   // If the block did not end with an unconditional branch,
10135   // add a jump to the next instruction.
10136   if(i>1) {
10137     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10138       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10139       assert(i==slen);
10140       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10141         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10142         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10143           emit_loadreg(CCREG,HOST_CCREG);
10144         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10145       }
10146       else if(!likely[i-2])
10147       {
10148         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10149         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10150       }
10151       else
10152       {
10153         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10154         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10155       }
10156       add_to_linker((int)out,start+i*4,0);
10157       emit_jmp(0);
10158     }
10159   }
10160   else
10161   {
10162     assert(i>0);
10163     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10164     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10165     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10166       emit_loadreg(CCREG,HOST_CCREG);
10167     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10168     add_to_linker((int)out,start+i*4,0);
10169     emit_jmp(0);
10170   }
10171
10172   // TODO: delay slot stubs?
10173   // Stubs
10174   for(i=0;i<stubcount;i++)
10175   {
10176     switch(stubs[i][0])
10177     {
10178       case LOADB_STUB:
10179       case LOADH_STUB:
10180       case LOADW_STUB:
10181       case LOADD_STUB:
10182       case LOADBU_STUB:
10183       case LOADHU_STUB:
10184         do_readstub(i);break;
10185       case STOREB_STUB:
10186       case STOREH_STUB:
10187       case STOREW_STUB:
10188       case STORED_STUB:
10189         do_writestub(i);break;
10190       case CC_STUB:
10191         do_ccstub(i);break;
10192       case INVCODE_STUB:
10193         do_invstub(i);break;
10194       case FP_STUB:
10195         do_cop1stub(i);break;
10196       case STORELR_STUB:
10197         do_unalignedwritestub(i);break;
10198     }
10199   }
10200
10201   if (instr_addr0_override)
10202     instr_addr[0] = instr_addr0_override;
10203
10204   /* Pass 9 - Linker */
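  // Resolve the branches recorded by add_to_linker().  External targets
  // jump straight to already-compiled code when check_addr() finds it,
  // otherwise to a stub that resolves the target at run time; internal
  // targets jump directly to the corresponding instr_addr[] within this
  // block.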
10205   for(i=0;i<linkcount;i++)
10206   {
10207     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10208     literal_pool(64);
10209     if(!link_addr[i][2])
10210     {
10211       void *stub=out;
10212       void *addr=check_addr(link_addr[i][1]);
10213       emit_extjump(link_addr[i][0],link_addr[i][1]);
10214       if(addr) {
10215         set_jump_target(link_addr[i][0],(int)addr);
10216         add_link(link_addr[i][1],stub);
10217       }
10218       else set_jump_target(link_addr[i][0],(int)stub);
10219     }
10220     else
10221     {
10222       // Internal branch
10223       int target=(link_addr[i][1]-start)>>2;
10224       assert(target>=0&&target<slen);
10225       assert(instr_addr[target]);
10226       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10227       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10228       //#else
10229       set_jump_target(link_addr[i][0],instr_addr[target]);
10230       //#endif
10231     }
10232   }
10233   // External Branch Targets (jump_in)
10234   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10235   for(i=0;i<slen;i++)
10236   {
10237     if(bt[i]||i==0)
10238     {
10239       if(instr_addr[i]) // TODO - delay slots (=null)
10240       {
10241         u_int vaddr=start+i*4;
10242         u_int page=get_page(vaddr);
10243         u_int vpage=get_vpage(vaddr);
10244         literal_pool(256);
10245         {
10246           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10247           assem_debug("jump_in: %x\n",start+i*4);
10248           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10249           int entry_point=do_dirty_stub(i);
10250           ll_add_flags(jump_in+page,vaddr,state_rflags,(void *)entry_point);
10251           // If there was an existing entry in the hash table,
10252           // replace it with the new address.
10253           // Don't add new entries.  We'll insert the
10254           // ones that actually get used in check_addr().
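          // Each hash bin holds two (vaddr, code pointer) pairs:
          // ht_bin[0]/ht_bin[1] and ht_bin[2]/ht_bin[3].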
10255           u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10256           if(ht_bin[0]==vaddr) {
10257             ht_bin[1]=entry_point;
10258           }
10259           if(ht_bin[2]==vaddr) {
10260             ht_bin[3]=entry_point;
10261           }
10262         }
10263       }
10264     }
10265   }
10266   // Write out the literal pool if necessary
10267   literal_pool(0);
10268   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10269   // Align code
10270   if(((u_int)out)&7) emit_addnop(13);
10271   #endif
10272   assert((u_int)out-(u_int)beginning<MAX_OUTPUT_BLOCK_SIZE);
10273   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
10274   memcpy(copy,source,slen*4);
10275   copy+=slen*4;
10276
10277   end_block(beginning);
10278
10279   // If we're within 256K of the end of the buffer,
10280   // start over from the beginning. (Is 256K enough?)
10281   if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
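  // (The margin is MAX_OUTPUT_BLOCK_SIZE; since every block is asserted to
  // stay below that size, the next block is guaranteed to fit before the
  // wrap.)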
10282
10283   // Trap writes to any of the pages we compiled
10284   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10285     invalid_code[i]=0;
10286   }
10287   inv_code_start=inv_code_end=~0;
10288
10289   // for PCSX we need to mark all mirrors too
10290   if(get_page(start)<(RAM_SIZE>>12))
10291     for(i=start>>12;i<=(start+slen*4)>>12;i++)
10292       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
10293       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
10294       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
10295
10296   /* Pass 10 - Free memory by expiring oldest blocks */
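  // The translation cache is treated as a ring.  expirep is a 16-bit
  // counter: its top 3 bits pick one of 8 equal slices of the cache to
  // free, bits 11-12 pick which lookup structure to clean on this step
  // (jump_in/jump_dirty, jump_out pointers, the hash table, or the jump_out
  // lists themselves) and the low 11 bits pick the page.  The loop advances
  // expirep until it is roughly a quarter of the cache ahead of the current
  // output pointer.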
10297
10298   int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10299   while(expirep!=end)
10300   {
10301     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10302     int base=(int)BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10303     inv_debug("EXP: Phase %d\n",expirep);
10304     switch((expirep>>11)&3)
10305     {
10306       case 0:
10307         // Clear jump_in and jump_dirty
10308         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10309         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10310         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10311         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10312         break;
10313       case 1:
10314         // Clear pointers
10315         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10316         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10317         break;
10318       case 2:
10319         // Clear hash table
10320         for(i=0;i<32;i++) {
10321           u_int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10322           if((ht_bin[3]>>shift)==(base>>shift) ||
10323              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10324             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10325             ht_bin[2]=ht_bin[3]=-1;
10326           }
10327           if((ht_bin[1]>>shift)==(base>>shift) ||
10328              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10329             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10330             ht_bin[0]=ht_bin[2];
10331             ht_bin[1]=ht_bin[3];
10332             ht_bin[2]=ht_bin[3]=-1;
10333           }
10334         }
10335         break;
10336       case 3:
10337         // Clear jump_out
10338         #ifdef __arm__
10339         if((expirep&2047)==0)
10340           do_clear_cache();
10341         #endif
10342         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10343         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10344         break;
10345     }
10346     expirep=(expirep+1)&65535;
10347   }
10348   return 0;
10349 }
10350
10351 // vim:shiftwidth=2:expandtab