libpcsxcore/new_dynarec/new_dynarec.c
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <errno.h>
25 #include <sys/mman.h>
26 #ifdef __MACH__
27 #include <libkern/OSCacheControl.h>
28 #endif
29 #ifdef _3DS
30 #include <3ds_utils.h>
31 #endif
32 #ifdef VITA
33 #include <psp2/kernel/sysmem.h>
34 static int sceBlock;
35 #endif
36
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h" //emulator interface
39 #include "emu_if.h" //emulator interface
40
41 //#define DISASM
42 //#define assem_debug printf
43 //#define inv_debug printf
44 #define assem_debug(...)
45 #define inv_debug(...)
46
47 #ifdef __i386__
48 #include "assem_x86.h"
49 #endif
50 #ifdef __x86_64__
51 #include "assem_x64.h"
52 #endif
53 #ifdef __arm__
54 #include "assem_arm.h"
55 #endif
56
57 #define MAXBLOCK 4096
58 #define MAX_OUTPUT_BLOCK_SIZE 262144
59
60 struct regstat
61 {
62   signed char regmap_entry[HOST_REGS];
63   signed char regmap[HOST_REGS];
64   uint64_t was32;
65   uint64_t is32;
66   uint64_t wasdirty;
67   uint64_t dirty;
68   uint64_t u;
69   uint64_t uu;
70   u_int wasconst;
71   u_int isconst;
72   u_int loadedconst;             // host regs that have constants loaded
73   u_int waswritten;              // MIPS regs that were used as store base before
74 };
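// Illustrative note (not part of the original source): regmap[], dirty,
// isconst and loadedconst are indexed by *host* register number, while
// is32, u and uu are bitmasks indexed by *guest* (MIPS) register number.
// A minimal sketch of the two views, using only helpers defined below:
#if 0
  struct regstat st;
  clear_all_regs(st.regmap);   // every host reg unmapped (-1)
  st.regmap[2] = 4;            // host reg 2 now holds MIPS $a0
  st.dirty |= 1 << 2;          // "host reg 2 is dirty"       (host index)
  st.is32  |= 1LL << 4;        // "$a0 holds a 32-bit value"  (guest index)
#endif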
75
76 // note: asm depends on this layout
77 struct ll_entry
78 {
79   u_int vaddr;
80   u_int reg_sv_flags;
81   void *addr;
82   struct ll_entry *next;
83 };
84
85   // used by asm:
86   u_char *out;
87   u_int hash_table[65536][4]  __attribute__((aligned(16)));
88   struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
89   struct ll_entry *jump_dirty[4096];
90
91   static struct ll_entry *jump_out[4096];
92   static u_int start;
93   static u_int *source;
94   static char insn[MAXBLOCK][10];
95   static u_char itype[MAXBLOCK];
96   static u_char opcode[MAXBLOCK];
97   static u_char opcode2[MAXBLOCK];
98   static u_char bt[MAXBLOCK];
99   static u_char rs1[MAXBLOCK];
100   static u_char rs2[MAXBLOCK];
101   static u_char rt1[MAXBLOCK];
102   static u_char rt2[MAXBLOCK];
103   static u_char us1[MAXBLOCK];
104   static u_char us2[MAXBLOCK];
105   static u_char dep1[MAXBLOCK];
106   static u_char dep2[MAXBLOCK];
107   static u_char lt1[MAXBLOCK];
108   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
109   static uint64_t gte_rt[MAXBLOCK];
110   static uint64_t gte_unneeded[MAXBLOCK];
111   static u_int smrv[32]; // speculated MIPS register values
112   static u_int smrv_strong; // mask of regs that are likely to have correct values
113   static u_int smrv_weak; // same, but somewhat less likely
114   static u_int smrv_strong_next; // same, but after current insn executes
115   static u_int smrv_weak_next;
116   static int imm[MAXBLOCK];
117   static u_int ba[MAXBLOCK];
118   static char likely[MAXBLOCK];
119   static char is_ds[MAXBLOCK];
120   static char ooo[MAXBLOCK];
121   static uint64_t unneeded_reg[MAXBLOCK];
122   static uint64_t unneeded_reg_upper[MAXBLOCK];
123   static uint64_t branch_unneeded_reg[MAXBLOCK];
124   static uint64_t branch_unneeded_reg_upper[MAXBLOCK];
125   static signed char regmap_pre[MAXBLOCK][HOST_REGS];
126   static uint64_t current_constmap[HOST_REGS];
127   static uint64_t constmap[MAXBLOCK][HOST_REGS];
128   static struct regstat regs[MAXBLOCK];
129   static struct regstat branch_regs[MAXBLOCK];
130   static signed char minimum_free_regs[MAXBLOCK];
131   static u_int needed_reg[MAXBLOCK];
132   static u_int wont_dirty[MAXBLOCK];
133   static u_int will_dirty[MAXBLOCK];
134   static int ccadj[MAXBLOCK];
135   static int slen;
136   static u_int instr_addr[MAXBLOCK];
137   static u_int link_addr[MAXBLOCK][3];
138   static int linkcount;
139   static u_int stubs[MAXBLOCK*3][8];
140   static int stubcount;
141   static u_int literals[1024][2];
142   static int literalcount;
143   static int is_delayslot;
144   static int cop1_usable;
145   static char shadow[1048576]  __attribute__((aligned(16)));
146   static void *copy;
147   static int expirep;
148   static u_int stop_after_jal;
149 #ifndef RAM_FIXED
150   static u_int ram_offset;
151 #else
152   static const u_int ram_offset=0;
153 #endif
154
155   int new_dynarec_hacks;
156   int new_dynarec_did_compile;
157   extern u_char restore_candidate[512];
158   extern int cycle_count;
159
160   /* registers that may be allocated */
161   /* 1-31 gpr */
162 #define HIREG 32 // hi
163 #define LOREG 33 // lo
164 #define FSREG 34 // FPU status (FCSR)
165 #define CSREG 35 // Coprocessor status
166 #define CCREG 36 // Cycle count
167 #define INVCP 37 // Pointer to invalid_code
168 //#define MMREG 38 // Pointer to memory_map
169 #define ROREG 39 // ram offset (if rdram!=0x80000000)
170 #define TEMPREG 40
171 #define FTEMP 40 // FPU temporary register
172 #define PTEMP 41 // Prefetch temporary register
173 //#define TLREG 42 // TLB mapping offset
174 #define RHASH 43 // Return address hash
175 #define RHTBL 44 // Return address hash table address
176 #define RTEMP 45 // JR/JALR address register
177 #define MAXREG 45
178 #define AGEN1 46 // Address generation temporary register
179 //#define AGEN2 47 // Address generation temporary register
180 //#define MGEN1 48 // Maptable address generation temporary register
181 //#define MGEN2 49 // Maptable address generation temporary register
182 #define BTREG 50 // Branch target temporary register
183
184   /* instruction types */
185 #define NOP 0     // No operation
186 #define LOAD 1    // Load
187 #define STORE 2   // Store
188 #define LOADLR 3  // Unaligned load
189 #define STORELR 4 // Unaligned store
190 #define MOV 5     // Move
191 #define ALU 6     // Arithmetic/logic
192 #define MULTDIV 7 // Multiply/divide
193 #define SHIFT 8   // Shift by register
194 #define SHIFTIMM 9// Shift by immediate
195 #define IMM16 10  // 16-bit immediate
196 #define RJUMP 11  // Unconditional jump to register
197 #define UJUMP 12  // Unconditional jump
198 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
199 #define SJUMP 14  // Conditional branch (regimm format)
200 #define COP0 15   // Coprocessor 0
201 #define COP1 16   // Coprocessor 1
202 #define C1LS 17   // Coprocessor 1 load/store
203 #define FJUMP 18  // Conditional branch (floating point)
204 #define FLOAT 19  // Floating point unit
205 #define FCONV 20  // Convert integer to float
206 #define FCOMP 21  // Floating point compare (sets FSREG)
207 #define SYSCALL 22// SYSCALL
208 #define OTHER 23  // Other
209 #define SPAN 24   // Branch/delay slot spans 2 pages
210 #define NI 25     // Not implemented
211 #define HLECALL 26// PCSX fake opcodes for HLE
212 #define COP2 27   // Coprocessor 2 move
213 #define C2LS 28   // Coprocessor 2 load/store
214 #define C2OP 29   // Coprocessor 2 operation
215 #define INTCALL 30// Call interpreter to handle rare corner cases
216
217   /* stubs */
218 #define CC_STUB 1
219 #define FP_STUB 2
220 #define LOADB_STUB 3
221 #define LOADH_STUB 4
222 #define LOADW_STUB 5
223 #define LOADD_STUB 6
224 #define LOADBU_STUB 7
225 #define LOADHU_STUB 8
226 #define STOREB_STUB 9
227 #define STOREH_STUB 10
228 #define STOREW_STUB 11
229 #define STORED_STUB 12
230 #define STORELR_STUB 13
231 #define INVCODE_STUB 14
232
233   /* branch codes */
234 #define TAKEN 1
235 #define NOTTAKEN 2
236 #define NULLDS 3
237
238 // asm linkage
239 int new_recompile_block(int addr);
240 void *get_addr_ht(u_int vaddr);
241 void invalidate_block(u_int block);
242 void invalidate_addr(u_int addr);
243 void remove_hash(int vaddr);
244 void dyna_linker();
245 void dyna_linker_ds();
246 void verify_code();
247 void verify_code_vm();
248 void verify_code_ds();
249 void cc_interrupt();
250 void fp_exception();
251 void fp_exception_ds();
252 void jump_syscall_hle();
253 void jump_hlecall();
254 void jump_intcall();
255 void new_dyna_leave();
256
257 // Needed by assembler
258 static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
259 static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
260 static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
261 static void load_all_regs(signed char i_regmap[]);
262 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
263 static void load_regs_entry(int t);
264 static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
265
266 static int verify_dirty(u_int *ptr);
267 static int get_final_value(int hr, int i, int *value);
268 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e);
269 static void add_to_linker(int addr,int target,int ext);
270
271 static int tracedebug=0;
272
273 static void mprotect_w_x(void *start, void *end, int is_x)
274 {
275 #ifdef NO_WRITE_EXEC
276   #if defined(VITA)
277   // *Open* enables write on all memory that was
278   // allocated by sceKernelAllocMemBlockForVM()?
279   if (is_x)
280     sceKernelCloseVMDomain();
281   else
282     sceKernelOpenVMDomain();
283   #else
284   u_long mstart = (u_long)start & ~4095ul;
285   u_long mend = (u_long)end;
286   if (mprotect((void *)mstart, mend - mstart,
287                PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
288     SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
289   #endif
290 #endif
291 }
292
293 static void start_tcache_write(void *start, void *end)
294 {
295   mprotect_w_x(start, end, 0);
296 }
297
298 static void end_tcache_write(void *start, void *end)
299 {
300 #ifdef __arm__
301   size_t len = (char *)end - (char *)start;
302   #if   defined(__BLACKBERRY_QNX__)
303   msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
304   #elif defined(__MACH__)
305   sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
306   #elif defined(VITA)
307   sceKernelSyncVMDomain(sceBlock, start, len);
308   #elif defined(_3DS)
309   ctr_flush_invalidate_cache();
310   #else
311   __clear_cache(start, end);
312   #endif
313   (void)len;
314 #endif
315
316   mprotect_w_x(start, end, 1);
317 }
318
319 static void *start_block(void)
320 {
321   u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
322   if (end > (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2))
323     end = (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2);
324   start_tcache_write(out, end);
325   return out;
326 }
327
328 static void end_block(void *start)
329 {
330   end_tcache_write(start, out);
331 }
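// Usage sketch (illustrative, not part of the original source): the W^X
// helpers above are meant to bracket code emission; the emit step stands in
// for whatever the arch-specific assembler in assem_*.c actually does.
#if 0
  void *beg = start_block();   // make the output region writable
  /* ... emit instructions here, advancing the global 'out' pointer ... */
  end_block(beg);              // flush the icache and make it executable again
#endif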
332
333 //#define DEBUG_CYCLE_COUNT 1
334
335 #define NO_CYCLE_PENALTY_THR 12
336
337 int cycle_multiplier; // 100 for 1.0
338
339 static int CLOCK_ADJUST(int x)
340 {
341   int s=(x>>31)|1;
342   return (x * cycle_multiplier + s * 50) / 100;
343 }
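// Worked example (not in the original source): with cycle_multiplier==150
// (i.e. 1.5x), CLOCK_ADJUST(10) = (10*150 + 50)/100 = 15 and
// CLOCK_ADJUST(-10) = (-10*150 - 50)/100 = -15; the s=(x>>31)|1 term makes
// the division by 100 round half away from zero instead of truncating.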
344
345 static u_int get_page(u_int vaddr)
346 {
347   u_int page=vaddr&~0xe0000000;
348   if (page < 0x1000000)
349     page &= ~0x0e00000; // RAM mirrors
350   page>>=12;
351   if(page>2048) page=2048+(page&2047);
352   return page;
353 }
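// Example mappings (illustrative, not in the original source): the KSEG bits
// are stripped and the RAM mirrors collapse onto the same pages, so
// get_page(0x80010000) == get_page(0xa0010000) == 0x10, while non-RAM
// addresses such as the BIOS at 0xbfc00000 fold into pages 2048..4095
// (0xbfc00000 -> 2048 + (0x1fc00 & 2047) == 3072).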
354
355 // no virtual mem in PCSX
356 static u_int get_vpage(u_int vaddr)
357 {
358   return get_page(vaddr);
359 }
360
361 // Get address from virtual address
362 // This is called from the recompiled JR/JALR instructions
363 void *get_addr(u_int vaddr)
364 {
365   u_int page=get_page(vaddr);
366   u_int vpage=get_vpage(vaddr);
367   struct ll_entry *head;
368   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
369   head=jump_in[page];
370   while(head!=NULL) {
371     if(head->vaddr==vaddr) {
372   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
373       u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
374       ht_bin[3]=ht_bin[1];
375       ht_bin[2]=ht_bin[0];
376       ht_bin[1]=(u_int)head->addr;
377       ht_bin[0]=vaddr;
378       return head->addr;
379     }
380     head=head->next;
381   }
382   head=jump_dirty[vpage];
383   while(head!=NULL) {
384     if(head->vaddr==vaddr) {
385       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
386       // Don't restore blocks which are about to expire from the cache
387       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
388       if(verify_dirty(head->addr)) {
389         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
390         invalid_code[vaddr>>12]=0;
391         inv_code_start=inv_code_end=~0;
392         if(vpage<2048) {
393           restore_candidate[vpage>>3]|=1<<(vpage&7);
394         }
395         else restore_candidate[page>>3]|=1<<(page&7);
396         u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
397         if(ht_bin[0]==vaddr) {
398           ht_bin[1]=(u_int)head->addr; // Replace existing entry
399         }
400         else
401         {
402           ht_bin[3]=ht_bin[1];
403           ht_bin[2]=ht_bin[0];
404           ht_bin[1]=(int)head->addr;
405           ht_bin[0]=vaddr;
406         }
407         return head->addr;
408       }
409     }
410     head=head->next;
411   }
412   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
413   int r=new_recompile_block(vaddr);
414   if(r==0) return get_addr(vaddr);
415   // Execute in unmapped page, generate page fault exception
416   Status|=2;
417   Cause=(vaddr<<31)|0x8;
418   EPC=(vaddr&1)?vaddr-5:vaddr;
419   BadVAddr=(vaddr&~1);
420   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
421   EntryHi=BadVAddr&0xFFFFE000;
422   return get_addr_ht(0x80000000);
423 }
424 // Look up address in hash table first
425 void *get_addr_ht(u_int vaddr)
426 {
427   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
428   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
429   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
430   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
431   return get_addr(vaddr);
432 }
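// Layout sketch (illustrative, not part of the original source): each of the
// 65536 hash bins holds two {vaddr, host-code pointer} pairs; when get_addr()
// finds a block via jump_in it writes the new pair into slots [0]/[1] and
// shifts the previous pair down to [2]/[3]:
#if 0
  u_int *ht_bin = hash_table[((vaddr>>16)^vaddr)&0xFFFF];
  // ht_bin[0], ht_bin[1]: most recently used vaddr / compiled-code address
  // ht_bin[2], ht_bin[3]: previous pair, kept as a second-chance entry
#endif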
433
434 void clear_all_regs(signed char regmap[])
435 {
436   int hr;
437   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
438 }
439
440 signed char get_reg(signed char regmap[],int r)
441 {
442   int hr;
443   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
444   return -1;
445 }
446
447 // Find a register that is available for two consecutive cycles
448 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
449 {
450   int hr;
451   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
452   return -1;
453 }
454
455 int count_free_regs(signed char regmap[])
456 {
457   int count=0;
458   int hr;
459   for(hr=0;hr<HOST_REGS;hr++)
460   {
461     if(hr!=EXCLUDE_REG) {
462       if(regmap[hr]<0) count++;
463     }
464   }
465   return count;
466 }
467
468 void dirty_reg(struct regstat *cur,signed char reg)
469 {
470   int hr;
471   if(!reg) return;
472   for (hr=0;hr<HOST_REGS;hr++) {
473     if((cur->regmap[hr]&63)==reg) {
474       cur->dirty|=1<<hr;
475     }
476   }
477 }
478
479 // If we dirty the lower half of a 64 bit register which is now being
480 // sign-extended, we need to dump the upper half.
481 // Note: Do this only after completion of the instruction, because
482 // some instructions may need to read the full 64-bit value even if
483 // overwriting it (eg SLTI, DSRA32).
484 static void flush_dirty_uppers(struct regstat *cur)
485 {
486   int hr,reg;
487   for (hr=0;hr<HOST_REGS;hr++) {
488     if((cur->dirty>>hr)&1) {
489       reg=cur->regmap[hr];
490       if(reg>=64)
491         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
492     }
493   }
494 }
495
496 void set_const(struct regstat *cur,signed char reg,uint64_t value)
497 {
498   int hr;
499   if(!reg) return;
500   for (hr=0;hr<HOST_REGS;hr++) {
501     if(cur->regmap[hr]==reg) {
502       cur->isconst|=1<<hr;
503       current_constmap[hr]=value;
504     }
505     else if((cur->regmap[hr]^64)==reg) {
506       cur->isconst|=1<<hr;
507       current_constmap[hr]=value>>32;
508     }
509   }
510 }
511
512 void clear_const(struct regstat *cur,signed char reg)
513 {
514   int hr;
515   if(!reg) return;
516   for (hr=0;hr<HOST_REGS;hr++) {
517     if((cur->regmap[hr]&63)==reg) {
518       cur->isconst&=~(1<<hr);
519     }
520   }
521 }
522
523 int is_const(struct regstat *cur,signed char reg)
524 {
525   int hr;
526   if(reg<0) return 0;
527   if(!reg) return 1;
528   for (hr=0;hr<HOST_REGS;hr++) {
529     if((cur->regmap[hr]&63)==reg) {
530       return (cur->isconst>>hr)&1;
531     }
532   }
533   return 0;
534 }
535 uint64_t get_const(struct regstat *cur,signed char reg)
536 {
537   int hr;
538   if(!reg) return 0;
539   for (hr=0;hr<HOST_REGS;hr++) {
540     if(cur->regmap[hr]==reg) {
541       return current_constmap[hr];
542     }
543   }
544   SysPrintf("Unknown constant in r%d\n",reg);
545   exit(1);
546 }
547
548 // Least soon needed registers
549 // Look at the next ten instructions and see which registers
550 // will be used.  Try not to reallocate these.
551 void lsn(u_char hsn[], int i, int *preferred_reg)
552 {
553   int j;
554   int b=-1;
555   for(j=0;j<9;j++)
556   {
557     if(i+j>=slen) {
558       j=slen-i-1;
559       break;
560     }
561     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
562     {
563       // Don't go past an unconditional jump
564       j++;
565       break;
566     }
567   }
568   for(;j>=0;j--)
569   {
570     if(rs1[i+j]) hsn[rs1[i+j]]=j;
571     if(rs2[i+j]) hsn[rs2[i+j]]=j;
572     if(rt1[i+j]) hsn[rt1[i+j]]=j;
573     if(rt2[i+j]) hsn[rt2[i+j]]=j;
574     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
575       // Stores can allocate zero
576       hsn[rs1[i+j]]=j;
577       hsn[rs2[i+j]]=j;
578     }
579     // On some architectures stores need invc_ptr
580     #if defined(HOST_IMM8)
581     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
582       hsn[INVCP]=j;
583     }
584     #endif
585     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
586     {
587       hsn[CCREG]=j;
588       b=j;
589     }
590   }
591   if(b>=0)
592   {
593     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
594     {
595       // Follow first branch
596       int t=(ba[i+b]-start)>>2;
597       j=7-b;if(t+j>=slen) j=slen-t-1;
598       for(;j>=0;j--)
599       {
600         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
601         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
602         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
603         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
604       }
605     }
606     // TODO: preferred register based on backward branch
607   }
608   // Delay slot should preferably not overwrite branch conditions or cycle count
609   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
610     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
611     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
612     hsn[CCREG]=1;
613     // ...or hash tables
614     hsn[RHASH]=1;
615     hsn[RHTBL]=1;
616   }
617   // Coprocessor load/store needs FTEMP, even if not declared
618   if(itype[i]==C1LS||itype[i]==C2LS) {
619     hsn[FTEMP]=0;
620   }
621   // Load L/R also uses FTEMP as a temporary register
622   if(itype[i]==LOADLR) {
623     hsn[FTEMP]=0;
624   }
625   // Also SWL/SWR/SDL/SDR
626   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
627     hsn[FTEMP]=0;
628   }
629   // Don't remove the miniht registers
630   if(itype[i]==UJUMP||itype[i]==RJUMP)
631   {
632     hsn[RHASH]=0;
633     hsn[RHTBL]=0;
634   }
635 }
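// How hsn[] is consumed (illustrative; the actual consumer is the allocator
// in the arch-specific assem_*.c files): a smaller value means the register
// is needed sooner, so when a host register must be evicted the allocator
// prefers whichever mapped register has the largest hsn entry (10 meaning
// "not used within the lookahead window").
#if 0
  u_char hsn[MAXREG+1];
  int preferred_reg = -1;
  memset(hsn, 10, sizeof(hsn));    // assume nothing is needed soon
  lsn(hsn, i, &preferred_reg);     // then lower entries for upcoming uses
#endif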
636
637 // We only want to allocate registers if we're going to use them again soon
638 int needed_again(int r, int i)
639 {
640   int j;
641   int b=-1;
642   int rn=10;
643
644   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
645   {
646     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
647       return 0; // Don't need any registers if exiting the block
648   }
649   for(j=0;j<9;j++)
650   {
651     if(i+j>=slen) {
652       j=slen-i-1;
653       break;
654     }
655     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
656     {
657       // Don't go past an unconditional jump
658       j++;
659       break;
660     }
661     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
662     {
663       break;
664     }
665   }
666   for(;j>=1;j--)
667   {
668     if(rs1[i+j]==r) rn=j;
669     if(rs2[i+j]==r) rn=j;
670     if((unneeded_reg[i+j]>>r)&1) rn=10;
671     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
672     {
673       b=j;
674     }
675   }
676   /*
677   if(b>=0)
678   {
679     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
680     {
681       // Follow first branch
682       int o=rn;
683       int t=(ba[i+b]-start)>>2;
684       j=7-b;if(t+j>=slen) j=slen-t-1;
685       for(;j>=0;j--)
686       {
687         if(!((unneeded_reg[t+j]>>r)&1)) {
688           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
689           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
690         }
691         else rn=o;
692       }
693     }
694   }*/
695   if(rn<10) return 1;
696   (void)b;
697   return 0;
698 }
699
700 // Try to match register allocations at the end of a loop with those
701 // at the beginning
702 int loop_reg(int i, int r, int hr)
703 {
704   int j,k;
705   for(j=0;j<9;j++)
706   {
707     if(i+j>=slen) {
708       j=slen-i-1;
709       break;
710     }
711     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
712     {
713       // Don't go past an unconditional jump
714       j++;
715       break;
716     }
717   }
718   k=0;
719   if(i>0){
720     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
721       k--;
722   }
723   for(;k<j;k++)
724   {
725     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
726     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
727     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
728     {
729       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
730       {
731         int t=(ba[i+k]-start)>>2;
732         int reg=get_reg(regs[t].regmap_entry,r);
733         if(reg>=0) return reg;
734         //reg=get_reg(regs[t+1].regmap_entry,r);
735         //if(reg>=0) return reg;
736       }
737     }
738   }
739   return hr;
740 }
741
742
743 // Allocate every register, preserving source/target regs
744 void alloc_all(struct regstat *cur,int i)
745 {
746   int hr;
747
748   for(hr=0;hr<HOST_REGS;hr++) {
749     if(hr!=EXCLUDE_REG) {
750       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
751          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
752       {
753         cur->regmap[hr]=-1;
754         cur->dirty&=~(1<<hr);
755       }
756       // Don't need zeros
757       if((cur->regmap[hr]&63)==0)
758       {
759         cur->regmap[hr]=-1;
760         cur->dirty&=~(1<<hr);
761       }
762     }
763   }
764 }
765
766 #ifdef __i386__
767 #include "assem_x86.c"
768 #endif
769 #ifdef __x86_64__
770 #include "assem_x64.c"
771 #endif
772 #ifdef __arm__
773 #include "assem_arm.c"
774 #endif
775
776 // Add virtual address mapping to linked list
777 void ll_add(struct ll_entry **head,int vaddr,void *addr)
778 {
779   struct ll_entry *new_entry;
780   new_entry=malloc(sizeof(struct ll_entry));
781   assert(new_entry!=NULL);
782   new_entry->vaddr=vaddr;
783   new_entry->reg_sv_flags=0;
784   new_entry->addr=addr;
785   new_entry->next=*head;
786   *head=new_entry;
787 }
788
789 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
790 {
791   ll_add(head,vaddr,addr);
792   (*head)->reg_sv_flags=reg_sv_flags;
793 }
794
795 // Check if an address is already compiled
796 // but don't return addresses which are about to expire from the cache
797 void *check_addr(u_int vaddr)
798 {
799   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
800   if(ht_bin[0]==vaddr) {
801     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
802       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
803   }
804   if(ht_bin[2]==vaddr) {
805     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
806       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
807   }
808   u_int page=get_page(vaddr);
809   struct ll_entry *head;
810   head=jump_in[page];
811   while(head!=NULL) {
812     if(head->vaddr==vaddr) {
813       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
814         // Update existing entry with current address
815         if(ht_bin[0]==vaddr) {
816           ht_bin[1]=(int)head->addr;
817           return head->addr;
818         }
819         if(ht_bin[2]==vaddr) {
820           ht_bin[3]=(int)head->addr;
821           return head->addr;
822         }
823         // Insert into hash table with low priority.
824         // Don't evict existing entries, as they are probably
825         // addresses that are being accessed frequently.
826         if(ht_bin[0]==-1) {
827           ht_bin[1]=(int)head->addr;
828           ht_bin[0]=vaddr;
829         }else if(ht_bin[2]==-1) {
830           ht_bin[3]=(int)head->addr;
831           ht_bin[2]=vaddr;
832         }
833         return head->addr;
834       }
835     }
836     head=head->next;
837   }
838   return 0;
839 }
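// Note on the expiry test used above and in get_addr()/clean_blocks()
// (illustrative explanation, not in the original source): the translation
// cache of size (1<<TARGET_SIZE_2) is reclaimed circularly, so
//   ((u_int)addr - (u_int)out) << (32-TARGET_SIZE_2)
// maps the forward distance from the current output pointer onto a 32-bit
// circle; addresses that land too close in front of 'out' (the area that
// will be overwritten next) fail the "> 0x60000000 + ..." comparison and
// are treated as about to expire rather than being reused.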
840
841 void remove_hash(int vaddr)
842 {
843   //printf("remove hash: %x\n",vaddr);
844   u_int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
845   if(ht_bin[2]==vaddr) {
846     ht_bin[2]=ht_bin[3]=-1;
847   }
848   if(ht_bin[0]==vaddr) {
849     ht_bin[0]=ht_bin[2];
850     ht_bin[1]=ht_bin[3];
851     ht_bin[2]=ht_bin[3]=-1;
852   }
853 }
854
855 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
856 {
857   struct ll_entry *next;
858   while(*head) {
859     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
860        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
861     {
862       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
863       remove_hash((*head)->vaddr);
864       next=(*head)->next;
865       free(*head);
866       *head=next;
867     }
868     else
869     {
870       head=&((*head)->next);
871     }
872   }
873 }
874
875 // Remove all entries from linked list
876 void ll_clear(struct ll_entry **head)
877 {
878   struct ll_entry *cur;
879   struct ll_entry *next;
880   if((cur=*head)) {
881     *head=0;
882     while(cur) {
883       next=cur->next;
884       free(cur);
885       cur=next;
886     }
887   }
888 }
889
890 // Dereference the pointers and remove them if they match
891 static void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
892 {
893   while(head) {
894     int ptr=get_pointer(head->addr);
895     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
896     if(((ptr>>shift)==(addr>>shift)) ||
897        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
898     {
899       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
900       void *host_addr=find_extjump_insn(head->addr);
901       #ifdef __arm__
902         mark_clear_cache(host_addr);
903       #endif
904       set_jump_target((int)host_addr,(int)head->addr);
905     }
906     head=head->next;
907   }
908 }
909
910 // This is called when we write to a compiled block (see do_invstub)
911 void invalidate_page(u_int page)
912 {
913   struct ll_entry *head;
914   struct ll_entry *next;
915   head=jump_in[page];
916   jump_in[page]=0;
917   while(head!=NULL) {
918     inv_debug("INVALIDATE: %x\n",head->vaddr);
919     remove_hash(head->vaddr);
920     next=head->next;
921     free(head);
922     head=next;
923   }
924   head=jump_out[page];
925   jump_out[page]=0;
926   while(head!=NULL) {
927     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
928     void *host_addr=find_extjump_insn(head->addr);
929     #ifdef __arm__
930       mark_clear_cache(host_addr);
931     #endif
932     set_jump_target((int)host_addr,(int)head->addr);
933     next=head->next;
934     free(head);
935     head=next;
936   }
937 }
938
939 static void invalidate_block_range(u_int block, u_int first, u_int last)
940 {
941   u_int page=get_page(block<<12);
942   //printf("first=%d last=%d\n",first,last);
943   invalidate_page(page);
944   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
945   assert(last<page+5);
946   // Invalidate the adjacent pages if a block crosses a 4K boundary
947   while(first<page) {
948     invalidate_page(first);
949     first++;
950   }
951   for(first=page+1;first<last;first++) {
952     invalidate_page(first);
953   }
954   #ifdef __arm__
955     do_clear_cache();
956   #endif
957
958   // Don't trap writes
959   invalid_code[block]=1;
960
961   #ifdef USE_MINI_HT
962   memset(mini_ht,-1,sizeof(mini_ht));
963   #endif
964 }
965
966 void invalidate_block(u_int block)
967 {
968   u_int page=get_page(block<<12);
969   u_int vpage=get_vpage(block<<12);
970   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
971   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
972   u_int first,last;
973   first=last=page;
974   struct ll_entry *head;
975   head=jump_dirty[vpage];
976   //printf("page=%d vpage=%d\n",page,vpage);
977   while(head!=NULL) {
978     u_int start,end;
979     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
980       get_bounds((int)head->addr,&start,&end);
981       //printf("start: %x end: %x\n",start,end);
982       if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
983         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
984           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
985           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
986         }
987       }
988     }
989     head=head->next;
990   }
991   invalidate_block_range(block,first,last);
992 }
993
994 void invalidate_addr(u_int addr)
995 {
996   //static int rhits;
997   // this check is done by the caller
998   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
999   u_int page=get_vpage(addr);
1000   if(page<2048) { // RAM
1001     struct ll_entry *head;
1002     u_int addr_min=~0, addr_max=0;
1003     u_int mask=RAM_SIZE-1;
1004     u_int addr_main=0x80000000|(addr&mask);
1005     int pg1;
1006     inv_code_start=addr_main&~0xfff;
1007     inv_code_end=addr_main|0xfff;
1008     pg1=page;
1009     if (pg1>0) {
1010       // must check previous page too because of spans..
1011       pg1--;
1012       inv_code_start-=0x1000;
1013     }
1014     for(;pg1<=page;pg1++) {
1015       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1016         u_int start,end;
1017         get_bounds((int)head->addr,&start,&end);
1018         if(ram_offset) {
1019           start-=ram_offset;
1020           end-=ram_offset;
1021         }
1022         if(start<=addr_main&&addr_main<end) {
1023           if(start<addr_min) addr_min=start;
1024           if(end>addr_max) addr_max=end;
1025         }
1026         else if(addr_main<start) {
1027           if(start<inv_code_end)
1028             inv_code_end=start-1;
1029         }
1030         else {
1031           if(end>inv_code_start)
1032             inv_code_start=end;
1033         }
1034       }
1035     }
1036     if (addr_min!=~0) {
1037       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1038       inv_code_start=inv_code_end=~0;
1039       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1040       return;
1041     }
1042     else {
1043       inv_code_start=(addr&~mask)|(inv_code_start&mask);
1044       inv_code_end=(addr&~mask)|(inv_code_end&mask);
1045       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
1046       return;
1047     }
1048   }
1049   invalidate_block(addr>>12);
1050 }
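// Illustrative note (not part of the original source): inv_code_start and
// inv_code_end cache a range known to contain no compiled code, so the
// caller can skip the jump_dirty walk above for repeated writes there:
#if 0
  if (inv_code_start <= addr && addr <= inv_code_end)
    return;                 // nothing compiled here, nothing to invalidate
  invalidate_addr(addr);    // otherwise do the full lookup
#endif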
1051
1052 // This is called when loading a save state.
1053 // Anything could have changed, so invalidate everything.
1054 void invalidate_all_pages()
1055 {
1056   u_int page;
1057   for(page=0;page<4096;page++)
1058     invalidate_page(page);
1059   for(page=0;page<1048576;page++)
1060     if(!invalid_code[page]) {
1061       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1062       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1063     }
1064   #ifdef USE_MINI_HT
1065   memset(mini_ht,-1,sizeof(mini_ht));
1066   #endif
1067 }
1068
1069 // Add an entry to jump_out after making a link
1070 void add_link(u_int vaddr,void *src)
1071 {
1072   u_int page=get_page(vaddr);
1073   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1074   int *ptr=(int *)(src+4);
1075   assert((*ptr&0x0fff0000)==0x059f0000);
1076   (void)ptr;
1077   ll_add(jump_out+page,vaddr,src);
1078   //int ptr=get_pointer(src);
1079   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1080 }
1081
1082 // If a code block was found to be unmodified (bit was set in
1083 // restore_candidate) and it remains unmodified (bit is clear
1084 // in invalid_code) then move the entries for that 4K page from
1085 // the dirty list to the clean list.
1086 void clean_blocks(u_int page)
1087 {
1088   struct ll_entry *head;
1089   inv_debug("INV: clean_blocks page=%d\n",page);
1090   head=jump_dirty[page];
1091   while(head!=NULL) {
1092     if(!invalid_code[head->vaddr>>12]) {
1093       // Don't restore blocks which are about to expire from the cache
1094       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1095         u_int start,end;
1096         if(verify_dirty(head->addr)) {
1097           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1098           u_int i;
1099           u_int inv=0;
1100           get_bounds((int)head->addr,&start,&end);
1101           if(start-(u_int)rdram<RAM_SIZE) {
1102             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1103               inv|=invalid_code[i];
1104             }
1105           }
1106           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1107             inv=1;
1108           }
1109           if(!inv) {
1110             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1111             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1112               u_int ppage=page;
1113               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1114               //printf("page=%x, addr=%x\n",page,head->vaddr);
1115               //assert(head->vaddr>>12==(page|0x80000));
1116               ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
1117               u_int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1118               if(ht_bin[0]==head->vaddr) {
1119                 ht_bin[1]=(u_int)clean_addr; // Replace existing entry
1120               }
1121               if(ht_bin[2]==head->vaddr) {
1122                 ht_bin[3]=(u_int)clean_addr; // Replace existing entry
1123               }
1124             }
1125           }
1126         }
1127       }
1128     }
1129     head=head->next;
1130   }
1131 }
1132
1133
1134 void mov_alloc(struct regstat *current,int i)
1135 {
1136   // Note: Don't need to actually alloc the source registers
1137   if((~current->is32>>rs1[i])&1) {
1138     //alloc_reg64(current,i,rs1[i]);
1139     alloc_reg64(current,i,rt1[i]);
1140     current->is32&=~(1LL<<rt1[i]);
1141   } else {
1142     //alloc_reg(current,i,rs1[i]);
1143     alloc_reg(current,i,rt1[i]);
1144     current->is32|=(1LL<<rt1[i]);
1145   }
1146   clear_const(current,rs1[i]);
1147   clear_const(current,rt1[i]);
1148   dirty_reg(current,rt1[i]);
1149 }
1150
1151 void shiftimm_alloc(struct regstat *current,int i)
1152 {
1153   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1154   {
1155     if(rt1[i]) {
1156       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1157       else lt1[i]=rs1[i];
1158       alloc_reg(current,i,rt1[i]);
1159       current->is32|=1LL<<rt1[i];
1160       dirty_reg(current,rt1[i]);
1161       if(is_const(current,rs1[i])) {
1162         int v=get_const(current,rs1[i]);
1163         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1164         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1165         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1166       }
1167       else clear_const(current,rt1[i]);
1168     }
1169   }
1170   else
1171   {
1172     clear_const(current,rs1[i]);
1173     clear_const(current,rt1[i]);
1174   }
1175
1176   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1177   {
1178     if(rt1[i]) {
1179       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1180       alloc_reg64(current,i,rt1[i]);
1181       current->is32&=~(1LL<<rt1[i]);
1182       dirty_reg(current,rt1[i]);
1183     }
1184   }
1185   if(opcode2[i]==0x3c) // DSLL32
1186   {
1187     if(rt1[i]) {
1188       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1189       alloc_reg64(current,i,rt1[i]);
1190       current->is32&=~(1LL<<rt1[i]);
1191       dirty_reg(current,rt1[i]);
1192     }
1193   }
1194   if(opcode2[i]==0x3e) // DSRL32
1195   {
1196     if(rt1[i]) {
1197       alloc_reg64(current,i,rs1[i]);
1198       if(imm[i]==32) {
1199         alloc_reg64(current,i,rt1[i]);
1200         current->is32&=~(1LL<<rt1[i]);
1201       } else {
1202         alloc_reg(current,i,rt1[i]);
1203         current->is32|=1LL<<rt1[i];
1204       }
1205       dirty_reg(current,rt1[i]);
1206     }
1207   }
1208   if(opcode2[i]==0x3f) // DSRA32
1209   {
1210     if(rt1[i]) {
1211       alloc_reg64(current,i,rs1[i]);
1212       alloc_reg(current,i,rt1[i]);
1213       current->is32|=1LL<<rt1[i];
1214       dirty_reg(current,rt1[i]);
1215     }
1216   }
1217 }
1218
1219 void shift_alloc(struct regstat *current,int i)
1220 {
1221   if(rt1[i]) {
1222     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1223     {
1224       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1225       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1226       alloc_reg(current,i,rt1[i]);
1227       if(rt1[i]==rs2[i]) {
1228         alloc_reg_temp(current,i,-1);
1229         minimum_free_regs[i]=1;
1230       }
1231       current->is32|=1LL<<rt1[i];
1232     } else { // DSLLV/DSRLV/DSRAV
1233       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1234       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1235       alloc_reg64(current,i,rt1[i]);
1236       current->is32&=~(1LL<<rt1[i]);
1237       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1238       {
1239         alloc_reg_temp(current,i,-1);
1240         minimum_free_regs[i]=1;
1241       }
1242     }
1243     clear_const(current,rs1[i]);
1244     clear_const(current,rs2[i]);
1245     clear_const(current,rt1[i]);
1246     dirty_reg(current,rt1[i]);
1247   }
1248 }
1249
1250 void alu_alloc(struct regstat *current,int i)
1251 {
1252   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1253     if(rt1[i]) {
1254       if(rs1[i]&&rs2[i]) {
1255         alloc_reg(current,i,rs1[i]);
1256         alloc_reg(current,i,rs2[i]);
1257       }
1258       else {
1259         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1260         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1261       }
1262       alloc_reg(current,i,rt1[i]);
1263     }
1264     current->is32|=1LL<<rt1[i];
1265   }
1266   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1267     if(rt1[i]) {
1268       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1269       {
1270         alloc_reg64(current,i,rs1[i]);
1271         alloc_reg64(current,i,rs2[i]);
1272         alloc_reg(current,i,rt1[i]);
1273       } else {
1274         alloc_reg(current,i,rs1[i]);
1275         alloc_reg(current,i,rs2[i]);
1276         alloc_reg(current,i,rt1[i]);
1277       }
1278     }
1279     current->is32|=1LL<<rt1[i];
1280   }
1281   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1282     if(rt1[i]) {
1283       if(rs1[i]&&rs2[i]) {
1284         alloc_reg(current,i,rs1[i]);
1285         alloc_reg(current,i,rs2[i]);
1286       }
1287       else
1288       {
1289         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1290         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1291       }
1292       alloc_reg(current,i,rt1[i]);
1293       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1294       {
1295         if(!((current->uu>>rt1[i])&1)) {
1296           alloc_reg64(current,i,rt1[i]);
1297         }
1298         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1299           if(rs1[i]&&rs2[i]) {
1300             alloc_reg64(current,i,rs1[i]);
1301             alloc_reg64(current,i,rs2[i]);
1302           }
1303           else
1304           {
1305             // Is it really worth it to keep 64-bit values in registers?
1306             #ifdef NATIVE_64BIT
1307             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1308             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1309             #endif
1310           }
1311         }
1312         current->is32&=~(1LL<<rt1[i]);
1313       } else {
1314         current->is32|=1LL<<rt1[i];
1315       }
1316     }
1317   }
1318   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1319     if(rt1[i]) {
1320       if(rs1[i]&&rs2[i]) {
1321         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1322           alloc_reg64(current,i,rs1[i]);
1323           alloc_reg64(current,i,rs2[i]);
1324           alloc_reg64(current,i,rt1[i]);
1325         } else {
1326           alloc_reg(current,i,rs1[i]);
1327           alloc_reg(current,i,rs2[i]);
1328           alloc_reg(current,i,rt1[i]);
1329         }
1330       }
1331       else {
1332         alloc_reg(current,i,rt1[i]);
1333         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1334           // DADD used as move, or zeroing
1335           // If we have a 64-bit source, then make the target 64 bits too
1336           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1337             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1338             alloc_reg64(current,i,rt1[i]);
1339           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1340             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1341             alloc_reg64(current,i,rt1[i]);
1342           }
1343           if(opcode2[i]>=0x2e&&rs2[i]) {
1344             // DSUB used as negation - 64-bit result
1345             // If we have a 32-bit register, extend it to 64 bits
1346             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1347             alloc_reg64(current,i,rt1[i]);
1348           }
1349         }
1350       }
1351       if(rs1[i]&&rs2[i]) {
1352         current->is32&=~(1LL<<rt1[i]);
1353       } else if(rs1[i]) {
1354         current->is32&=~(1LL<<rt1[i]);
1355         if((current->is32>>rs1[i])&1)
1356           current->is32|=1LL<<rt1[i];
1357       } else if(rs2[i]) {
1358         current->is32&=~(1LL<<rt1[i]);
1359         if((current->is32>>rs2[i])&1)
1360           current->is32|=1LL<<rt1[i];
1361       } else {
1362         current->is32|=1LL<<rt1[i];
1363       }
1364     }
1365   }
1366   clear_const(current,rs1[i]);
1367   clear_const(current,rs2[i]);
1368   clear_const(current,rt1[i]);
1369   dirty_reg(current,rt1[i]);
1370 }
1371
1372 void imm16_alloc(struct regstat *current,int i)
1373 {
1374   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1375   else lt1[i]=rs1[i];
1376   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1377   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1378     current->is32&=~(1LL<<rt1[i]);
1379     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1380       // TODO: Could preserve the 32-bit flag if the immediate is zero
1381       alloc_reg64(current,i,rt1[i]);
1382       alloc_reg64(current,i,rs1[i]);
1383     }
1384     clear_const(current,rs1[i]);
1385     clear_const(current,rt1[i]);
1386   }
1387   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1388     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1389     current->is32|=1LL<<rt1[i];
1390     clear_const(current,rs1[i]);
1391     clear_const(current,rt1[i]);
1392   }
1393   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1394     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1395       if(rs1[i]!=rt1[i]) {
1396         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1397         alloc_reg64(current,i,rt1[i]);
1398         current->is32&=~(1LL<<rt1[i]);
1399       }
1400     }
1401     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1402     if(is_const(current,rs1[i])) {
1403       int v=get_const(current,rs1[i]);
1404       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1405       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1406       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1407     }
1408     else clear_const(current,rt1[i]);
1409   }
1410   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1411     if(is_const(current,rs1[i])) {
1412       int v=get_const(current,rs1[i]);
1413       set_const(current,rt1[i],v+imm[i]);
1414     }
1415     else clear_const(current,rt1[i]);
1416     current->is32|=1LL<<rt1[i];
1417   }
1418   else {
1419     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1420     current->is32|=1LL<<rt1[i];
1421   }
1422   dirty_reg(current,rt1[i]);
1423 }
1424
1425 void load_alloc(struct regstat *current,int i)
1426 {
1427   clear_const(current,rt1[i]);
1428   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1429   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1430   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1431   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1432     alloc_reg(current,i,rt1[i]);
1433     assert(get_reg(current->regmap,rt1[i])>=0);
1434     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1435     {
1436       current->is32&=~(1LL<<rt1[i]);
1437       alloc_reg64(current,i,rt1[i]);
1438     }
1439     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1440     {
1441       current->is32&=~(1LL<<rt1[i]);
1442       alloc_reg64(current,i,rt1[i]);
1443       alloc_all(current,i);
1444       alloc_reg64(current,i,FTEMP);
1445       minimum_free_regs[i]=HOST_REGS;
1446     }
1447     else current->is32|=1LL<<rt1[i];
1448     dirty_reg(current,rt1[i]);
1449     // LWL/LWR need a temporary register for the old value
1450     if(opcode[i]==0x22||opcode[i]==0x26)
1451     {
1452       alloc_reg(current,i,FTEMP);
1453       alloc_reg_temp(current,i,-1);
1454       minimum_free_regs[i]=1;
1455     }
1456   }
1457   else
1458   {
1459     // Load to r0 or unneeded register (dummy load)
1460     // but we still need a register to calculate the address
1461     if(opcode[i]==0x22||opcode[i]==0x26)
1462     {
1463       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1464     }
1465     alloc_reg_temp(current,i,-1);
1466     minimum_free_regs[i]=1;
1467     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1468     {
1469       alloc_all(current,i);
1470       alloc_reg64(current,i,FTEMP);
1471       minimum_free_regs[i]=HOST_REGS;
1472     }
1473   }
1474 }
1475
1476 void store_alloc(struct regstat *current,int i)
1477 {
1478   clear_const(current,rs2[i]);
1479   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1480   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1481   alloc_reg(current,i,rs2[i]);
1482   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1483     alloc_reg64(current,i,rs2[i]);
1484     if(rs2[i]) alloc_reg(current,i,FTEMP);
1485   }
1486   #if defined(HOST_IMM8)
1487   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1488   else alloc_reg(current,i,INVCP);
1489   #endif
1490   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1491     alloc_reg(current,i,FTEMP);
1492   }
1493   // We need a temporary register for address generation
1494   alloc_reg_temp(current,i,-1);
1495   minimum_free_regs[i]=1;
1496 }
1497
1498 void c1ls_alloc(struct regstat *current,int i)
1499 {
1500   //clear_const(current,rs1[i]); // FIXME
1501   clear_const(current,rt1[i]);
1502   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1503   alloc_reg(current,i,CSREG); // Status
1504   alloc_reg(current,i,FTEMP);
1505   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1506     alloc_reg64(current,i,FTEMP);
1507   }
1508   #if defined(HOST_IMM8)
1509   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1510   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1511     alloc_reg(current,i,INVCP);
1512   #endif
1513   // We need a temporary register for address generation
1514   alloc_reg_temp(current,i,-1);
1515 }
1516
1517 void c2ls_alloc(struct regstat *current,int i)
1518 {
1519   clear_const(current,rt1[i]);
1520   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1521   alloc_reg(current,i,FTEMP);
1522   #if defined(HOST_IMM8)
1523   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1524   if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1525     alloc_reg(current,i,INVCP);
1526   #endif
1527   // We need a temporary register for address generation
1528   alloc_reg_temp(current,i,-1);
1529   minimum_free_regs[i]=1;
1530 }
1531
1532 #ifndef multdiv_alloc
1533 void multdiv_alloc(struct regstat *current,int i)
1534 {
1535   //  case 0x18: MULT
1536   //  case 0x19: MULTU
1537   //  case 0x1A: DIV
1538   //  case 0x1B: DIVU
1539   //  case 0x1C: DMULT
1540   //  case 0x1D: DMULTU
1541   //  case 0x1E: DDIV
1542   //  case 0x1F: DDIVU
1543   clear_const(current,rs1[i]);
1544   clear_const(current,rs2[i]);
1545   if(rs1[i]&&rs2[i])
1546   {
1547     if((opcode2[i]&4)==0) // 32-bit
1548     {
1549       current->u&=~(1LL<<HIREG);
1550       current->u&=~(1LL<<LOREG);
1551       alloc_reg(current,i,HIREG);
1552       alloc_reg(current,i,LOREG);
1553       alloc_reg(current,i,rs1[i]);
1554       alloc_reg(current,i,rs2[i]);
1555       current->is32|=1LL<<HIREG;
1556       current->is32|=1LL<<LOREG;
1557       dirty_reg(current,HIREG);
1558       dirty_reg(current,LOREG);
1559     }
1560     else // 64-bit
1561     {
1562       current->u&=~(1LL<<HIREG);
1563       current->u&=~(1LL<<LOREG);
1564       current->uu&=~(1LL<<HIREG);
1565       current->uu&=~(1LL<<LOREG);
1566       alloc_reg64(current,i,HIREG);
1567       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1568       alloc_reg64(current,i,rs1[i]);
1569       alloc_reg64(current,i,rs2[i]);
1570       alloc_all(current,i);
1571       current->is32&=~(1LL<<HIREG);
1572       current->is32&=~(1LL<<LOREG);
1573       dirty_reg(current,HIREG);
1574       dirty_reg(current,LOREG);
1575       minimum_free_regs[i]=HOST_REGS;
1576     }
1577   }
1578   else
1579   {
1580     // Multiply by zero is zero.
1581     // MIPS does not have a divide by zero exception.
1582     // The result is undefined; we return zero.
1583     alloc_reg(current,i,HIREG);
1584     alloc_reg(current,i,LOREG);
1585     current->is32|=1LL<<HIREG;
1586     current->is32|=1LL<<LOREG;
1587     dirty_reg(current,HIREG);
1588     dirty_reg(current,LOREG);
1589   }
1590 }
1591 #endif
1592
1593 void cop0_alloc(struct regstat *current,int i)
1594 {
1595   if(opcode2[i]==0) // MFC0
1596   {
1597     if(rt1[i]) {
1598       clear_const(current,rt1[i]);
1599       alloc_all(current,i);
1600       alloc_reg(current,i,rt1[i]);
1601       current->is32|=1LL<<rt1[i];
1602       dirty_reg(current,rt1[i]);
1603     }
1604   }
1605   else if(opcode2[i]==4) // MTC0
1606   {
1607     if(rs1[i]){
1608       clear_const(current,rs1[i]);
1609       alloc_reg(current,i,rs1[i]);
1610       alloc_all(current,i);
1611     }
1612     else {
1613       alloc_all(current,i); // FIXME: Keep r0
1614       current->u&=~1LL;
1615       alloc_reg(current,i,0);
1616     }
1617   }
1618   else
1619   {
1620     // TLBR/TLBWI/TLBWR/TLBP/ERET
1621     assert(opcode2[i]==0x10);
1622     alloc_all(current,i);
1623   }
1624   minimum_free_regs[i]=HOST_REGS;
1625 }
1626
1627 void cop1_alloc(struct regstat *current,int i)
1628 {
1629   alloc_reg(current,i,CSREG); // Load status
1630   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1631   {
1632     if(rt1[i]){
1633       clear_const(current,rt1[i]);
1634       if(opcode2[i]==1) {
1635         alloc_reg64(current,i,rt1[i]); // DMFC1
1636         current->is32&=~(1LL<<rt1[i]);
1637       }else{
1638         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1639         current->is32|=1LL<<rt1[i];
1640       }
1641       dirty_reg(current,rt1[i]);
1642     }
1643     alloc_reg_temp(current,i,-1);
1644   }
1645   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1646   {
1647     if(rs1[i]){
1648       clear_const(current,rs1[i]);
1649       if(opcode2[i]==5)
1650         alloc_reg64(current,i,rs1[i]); // DMTC1
1651       else
1652         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1653       alloc_reg_temp(current,i,-1);
1654     }
1655     else {
1656       current->u&=~1LL;
1657       alloc_reg(current,i,0);
1658       alloc_reg_temp(current,i,-1);
1659     }
1660   }
1661   minimum_free_regs[i]=1;
1662 }
1663 void fconv_alloc(struct regstat *current,int i)
1664 {
1665   alloc_reg(current,i,CSREG); // Load status
1666   alloc_reg_temp(current,i,-1);
1667   minimum_free_regs[i]=1;
1668 }
1669 void float_alloc(struct regstat *current,int i)
1670 {
1671   alloc_reg(current,i,CSREG); // Load status
1672   alloc_reg_temp(current,i,-1);
1673   minimum_free_regs[i]=1;
1674 }
1675 void c2op_alloc(struct regstat *current,int i)
1676 {
1677   alloc_reg_temp(current,i,-1);
1678 }
1679 void fcomp_alloc(struct regstat *current,int i)
1680 {
1681   alloc_reg(current,i,CSREG); // Load status
1682   alloc_reg(current,i,FSREG); // Load flags
1683   dirty_reg(current,FSREG); // Flag will be modified
1684   alloc_reg_temp(current,i,-1);
1685   minimum_free_regs[i]=1;
1686 }
1687
1688 void syscall_alloc(struct regstat *current,int i)
1689 {
1690   alloc_cc(current,i);
1691   dirty_reg(current,CCREG);
1692   alloc_all(current,i);
1693   minimum_free_regs[i]=HOST_REGS;
1694   current->isconst=0;
1695 }
1696
1697 void delayslot_alloc(struct regstat *current,int i)
1698 {
1699   switch(itype[i]) {
1700     case UJUMP:
1701     case CJUMP:
1702     case SJUMP:
1703     case RJUMP:
1704     case FJUMP:
1705     case SYSCALL:
1706     case HLECALL:
1707     case SPAN:
1708       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1709       SysPrintf("Disabled speculative precompilation\n");
1710       stop_after_jal=1;
1711       break;
1712     case IMM16:
1713       imm16_alloc(current,i);
1714       break;
1715     case LOAD:
1716     case LOADLR:
1717       load_alloc(current,i);
1718       break;
1719     case STORE:
1720     case STORELR:
1721       store_alloc(current,i);
1722       break;
1723     case ALU:
1724       alu_alloc(current,i);
1725       break;
1726     case SHIFT:
1727       shift_alloc(current,i);
1728       break;
1729     case MULTDIV:
1730       multdiv_alloc(current,i);
1731       break;
1732     case SHIFTIMM:
1733       shiftimm_alloc(current,i);
1734       break;
1735     case MOV:
1736       mov_alloc(current,i);
1737       break;
1738     case COP0:
1739       cop0_alloc(current,i);
1740       break;
1741     case COP1:
1742     case COP2:
1743       cop1_alloc(current,i);
1744       break;
1745     case C1LS:
1746       c1ls_alloc(current,i);
1747       break;
1748     case C2LS:
1749       c2ls_alloc(current,i);
1750       break;
1751     case FCONV:
1752       fconv_alloc(current,i);
1753       break;
1754     case FLOAT:
1755       float_alloc(current,i);
1756       break;
1757     case FCOMP:
1758       fcomp_alloc(current,i);
1759       break;
1760     case C2OP:
1761       c2op_alloc(current,i);
1762       break;
1763   }
1764 }
1765
1766 // Special case where a branch and delay slot span two pages in virtual memory
1767 static void pagespan_alloc(struct regstat *current,int i)
1768 {
1769   current->isconst=0;
1770   current->wasconst=0;
1771   regs[i].wasconst=0;
1772   minimum_free_regs[i]=HOST_REGS;
1773   alloc_all(current,i);
1774   alloc_cc(current,i);
1775   dirty_reg(current,CCREG);
1776   if(opcode[i]==3) // JAL
1777   {
1778     alloc_reg(current,i,31);
1779     dirty_reg(current,31);
1780   }
1781   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1782   {
1783     alloc_reg(current,i,rs1[i]);
1784     if (rt1[i]!=0) {
1785       alloc_reg(current,i,rt1[i]);
1786       dirty_reg(current,rt1[i]);
1787     }
1788   }
1789   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1790   {
1791     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1792     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1793     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1794     {
1795       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1796       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1797     }
1798   }
1799   else
1800   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1801   {
1802     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1803     if(!((current->is32>>rs1[i])&1))
1804     {
1805       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1806     }
1807   }
1808   else
1809   if(opcode[i]==0x11) // BC1
1810   {
1811     alloc_reg(current,i,FSREG);
1812     alloc_reg(current,i,CSREG);
1813   }
1814   //else ...
1815 }
1816
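// Queue an out-of-line stub: records the stub type, the address of the
// branch to be patched, the return address, and up to five extra
// parameters.  The stub bodies are emitted after the main block and the
// recorded branch is then pointed at them.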
1817 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1818 {
1819   stubs[stubcount][0]=type;
1820   stubs[stubcount][1]=addr;
1821   stubs[stubcount][2]=retaddr;
1822   stubs[stubcount][3]=a;
1823   stubs[stubcount][4]=b;
1824   stubs[stubcount][5]=c;
1825   stubs[stubcount][6]=d;
1826   stubs[stubcount][7]=e;
1827   stubcount++;
1828 }
1829
1830 // Write out a single register
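// A host register whose regmap entry is r|64 holds the upper 32 bits of
// MIPS register r, so dirty upper halves are stored back as r|64.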
1831 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1832 {
1833   int hr;
1834   for(hr=0;hr<HOST_REGS;hr++) {
1835     if(hr!=EXCLUDE_REG) {
1836       if((regmap[hr]&63)==r) {
1837         if((dirty>>hr)&1) {
1838           if(regmap[hr]<64) {
1839             emit_storereg(r,hr);
1840           }else{
1841             emit_storereg(r|64,hr);
1842           }
1843         }
1844       }
1845     }
1846   }
1847 }
1848
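// Debug helpers: mchecksum() folds all of RDRAM (2M words) into a single
// word, rchecksum() xors the 64 words of the register file, and rlist()
// dumps all 32 GPRs; they are only used by the tracing output in
// memdebug() below.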
1849 int mchecksum()
1850 {
1851   //if(!tracedebug) return 0;
1852   int i;
1853   int sum=0;
1854   for(i=0;i<2097152;i++) {
1855     unsigned int temp=sum;
1856     sum<<=1;
1857     sum|=(~temp)>>31;
1858     sum^=((u_int *)rdram)[i];
1859   }
1860   return sum;
1861 }
1862 int rchecksum()
1863 {
1864   int i;
1865   int sum=0;
1866   for(i=0;i<64;i++)
1867     sum^=((u_int *)reg)[i];
1868   return sum;
1869 }
1870 void rlist()
1871 {
1872   int i;
1873   printf("TRACE: ");
1874   for(i=0;i<32;i++)
1875     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
1876   printf("\n");
1877 }
1878
1879 void enabletrace()
1880 {
1881   tracedebug=1;
1882 }
1883
1884 void memdebug(int i)
1885 {
1886   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
1887   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
1888   //rlist();
1889   //if(tracedebug) {
1890   //if(Count>=-2084597794) {
1891   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
1892   //if(0) {
1893     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
1894     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
1895     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
1896     rlist();
1897     #ifdef __i386__
1898     printf("TRACE: %x\n",(&i)[-1]);
1899     #endif
1900     #ifdef __arm__
1901     int j;
1902     printf("TRACE: %x \n",(&j)[10]);
1903     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
1904     #endif
1905     //fflush(stdout);
1906   }
1907   //printf("TRACE: %x\n",(&i)[-1]);
1908 }
1909
1910 void alu_assemble(int i,struct regstat *i_regs)
1911 {
1912   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1913     if(rt1[i]) {
1914       signed char s1,s2,t;
1915       t=get_reg(i_regs->regmap,rt1[i]);
1916       if(t>=0) {
1917         s1=get_reg(i_regs->regmap,rs1[i]);
1918         s2=get_reg(i_regs->regmap,rs2[i]);
1919         if(rs1[i]&&rs2[i]) {
1920           assert(s1>=0);
1921           assert(s2>=0);
1922           if(opcode2[i]&2) emit_sub(s1,s2,t);
1923           else emit_add(s1,s2,t);
1924         }
1925         else if(rs1[i]) {
1926           if(s1>=0) emit_mov(s1,t);
1927           else emit_loadreg(rs1[i],t);
1928         }
1929         else if(rs2[i]) {
1930           if(s2>=0) {
1931             if(opcode2[i]&2) emit_neg(s2,t);
1932             else emit_mov(s2,t);
1933           }
1934           else {
1935             emit_loadreg(rs2[i],t);
1936             if(opcode2[i]&2) emit_neg(t,t);
1937           }
1938         }
1939         else emit_zeroreg(t);
1940       }
1941     }
1942   }
1943   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1944     if(rt1[i]) {
1945       signed char s1l,s2l,s1h,s2h,tl,th;
1946       tl=get_reg(i_regs->regmap,rt1[i]);
1947       th=get_reg(i_regs->regmap,rt1[i]|64);
1948       if(tl>=0) {
1949         s1l=get_reg(i_regs->regmap,rs1[i]);
1950         s2l=get_reg(i_regs->regmap,rs2[i]);
1951         s1h=get_reg(i_regs->regmap,rs1[i]|64);
1952         s2h=get_reg(i_regs->regmap,rs2[i]|64);
1953         if(rs1[i]&&rs2[i]) {
1954           assert(s1l>=0);
1955           assert(s2l>=0);
1956           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
1957           else emit_adds(s1l,s2l,tl);
1958           if(th>=0) {
1959             #ifdef INVERTED_CARRY
1960             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
1961             #else
1962             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
1963             #endif
1964             else emit_add(s1h,s2h,th);
1965           }
1966         }
1967         else if(rs1[i]) {
1968           if(s1l>=0) emit_mov(s1l,tl);
1969           else emit_loadreg(rs1[i],tl);
1970           if(th>=0) {
1971             if(s1h>=0) emit_mov(s1h,th);
1972             else emit_loadreg(rs1[i]|64,th);
1973           }
1974         }
1975         else if(rs2[i]) {
1976           if(s2l>=0) {
1977             if(opcode2[i]&2) emit_negs(s2l,tl);
1978             else emit_mov(s2l,tl);
1979           }
1980           else {
1981             emit_loadreg(rs2[i],tl);
1982             if(opcode2[i]&2) emit_negs(tl,tl);
1983           }
1984           if(th>=0) {
1985             #ifdef INVERTED_CARRY
1986             if(s2h>=0) emit_mov(s2h,th);
1987             else emit_loadreg(rs2[i]|64,th);
1988             if(opcode2[i]&2) {
1989               emit_adcimm(-1,th); // x86 has inverted carry flag
1990               emit_not(th,th);
1991             }
1992             #else
1993             if(opcode2[i]&2) {
1994               if(s2h>=0) emit_rscimm(s2h,0,th);
1995               else {
1996                 emit_loadreg(rs2[i]|64,th);
1997                 emit_rscimm(th,0,th);
1998               }
1999             }else{
2000               if(s2h>=0) emit_mov(s2h,th);
2001               else emit_loadreg(rs2[i]|64,th);
2002             }
2003             #endif
2004           }
2005         }
2006         else {
2007           emit_zeroreg(tl);
2008           if(th>=0) emit_zeroreg(th);
2009         }
2010       }
2011     }
2012   }
2013   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2014     if(rt1[i]) {
2015       signed char s1l,s1h,s2l,s2h,t;
2016       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2017       {
2018         t=get_reg(i_regs->regmap,rt1[i]);
2019         //assert(t>=0);
2020         if(t>=0) {
2021           s1l=get_reg(i_regs->regmap,rs1[i]);
2022           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2023           s2l=get_reg(i_regs->regmap,rs2[i]);
2024           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2025           if(rs2[i]==0) // rx<r0
2026           {
2027             assert(s1h>=0);
2028             if(opcode2[i]==0x2a) // SLT
2029               emit_shrimm(s1h,31,t);
2030             else // SLTU (an unsigned value cannot be less than zero)
2031               emit_zeroreg(t);
2032           }
2033           else if(rs1[i]==0) // r0<rx
2034           {
2035             assert(s2h>=0);
2036             if(opcode2[i]==0x2a) // SLT
2037               emit_set_gz64_32(s2h,s2l,t);
2038             else // SLTU (set if not zero)
2039               emit_set_nz64_32(s2h,s2l,t);
2040           }
2041           else {
2042             assert(s1l>=0);assert(s1h>=0);
2043             assert(s2l>=0);assert(s2h>=0);
2044             if(opcode2[i]==0x2a) // SLT
2045               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2046             else // SLTU
2047               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2048           }
2049         }
2050       } else {
2051         t=get_reg(i_regs->regmap,rt1[i]);
2052         //assert(t>=0);
2053         if(t>=0) {
2054           s1l=get_reg(i_regs->regmap,rs1[i]);
2055           s2l=get_reg(i_regs->regmap,rs2[i]);
2056           if(rs2[i]==0) // rx<r0
2057           {
2058             assert(s1l>=0);
2059             if(opcode2[i]==0x2a) // SLT
2060               emit_shrimm(s1l,31,t);
2061             else // SLTU (an unsigned value cannot be less than zero)
2062               emit_zeroreg(t);
2063           }
2064           else if(rs1[i]==0) // r0<rx
2065           {
2066             assert(s2l>=0);
2067             if(opcode2[i]==0x2a) // SLT
2068               emit_set_gz32(s2l,t);
2069             else // SLTU (set if not zero)
2070               emit_set_nz32(s2l,t);
2071           }
2072           else{
2073             assert(s1l>=0);assert(s2l>=0);
2074             if(opcode2[i]==0x2a) // SLT
2075               emit_set_if_less32(s1l,s2l,t);
2076             else // SLTU
2077               emit_set_if_carry32(s1l,s2l,t);
2078           }
2079         }
2080       }
2081     }
2082   }
2083   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2084     if(rt1[i]) {
2085       signed char s1l,s1h,s2l,s2h,th,tl;
2086       tl=get_reg(i_regs->regmap,rt1[i]);
2087       th=get_reg(i_regs->regmap,rt1[i]|64);
2088       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2089       {
2090         assert(tl>=0);
2091         if(tl>=0) {
2092           s1l=get_reg(i_regs->regmap,rs1[i]);
2093           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2094           s2l=get_reg(i_regs->regmap,rs2[i]);
2095           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2096           if(rs1[i]&&rs2[i]) {
2097             assert(s1l>=0);assert(s1h>=0);
2098             assert(s2l>=0);assert(s2h>=0);
2099             if(opcode2[i]==0x24) { // AND
2100               emit_and(s1l,s2l,tl);
2101               emit_and(s1h,s2h,th);
2102             } else
2103             if(opcode2[i]==0x25) { // OR
2104               emit_or(s1l,s2l,tl);
2105               emit_or(s1h,s2h,th);
2106             } else
2107             if(opcode2[i]==0x26) { // XOR
2108               emit_xor(s1l,s2l,tl);
2109               emit_xor(s1h,s2h,th);
2110             } else
2111             if(opcode2[i]==0x27) { // NOR
2112               emit_or(s1l,s2l,tl);
2113               emit_or(s1h,s2h,th);
2114               emit_not(tl,tl);
2115               emit_not(th,th);
2116             }
2117           }
2118           else
2119           {
2120             if(opcode2[i]==0x24) { // AND
2121               emit_zeroreg(tl);
2122               emit_zeroreg(th);
2123             } else
2124             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2125               if(rs1[i]){
2126                 if(s1l>=0) emit_mov(s1l,tl);
2127                 else emit_loadreg(rs1[i],tl);
2128                 if(s1h>=0) emit_mov(s1h,th);
2129                 else emit_loadreg(rs1[i]|64,th);
2130               }
2131               else
2132               if(rs2[i]){
2133                 if(s2l>=0) emit_mov(s2l,tl);
2134                 else emit_loadreg(rs2[i],tl);
2135                 if(s2h>=0) emit_mov(s2h,th);
2136                 else emit_loadreg(rs2[i]|64,th);
2137               }
2138               else{
2139                 emit_zeroreg(tl);
2140                 emit_zeroreg(th);
2141               }
2142             } else
2143             if(opcode2[i]==0x27) { // NOR
2144               if(rs1[i]){
2145                 if(s1l>=0) emit_not(s1l,tl);
2146                 else{
2147                   emit_loadreg(rs1[i],tl);
2148                   emit_not(tl,tl);
2149                 }
2150                 if(s1h>=0) emit_not(s1h,th);
2151                 else{
2152                   emit_loadreg(rs1[i]|64,th);
2153                   emit_not(th,th);
2154                 }
2155               }
2156               else
2157               if(rs2[i]){
2158                 if(s2l>=0) emit_not(s2l,tl);
2159                 else{
2160                   emit_loadreg(rs2[i],tl);
2161                   emit_not(tl,tl);
2162                 }
2163                 if(s2h>=0) emit_not(s2h,th);
2164                 else{
2165                   emit_loadreg(rs2[i]|64,th);
2166                   emit_not(th,th);
2167                 }
2168               }
2169               else {
2170                 emit_movimm(-1,tl);
2171                 emit_movimm(-1,th);
2172               }
2173             }
2174           }
2175         }
2176       }
2177       else
2178       {
2179         // 32 bit
2180         if(tl>=0) {
2181           s1l=get_reg(i_regs->regmap,rs1[i]);
2182           s2l=get_reg(i_regs->regmap,rs2[i]);
2183           if(rs1[i]&&rs2[i]) {
2184             assert(s1l>=0);
2185             assert(s2l>=0);
2186             if(opcode2[i]==0x24) { // AND
2187               emit_and(s1l,s2l,tl);
2188             } else
2189             if(opcode2[i]==0x25) { // OR
2190               emit_or(s1l,s2l,tl);
2191             } else
2192             if(opcode2[i]==0x26) { // XOR
2193               emit_xor(s1l,s2l,tl);
2194             } else
2195             if(opcode2[i]==0x27) { // NOR
2196               emit_or(s1l,s2l,tl);
2197               emit_not(tl,tl);
2198             }
2199           }
2200           else
2201           {
2202             if(opcode2[i]==0x24) { // AND
2203               emit_zeroreg(tl);
2204             } else
2205             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2206               if(rs1[i]){
2207                 if(s1l>=0) emit_mov(s1l,tl);
2208                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2209               }
2210               else
2211               if(rs2[i]){
2212                 if(s2l>=0) emit_mov(s2l,tl);
2213                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2214               }
2215               else emit_zeroreg(tl);
2216             } else
2217             if(opcode2[i]==0x27) { // NOR
2218               if(rs1[i]){
2219                 if(s1l>=0) emit_not(s1l,tl);
2220                 else {
2221                   emit_loadreg(rs1[i],tl);
2222                   emit_not(tl,tl);
2223                 }
2224               }
2225               else
2226               if(rs2[i]){
2227                 if(s2l>=0) emit_not(s2l,tl);
2228                 else {
2229                   emit_loadreg(rs2[i],tl);
2230                   emit_not(tl,tl);
2231                 }
2232               }
2233               else emit_movimm(-1,tl);
2234             }
2235           }
2236         }
2237       }
2238     }
2239   }
2240 }
2241
2242 void imm16_assemble(int i,struct regstat *i_regs)
2243 {
2244   if (opcode[i]==0x0f) { // LUI
2245     if(rt1[i]) {
2246       signed char t;
2247       t=get_reg(i_regs->regmap,rt1[i]);
2248       //assert(t>=0);
2249       if(t>=0) {
2250         if(!((i_regs->isconst>>t)&1))
2251           emit_movimm(imm[i]<<16,t);
2252       }
2253     }
2254   }
2255   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2256     if(rt1[i]) {
2257       signed char s,t;
2258       t=get_reg(i_regs->regmap,rt1[i]);
2259       s=get_reg(i_regs->regmap,rs1[i]);
2260       if(rs1[i]) {
2261         //assert(t>=0);
2262         //assert(s>=0);
2263         if(t>=0) {
2264           if(!((i_regs->isconst>>t)&1)) {
2265             if(s<0) {
2266               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2267               emit_addimm(t,imm[i],t);
2268             }else{
2269               if(!((i_regs->wasconst>>s)&1))
2270                 emit_addimm(s,imm[i],t);
2271               else
2272                 emit_movimm(constmap[i][s]+imm[i],t);
2273             }
2274           }
2275         }
2276       } else {
2277         if(t>=0) {
2278           if(!((i_regs->isconst>>t)&1))
2279             emit_movimm(imm[i],t);
2280         }
2281       }
2282     }
2283   }
2284   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2285     if(rt1[i]) {
2286       signed char sh,sl,th,tl;
2287       th=get_reg(i_regs->regmap,rt1[i]|64);
2288       tl=get_reg(i_regs->regmap,rt1[i]);
2289       sh=get_reg(i_regs->regmap,rs1[i]|64);
2290       sl=get_reg(i_regs->regmap,rs1[i]);
2291       if(tl>=0) {
2292         if(rs1[i]) {
2293           assert(sh>=0);
2294           assert(sl>=0);
2295           if(th>=0) {
2296             emit_addimm64_32(sh,sl,imm[i],th,tl);
2297           }
2298           else {
2299             emit_addimm(sl,imm[i],tl);
2300           }
2301         } else {
2302           emit_movimm(imm[i],tl);
2303           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2304         }
2305       }
2306     }
2307   }
2308   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2309     if(rt1[i]) {
2310       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2311       signed char sh,sl,t;
2312       t=get_reg(i_regs->regmap,rt1[i]);
2313       sh=get_reg(i_regs->regmap,rs1[i]|64);
2314       sl=get_reg(i_regs->regmap,rs1[i]);
2315       //assert(t>=0);
2316       if(t>=0) {
2317         if(rs1[i]>0) {
2318           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2319           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2320             if(opcode[i]==0x0a) { // SLTI
2321               if(sl<0) {
2322                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2323                 emit_slti32(t,imm[i],t);
2324               }else{
2325                 emit_slti32(sl,imm[i],t);
2326               }
2327             }
2328             else { // SLTIU
2329               if(sl<0) {
2330                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2331                 emit_sltiu32(t,imm[i],t);
2332               }else{
2333                 emit_sltiu32(sl,imm[i],t);
2334               }
2335             }
2336           }else{ // 64-bit
2337             assert(sl>=0);
2338             if(opcode[i]==0x0a) // SLTI
2339               emit_slti64_32(sh,sl,imm[i],t);
2340             else // SLTIU
2341               emit_sltiu64_32(sh,sl,imm[i],t);
2342           }
2343         }else{
2344           // SLTI(U) with r0 is just stupid,
2345           // nonetheless examples can be found in real code, so handle it anyway
2346           if(opcode[i]==0x0a) // SLTI
2347             if(0<imm[i]) emit_movimm(1,t);
2348             else emit_zeroreg(t);
2349           else // SLTIU
2350           {
2351             if(imm[i]) emit_movimm(1,t);
2352             else emit_zeroreg(t);
2353           }
2354         }
2355       }
2356     }
2357   }
2358   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2359     if(rt1[i]) {
2360       signed char sh,sl,th,tl;
2361       th=get_reg(i_regs->regmap,rt1[i]|64);
2362       tl=get_reg(i_regs->regmap,rt1[i]);
2363       sh=get_reg(i_regs->regmap,rs1[i]|64);
2364       sl=get_reg(i_regs->regmap,rs1[i]);
2365       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2366         if(opcode[i]==0x0c) //ANDI
2367         {
2368           if(rs1[i]) {
2369             if(sl<0) {
2370               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2371               emit_andimm(tl,imm[i],tl);
2372             }else{
2373               if(!((i_regs->wasconst>>sl)&1))
2374                 emit_andimm(sl,imm[i],tl);
2375               else
2376                 emit_movimm(constmap[i][sl]&imm[i],tl);
2377             }
2378           }
2379           else
2380             emit_zeroreg(tl);
2381           if(th>=0) emit_zeroreg(th);
2382         }
2383         else
2384         {
2385           if(rs1[i]) {
2386             if(sl<0) {
2387               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2388             }
2389             if(th>=0) {
2390               if(sh<0) {
2391                 emit_loadreg(rs1[i]|64,th);
2392               }else{
2393                 emit_mov(sh,th);
2394               }
2395             }
2396             if(opcode[i]==0x0d) { // ORI
2397               if(sl<0) {
2398                 emit_orimm(tl,imm[i],tl);
2399               }else{
2400                 if(!((i_regs->wasconst>>sl)&1))
2401                   emit_orimm(sl,imm[i],tl);
2402                 else
2403                   emit_movimm(constmap[i][sl]|imm[i],tl);
2404               }
2405             }
2406             if(opcode[i]==0x0e) { // XORI
2407               if(sl<0) {
2408                 emit_xorimm(tl,imm[i],tl);
2409               }else{
2410                 if(!((i_regs->wasconst>>sl)&1))
2411                   emit_xorimm(sl,imm[i],tl);
2412                 else
2413                   emit_movimm(constmap[i][sl]^imm[i],tl);
2414               }
2415             }
2416           }
2417           else {
2418             emit_movimm(imm[i],tl);
2419             if(th>=0) emit_zeroreg(th);
2420           }
2421         }
2422       }
2423     }
2424   }
2425 }
2426
2427 void shiftimm_assemble(int i,struct regstat *i_regs)
2428 {
2429   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2430   {
2431     if(rt1[i]) {
2432       signed char s,t;
2433       t=get_reg(i_regs->regmap,rt1[i]);
2434       s=get_reg(i_regs->regmap,rs1[i]);
2435       //assert(t>=0);
2436       if(t>=0&&!((i_regs->isconst>>t)&1)){
2437         if(rs1[i]==0)
2438         {
2439           emit_zeroreg(t);
2440         }
2441         else
2442         {
2443           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2444           if(imm[i]) {
2445             if(opcode2[i]==0) // SLL
2446             {
2447               emit_shlimm(s<0?t:s,imm[i],t);
2448             }
2449             if(opcode2[i]==2) // SRL
2450             {
2451               emit_shrimm(s<0?t:s,imm[i],t);
2452             }
2453             if(opcode2[i]==3) // SRA
2454             {
2455               emit_sarimm(s<0?t:s,imm[i],t);
2456             }
2457           }else{
2458             // Shift by zero
2459             if(s>=0 && s!=t) emit_mov(s,t);
2460           }
2461         }
2462       }
2463       //emit_storereg(rt1[i],t); //DEBUG
2464     }
2465   }
2466   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2467   {
2468     if(rt1[i]) {
2469       signed char sh,sl,th,tl;
2470       th=get_reg(i_regs->regmap,rt1[i]|64);
2471       tl=get_reg(i_regs->regmap,rt1[i]);
2472       sh=get_reg(i_regs->regmap,rs1[i]|64);
2473       sl=get_reg(i_regs->regmap,rs1[i]);
2474       if(tl>=0) {
2475         if(rs1[i]==0)
2476         {
2477           emit_zeroreg(tl);
2478           if(th>=0) emit_zeroreg(th);
2479         }
2480         else
2481         {
2482           assert(sl>=0);
2483           assert(sh>=0);
2484           if(imm[i]) {
2485             if(opcode2[i]==0x38) // DSLL
2486             {
2487               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2488               emit_shlimm(sl,imm[i],tl);
2489             }
2490             if(opcode2[i]==0x3a) // DSRL
2491             {
2492               emit_shrdimm(sl,sh,imm[i],tl);
2493               if(th>=0) emit_shrimm(sh,imm[i],th);
2494             }
2495             if(opcode2[i]==0x3b) // DSRA
2496             {
2497               emit_shrdimm(sl,sh,imm[i],tl);
2498               if(th>=0) emit_sarimm(sh,imm[i],th);
2499             }
2500           }else{
2501             // Shift by zero
2502             if(sl!=tl) emit_mov(sl,tl);
2503             if(th>=0&&sh!=th) emit_mov(sh,th);
2504           }
2505         }
2506       }
2507     }
2508   }
2509   if(opcode2[i]==0x3c) // DSLL32
2510   {
2511     if(rt1[i]) {
2512       signed char sl,tl,th;
2513       tl=get_reg(i_regs->regmap,rt1[i]);
2514       th=get_reg(i_regs->regmap,rt1[i]|64);
2515       sl=get_reg(i_regs->regmap,rs1[i]);
2516       if(th>=0||tl>=0){
2517         assert(tl>=0);
2518         assert(th>=0);
2519         assert(sl>=0);
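        // imm[] here holds the full shift amount (32+sa), so moving the low
        // source word into the high result word covers the first 32 bits of
        // the shift and only imm[i]&31 remains (hence the imm[i]>32 test).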
2520         emit_mov(sl,th);
2521         emit_zeroreg(tl);
2522         if(imm[i]>32)
2523         {
2524           emit_shlimm(th,imm[i]&31,th);
2525         }
2526       }
2527     }
2528   }
2529   if(opcode2[i]==0x3e) // DSRL32
2530   {
2531     if(rt1[i]) {
2532       signed char sh,tl,th;
2533       tl=get_reg(i_regs->regmap,rt1[i]);
2534       th=get_reg(i_regs->regmap,rt1[i]|64);
2535       sh=get_reg(i_regs->regmap,rs1[i]|64);
2536       if(tl>=0){
2537         assert(sh>=0);
2538         emit_mov(sh,tl);
2539         if(th>=0) emit_zeroreg(th);
2540         if(imm[i]>32)
2541         {
2542           emit_shrimm(tl,imm[i]&31,tl);
2543         }
2544       }
2545     }
2546   }
2547   if(opcode2[i]==0x3f) // DSRA32
2548   {
2549     if(rt1[i]) {
2550       signed char sh,tl;
2551       tl=get_reg(i_regs->regmap,rt1[i]);
2552       sh=get_reg(i_regs->regmap,rs1[i]|64);
2553       if(tl>=0){
2554         assert(sh>=0);
2555         emit_mov(sh,tl);
2556         if(imm[i]>32)
2557         {
2558           emit_sarimm(tl,imm[i]&31,tl);
2559         }
2560       }
2561     }
2562   }
2563 }
2564
2565 #ifndef shift_assemble
2566 void shift_assemble(int i,struct regstat *i_regs)
2567 {
2568   printf("Need shift_assemble for this architecture.\n");
2569   exit(1);
2570 }
2571 #endif
2572
2573 void load_assemble(int i,struct regstat *i_regs)
2574 {
2575   int s,th,tl,addr,map=-1;
2576   int offset;
2577   int jaddr=0;
2578   int memtarget=0,c=0;
2579   int fastload_reg_override=0;
2580   u_int hr,reglist=0;
2581   th=get_reg(i_regs->regmap,rt1[i]|64);
2582   tl=get_reg(i_regs->regmap,rt1[i]);
2583   s=get_reg(i_regs->regmap,rs1[i]);
2584   offset=imm[i];
2585   for(hr=0;hr<HOST_REGS;hr++) {
2586     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2587   }
2588   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2589   if(s>=0) {
2590     c=(i_regs->wasconst>>s)&1;
2591     if (c) {
2592       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2593     }
2594   }
2595   //printf("load_assemble: c=%d\n",c);
2596   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2597   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2598   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2599     ||rt1[i]==0) {
2600       // could be a hardware FIFO, so the read must still be performed
2601       // (also covers dummy reads where rt is r0)
2602       assem_debug("(forced read)\n");
2603       tl=get_reg(i_regs->regmap,-1);
2604       assert(tl>=0);
2605   }
2606   if(offset||s<0||c) addr=tl;
2607   else addr=s;
2608   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2609  if(tl>=0) {
2610   //printf("load_assemble: c=%d\n",c);
2611   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2612   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2613   reglist&=~(1<<tl);
2614   if(th>=0) reglist&=~(1<<th);
2615   if(!c) {
2616     #ifdef RAM_OFFSET
2617     map=get_reg(i_regs->regmap,ROREG);
2618     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2619     #endif
2620     #ifdef R29_HACK
2621     // Strmnnrmn's speed hack
2622     if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2623     #endif
2624     {
2625       jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2626     }
2627   }
2628   else if(ram_offset&&memtarget) {
2629     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2630     fastload_reg_override=HOST_TEMPREG;
2631   }
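  // A load whose result is not needed (rt is r0, or no host register was
  // allocated for it) skips the inline RAM read, but the address check and
  // the slow-path stub are still generated so that I/O reads with side
  // effects still happen.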
2632   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2633   if (opcode[i]==0x20) { // LB
2634     if(!c||memtarget) {
2635       if(!dummy) {
2636         #ifdef HOST_IMM_ADDR32
2637         if(c)
2638           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2639         else
2640         #endif
2641         {
2642           //emit_xorimm(addr,3,tl);
2643           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2644           int x=0,a=tl;
2645 #ifdef BIG_ENDIAN_MIPS
2646           if(!c) emit_xorimm(addr,3,tl);
2647           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2648 #else
2649           if(!c) a=addr;
2650 #endif
2651           if(fastload_reg_override) a=fastload_reg_override;
2652
2653           emit_movsbl_indexed_tlb(x,a,map,tl);
2654         }
2655       }
2656       if(jaddr)
2657         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2658     }
2659     else
2660       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2661   }
2662   if (opcode[i]==0x21) { // LH
2663     if(!c||memtarget) {
2664       if(!dummy) {
2665         #ifdef HOST_IMM_ADDR32
2666         if(c)
2667           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2668         else
2669         #endif
2670         {
2671           int x=0,a=tl;
2672 #ifdef BIG_ENDIAN_MIPS
2673           if(!c) emit_xorimm(addr,2,tl);
2674           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2675 #else
2676           if(!c) a=addr;
2677 #endif
2678           if(fastload_reg_override) a=fastload_reg_override;
2679           //#ifdef
2680           //emit_movswl_indexed_tlb(x,tl,map,tl);
2681           //else
2682           if(map>=0) {
2683             emit_movswl_indexed(x,a,tl);
2684           }else{
2685             #if 1 //def RAM_OFFSET
2686             emit_movswl_indexed(x,a,tl);
2687             #else
2688             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2689             #endif
2690           }
2691         }
2692       }
2693       if(jaddr)
2694         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2695     }
2696     else
2697       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2698   }
2699   if (opcode[i]==0x23) { // LW
2700     if(!c||memtarget) {
2701       if(!dummy) {
2702         int a=addr;
2703         if(fastload_reg_override) a=fastload_reg_override;
2704         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2705         #ifdef HOST_IMM_ADDR32
2706         if(c)
2707           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2708         else
2709         #endif
2710         emit_readword_indexed_tlb(0,a,map,tl);
2711       }
2712       if(jaddr)
2713         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2714     }
2715     else
2716       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2717   }
2718   if (opcode[i]==0x24) { // LBU
2719     if(!c||memtarget) {
2720       if(!dummy) {
2721         #ifdef HOST_IMM_ADDR32
2722         if(c)
2723           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2724         else
2725         #endif
2726         {
2727           //emit_xorimm(addr,3,tl);
2728           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2729           int x=0,a=tl;
2730 #ifdef BIG_ENDIAN_MIPS
2731           if(!c) emit_xorimm(addr,3,tl);
2732           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2733 #else
2734           if(!c) a=addr;
2735 #endif
2736           if(fastload_reg_override) a=fastload_reg_override;
2737
2738           emit_movzbl_indexed_tlb(x,a,map,tl);
2739         }
2740       }
2741       if(jaddr)
2742         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2743     }
2744     else
2745       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2746   }
2747   if (opcode[i]==0x25) { // LHU
2748     if(!c||memtarget) {
2749       if(!dummy) {
2750         #ifdef HOST_IMM_ADDR32
2751         if(c)
2752           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2753         else
2754         #endif
2755         {
2756           int x=0,a=tl;
2757 #ifdef BIG_ENDIAN_MIPS
2758           if(!c) emit_xorimm(addr,2,tl);
2759           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2760 #else
2761           if(!c) a=addr;
2762 #endif
2763           if(fastload_reg_override) a=fastload_reg_override;
2764           //#ifdef
2765           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2766           //#else
2767           if(map>=0) {
2768             emit_movzwl_indexed(x,a,tl);
2769           }else{
2770             #if 1 //def RAM_OFFSET
2771             emit_movzwl_indexed(x,a,tl);
2772             #else
2773             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2774             #endif
2775           }
2776         }
2777       }
2778       if(jaddr)
2779         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2780     }
2781     else
2782       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2783   }
2784   if (opcode[i]==0x27) { // LWU
2785     assert(th>=0);
2786     if(!c||memtarget) {
2787       if(!dummy) {
2788         int a=addr;
2789         if(fastload_reg_override) a=fastload_reg_override;
2790         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2791         #ifdef HOST_IMM_ADDR32
2792         if(c)
2793           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2794         else
2795         #endif
2796         emit_readword_indexed_tlb(0,a,map,tl);
2797       }
2798       if(jaddr)
2799         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2800     }
2801     else {
2802       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2803     }
2804     emit_zeroreg(th);
2805   }
2806   if (opcode[i]==0x37) { // LD
2807     if(!c||memtarget) {
2808       if(!dummy) {
2809         int a=addr;
2810         if(fastload_reg_override) a=fastload_reg_override;
2811         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2812         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2813         #ifdef HOST_IMM_ADDR32
2814         if(c)
2815           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2816         else
2817         #endif
2818         emit_readdword_indexed_tlb(0,a,map,th,tl);
2819       }
2820       if(jaddr)
2821         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2822     }
2823     else
2824       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2825   }
2826  }
2827   //emit_storereg(rt1[i],tl); // DEBUG
2828   //if(opcode[i]==0x23)
2829   //if(opcode[i]==0x24)
2830   //if(opcode[i]==0x23||opcode[i]==0x24)
2831   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2832   {
2833     //emit_pusha();
2834     save_regs(0x100f);
2835         emit_readword((int)&last_count,ECX);
2836         #ifdef __i386__
2837         if(get_reg(i_regs->regmap,CCREG)<0)
2838           emit_loadreg(CCREG,HOST_CCREG);
2839         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2840         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2841         emit_writeword(HOST_CCREG,(int)&Count);
2842         #endif
2843         #ifdef __arm__
2844         if(get_reg(i_regs->regmap,CCREG)<0)
2845           emit_loadreg(CCREG,0);
2846         else
2847           emit_mov(HOST_CCREG,0);
2848         emit_add(0,ECX,0);
2849         emit_addimm(0,2*ccadj[i],0);
2850         emit_writeword(0,(int)&Count);
2851         #endif
2852     emit_call((int)memdebug);
2853     //emit_popa();
2854     restore_regs(0x100f);
2855   }*/
2856 }
2857
2858 #ifndef loadlr_assemble
2859 void loadlr_assemble(int i,struct regstat *i_regs)
2860 {
2861   printf("Need loadlr_assemble for this architecture.\n");
2862   exit(1);
2863 }
2864 #endif
2865
2866 void store_assemble(int i,struct regstat *i_regs)
2867 {
2868   int s,th,tl,map=-1;
2869   int addr,temp;
2870   int offset;
2871   int jaddr=0,type;
2872   int memtarget=0,c=0;
2873   int agr=AGEN1+(i&1);
2874   int faststore_reg_override=0;
2875   u_int hr,reglist=0;
2876   th=get_reg(i_regs->regmap,rs2[i]|64);
2877   tl=get_reg(i_regs->regmap,rs2[i]);
2878   s=get_reg(i_regs->regmap,rs1[i]);
2879   temp=get_reg(i_regs->regmap,agr);
2880   if(temp<0) temp=get_reg(i_regs->regmap,-1);
2881   offset=imm[i];
2882   if(s>=0) {
2883     c=(i_regs->wasconst>>s)&1;
2884     if(c) {
2885       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2886     }
2887   }
2888   assert(tl>=0);
2889   assert(temp>=0);
2890   for(hr=0;hr<HOST_REGS;hr++) {
2891     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2892   }
2893   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2894   if(offset||s<0||c) addr=temp;
2895   else addr=s;
2896   if(!c) {
2897     jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
2898   }
2899   else if(ram_offset&&memtarget) {
2900     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2901     faststore_reg_override=HOST_TEMPREG;
2902   }
2903
2904   if (opcode[i]==0x28) { // SB
2905     if(!c||memtarget) {
2906       int x=0,a=temp;
2907 #ifdef BIG_ENDIAN_MIPS
2908       if(!c) emit_xorimm(addr,3,temp);
2909       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2910 #else
2911       if(!c) a=addr;
2912 #endif
2913       if(faststore_reg_override) a=faststore_reg_override;
2914       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
2915       emit_writebyte_indexed_tlb(tl,x,a,map,a);
2916     }
2917     type=STOREB_STUB;
2918   }
2919   if (opcode[i]==0x29) { // SH
2920     if(!c||memtarget) {
2921       int x=0,a=temp;
2922 #ifdef BIG_ENDIAN_MIPS
2923       if(!c) emit_xorimm(addr,2,temp);
2924       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2925 #else
2926       if(!c) a=addr;
2927 #endif
2928       if(faststore_reg_override) a=faststore_reg_override;
2929       //#ifdef
2930       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
2931       //#else
2932       if(map>=0) {
2933         emit_writehword_indexed(tl,x,a);
2934       }else
2935         //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
2936         emit_writehword_indexed(tl,x,a);
2937     }
2938     type=STOREH_STUB;
2939   }
2940   if (opcode[i]==0x2B) { // SW
2941     if(!c||memtarget) {
2942       int a=addr;
2943       if(faststore_reg_override) a=faststore_reg_override;
2944       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
2945       emit_writeword_indexed_tlb(tl,0,a,map,temp);
2946     }
2947     type=STOREW_STUB;
2948   }
2949   if (opcode[i]==0x3F) { // SD
2950     if(!c||memtarget) {
2951       int a=addr;
2952       if(faststore_reg_override) a=faststore_reg_override;
2953       if(rs2[i]) {
2954         assert(th>=0);
2955         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
2956         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
2957         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
2958       }else{
2959         // Store zero
2960         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
2961         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
2962         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
2963       }
2964     }
2965     type=STORED_STUB;
2966   }
2967   if(jaddr) {
2968     // PCSX store handlers don't check invcode again
2969     reglist|=1<<addr;
2970     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2971     jaddr=0;
2972   }
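  // Self-modifying code check: compare the invalid_code[] entry for the
  // written page and, if compiled code still exists there, branch to an
  // INVCODE stub (or call invalidate_addr_reg directly) to invalidate it.
  // Skipped if this base register was already used for a store earlier in
  // the block, or if the NO_SMC_CHECK hack is enabled.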
2973   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
2974     if(!c||memtarget) {
2975       #ifdef DESTRUCTIVE_SHIFT
2976       // The x86 shift operation is 'destructive'; it overwrites the
2977       // source register, so we need to make a copy first and use that.
2978       addr=temp;
2979       #endif
2980       #if defined(HOST_IMM8)
2981       int ir=get_reg(i_regs->regmap,INVCP);
2982       assert(ir>=0);
2983       emit_cmpmem_indexedsr12_reg(ir,addr,1);
2984       #else
2985       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
2986       #endif
2987       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2988       emit_callne(invalidate_addr_reg[addr]);
2989       #else
2990       int jaddr2=(int)out;
2991       emit_jne(0);
2992       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
2993       #endif
2994     }
2995   }
2996   u_int addr_val=constmap[i][s]+offset;
2997   if(jaddr) {
2998     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2999   } else if(c&&!memtarget) {
3000     inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3001   }
3002   // basic detection of stores that modify the current block;
3003   // not looking back, as earlier code should already be in the MIPS i-cache
3004   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3005     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3006     assert(i_regs->regmap==regs[i].regmap); // not delay slot
3007     if(i_regs->regmap==regs[i].regmap) {
3008       load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
3009       wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
3010       emit_movimm(start+i*4+4,0);
3011       emit_writeword(0,(int)&pcaddr);
3012       emit_jmp((int)do_interrupt);
3013     }
3014   }
3015   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3016   //if(opcode[i]==0x2B || opcode[i]==0x28)
3017   //if(opcode[i]==0x2B || opcode[i]==0x29)
3018   //if(opcode[i]==0x2B)
3019   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3020   {
3021     #ifdef __i386__
3022     emit_pusha();
3023     #endif
3024     #ifdef __arm__
3025     save_regs(0x100f);
3026     #endif
3027         emit_readword((int)&last_count,ECX);
3028         #ifdef __i386__
3029         if(get_reg(i_regs->regmap,CCREG)<0)
3030           emit_loadreg(CCREG,HOST_CCREG);
3031         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3032         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3033         emit_writeword(HOST_CCREG,(int)&Count);
3034         #endif
3035         #ifdef __arm__
3036         if(get_reg(i_regs->regmap,CCREG)<0)
3037           emit_loadreg(CCREG,0);
3038         else
3039           emit_mov(HOST_CCREG,0);
3040         emit_add(0,ECX,0);
3041         emit_addimm(0,2*ccadj[i],0);
3042         emit_writeword(0,(int)&Count);
3043         #endif
3044     emit_call((int)memdebug);
3045     #ifdef __i386__
3046     emit_popa();
3047     #endif
3048     #ifdef __arm__
3049     restore_regs(0x100f);
3050     #endif
3051   }*/
3052 }
3053
3054 void storelr_assemble(int i,struct regstat *i_regs)
3055 {
3056   int s,th,tl;
3057   int temp;
3058   int temp2=-1;
3059   int offset;
3060   int jaddr=0;
3061   int case1,case2,case3;
3062   int done0,done1,done2;
3063   int memtarget=0,c=0;
3064   int agr=AGEN1+(i&1);
3065   u_int hr,reglist=0;
3066   th=get_reg(i_regs->regmap,rs2[i]|64);
3067   tl=get_reg(i_regs->regmap,rs2[i]);
3068   s=get_reg(i_regs->regmap,rs1[i]);
3069   temp=get_reg(i_regs->regmap,agr);
3070   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3071   offset=imm[i];
3072   if(s>=0) {
3073     c=(i_regs->isconst>>s)&1;
3074     if(c) {
3075       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3076     }
3077   }
3078   assert(tl>=0);
3079   for(hr=0;hr<HOST_REGS;hr++) {
3080     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3081   }
3082   assert(temp>=0);
3083   if(!c) {
3084     emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3085     if(!offset&&s!=temp) emit_mov(s,temp);
3086     jaddr=(int)out;
3087     emit_jno(0);
3088   }
3089   else
3090   {
3091     if(!memtarget||!rs1[i]) {
3092       jaddr=(int)out;
3093       emit_jmp(0);
3094     }
3095   }
3096   #ifdef RAM_OFFSET
3097   int map=get_reg(i_regs->regmap,ROREG);
3098   if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3099   #else
3100   if((u_int)rdram!=0x80000000)
3101     emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3102   #endif
3103
3104   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3105     temp2=get_reg(i_regs->regmap,FTEMP);
3106     if(!rs2[i]) temp2=th=tl;
3107   }
3108
3109 #ifndef BIG_ENDIAN_MIPS
3110     emit_xorimm(temp,3,temp);
3111 #endif
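  // Dispatch on the two low address bits: bit 1 selects cases 2/3, then
  // bit 0 selects the odd cases, giving the four byte-offset paths below.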
3112   emit_testimm(temp,2);
3113   case2=(int)out;
3114   emit_jne(0);
3115   emit_testimm(temp,1);
3116   case1=(int)out;
3117   emit_jne(0);
3118   // 0
3119   if (opcode[i]==0x2A) { // SWL
3120     emit_writeword_indexed(tl,0,temp);
3121   }
3122   if (opcode[i]==0x2E) { // SWR
3123     emit_writebyte_indexed(tl,3,temp);
3124   }
3125   if (opcode[i]==0x2C) { // SDL
3126     emit_writeword_indexed(th,0,temp);
3127     if(rs2[i]) emit_mov(tl,temp2);
3128   }
3129   if (opcode[i]==0x2D) { // SDR
3130     emit_writebyte_indexed(tl,3,temp);
3131     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3132   }
3133   done0=(int)out;
3134   emit_jmp(0);
3135   // 1
3136   set_jump_target(case1,(int)out);
3137   if (opcode[i]==0x2A) { // SWL
3138     // Write 3 msb into three least significant bytes
3139     if(rs2[i]) emit_rorimm(tl,8,tl);
3140     emit_writehword_indexed(tl,-1,temp);
3141     if(rs2[i]) emit_rorimm(tl,16,tl);
3142     emit_writebyte_indexed(tl,1,temp);
3143     if(rs2[i]) emit_rorimm(tl,8,tl);
3144   }
3145   if (opcode[i]==0x2E) { // SWR
3146     // Write two lsb into two most significant bytes
3147     emit_writehword_indexed(tl,1,temp);
3148   }
3149   if (opcode[i]==0x2C) { // SDL
3150     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3151     // Write 3 msb into three least significant bytes
3152     if(rs2[i]) emit_rorimm(th,8,th);
3153     emit_writehword_indexed(th,-1,temp);
3154     if(rs2[i]) emit_rorimm(th,16,th);
3155     emit_writebyte_indexed(th,1,temp);
3156     if(rs2[i]) emit_rorimm(th,8,th);
3157   }
3158   if (opcode[i]==0x2D) { // SDR
3159     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3160     // Write two lsb into two most significant bytes
3161     emit_writehword_indexed(tl,1,temp);
3162   }
3163   done1=(int)out;
3164   emit_jmp(0);
3165   // 2
3166   set_jump_target(case2,(int)out);
3167   emit_testimm(temp,1);
3168   case3=(int)out;
3169   emit_jne(0);
3170   if (opcode[i]==0x2A) { // SWL
3171     // Write two msb into two least significant bytes
3172     if(rs2[i]) emit_rorimm(tl,16,tl);
3173     emit_writehword_indexed(tl,-2,temp);
3174     if(rs2[i]) emit_rorimm(tl,16,tl);
3175   }
3176   if (opcode[i]==0x2E) { // SWR
3177     // Write 3 lsb into three most significant bytes
3178     emit_writebyte_indexed(tl,-1,temp);
3179     if(rs2[i]) emit_rorimm(tl,8,tl);
3180     emit_writehword_indexed(tl,0,temp);
3181     if(rs2[i]) emit_rorimm(tl,24,tl);
3182   }
3183   if (opcode[i]==0x2C) { // SDL
3184     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3185     // Write two msb into two least significant bytes
3186     if(rs2[i]) emit_rorimm(th,16,th);
3187     emit_writehword_indexed(th,-2,temp);
3188     if(rs2[i]) emit_rorimm(th,16,th);
3189   }
3190   if (opcode[i]==0x2D) { // SDR
3191     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3192     // Write 3 lsb into three most significant bytes
3193     emit_writebyte_indexed(tl,-1,temp);
3194     if(rs2[i]) emit_rorimm(tl,8,tl);
3195     emit_writehword_indexed(tl,0,temp);
3196     if(rs2[i]) emit_rorimm(tl,24,tl);
3197   }
3198   done2=(int)out;
3199   emit_jmp(0);
3200   // 3
3201   set_jump_target(case3,(int)out);
3202   if (opcode[i]==0x2A) { // SWL
3203     // Write msb into least significant byte
3204     if(rs2[i]) emit_rorimm(tl,24,tl);
3205     emit_writebyte_indexed(tl,-3,temp);
3206     if(rs2[i]) emit_rorimm(tl,8,tl);
3207   }
3208   if (opcode[i]==0x2E) { // SWR
3209     // Write entire word
3210     emit_writeword_indexed(tl,-3,temp);
3211   }
3212   if (opcode[i]==0x2C) { // SDL
3213     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3214     // Write msb into least significant byte
3215     if(rs2[i]) emit_rorimm(th,24,th);
3216     emit_writebyte_indexed(th,-3,temp);
3217     if(rs2[i]) emit_rorimm(th,8,th);
3218   }
3219   if (opcode[i]==0x2D) { // SDR
3220     if(rs2[i]) emit_mov(th,temp2);
3221     // Write entire word
3222     emit_writeword_indexed(tl,-3,temp);
3223   }
3224   set_jump_target(done0,(int)out);
3225   set_jump_target(done1,(int)out);
3226   set_jump_target(done2,(int)out);
3227   if (opcode[i]==0x2C) { // SDL
3228     emit_testimm(temp,4);
3229     done0=(int)out;
3230     emit_jne(0);
3231     emit_andimm(temp,~3,temp);
3232     emit_writeword_indexed(temp2,4,temp);
3233     set_jump_target(done0,(int)out);
3234   }
3235   if (opcode[i]==0x2D) { // SDR
3236     emit_testimm(temp,4);
3237     done0=(int)out;
3238     emit_jeq(0);
3239     emit_andimm(temp,~3,temp);
3240     emit_writeword_indexed(temp2,-4,temp);
3241     set_jump_target(done0,(int)out);
3242   }
3243   if(!c||!memtarget)
3244     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3245   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3246     #ifdef RAM_OFFSET
3247     int map=get_reg(i_regs->regmap,ROREG);
3248     if(map<0) map=HOST_TEMPREG;
3249     gen_orig_addr_w(temp,map);
3250     #else
3251     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3252     #endif
3253     #if defined(HOST_IMM8)
3254     int ir=get_reg(i_regs->regmap,INVCP);
3255     assert(ir>=0);
3256     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3257     #else
3258     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3259     #endif
3260     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3261     emit_callne(invalidate_addr_reg[temp]);
3262     #else
3263     int jaddr2=(int)out;
3264     emit_jne(0);
3265     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3266     #endif
3267   }
3268   /*
3269     emit_pusha();
3270     //save_regs(0x100f);
3271         emit_readword((int)&last_count,ECX);
3272         if(get_reg(i_regs->regmap,CCREG)<0)
3273           emit_loadreg(CCREG,HOST_CCREG);
3274         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3275         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3276         emit_writeword(HOST_CCREG,(int)&Count);
3277     emit_call((int)memdebug);
3278     emit_popa();
3279     //restore_regs(0x100f);
3280   */
3281 }
3282
3283 void c1ls_assemble(int i,struct regstat *i_regs)
3284 {
3285   cop1_unusable(i, i_regs);
3286 }
3287
3288 void c2ls_assemble(int i,struct regstat *i_regs)
3289 {
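  // GTE loads/stores (LWC2/SWC2): the data word is staged in the FTEMP
  // host register and moved to/from GTE data register 'copr' with
  // cop2_get_dreg/cop2_put_dreg; the memory access itself reuses the
  // ordinary LOADW/STOREW stubs for the slow path.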
3290   int s,tl;
3291   int ar;
3292   int offset;
3293   int memtarget=0,c=0;
3294   int jaddr2=0,type;
3295   int agr=AGEN1+(i&1);
3296   int fastio_reg_override=0;
3297   u_int hr,reglist=0;
3298   u_int copr=(source[i]>>16)&0x1f;
3299   s=get_reg(i_regs->regmap,rs1[i]);
3300   tl=get_reg(i_regs->regmap,FTEMP);
3301   offset=imm[i];
3302   assert(rs1[i]>0);
3303   assert(tl>=0);
3304
3305   for(hr=0;hr<HOST_REGS;hr++) {
3306     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3307   }
3308   if(i_regs->regmap[HOST_CCREG]==CCREG)
3309     reglist&=~(1<<HOST_CCREG);
3310
3311   // get the address
3312   if (opcode[i]==0x3a) { // SWC2
3313     ar=get_reg(i_regs->regmap,agr);
3314     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3315     reglist|=1<<ar;
3316   } else { // LWC2
3317     ar=tl;
3318   }
3319   if(s>=0) c=(i_regs->wasconst>>s)&1;
3320   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3321   if (!offset&&!c&&s>=0) ar=s;
3322   assert(ar>=0);
3323
3324   if (opcode[i]==0x3a) { // SWC2
3325     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3326     type=STOREW_STUB;
3327   }
3328   else
3329     type=LOADW_STUB;
3330
3331   if(c&&!memtarget) {
3332     jaddr2=(int)out;
3333     emit_jmp(0); // inline_readstub/inline_writestub?
3334   }
3335   else {
3336     if(!c) {
3337       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3338     }
3339     else if(ram_offset&&memtarget) {
3340       emit_addimm(ar,ram_offset,HOST_TEMPREG);
3341       fastio_reg_override=HOST_TEMPREG;
3342     }
3343     if (opcode[i]==0x32) { // LWC2
3344       #ifdef HOST_IMM_ADDR32
3345       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3346       else
3347       #endif
3348       int a=ar;
3349       if(fastio_reg_override) a=fastio_reg_override;
3350       emit_readword_indexed(0,a,tl);
3351     }
3352     if (opcode[i]==0x3a) { // SWC2
3353       #ifdef DESTRUCTIVE_SHIFT
3354       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3355       #endif
3356       int a=ar;
3357       if(fastio_reg_override) a=fastio_reg_override;
3358       emit_writeword_indexed(tl,0,a);
3359     }
3360   }
3361   if(jaddr2)
3362     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3363   if(opcode[i]==0x3a) // SWC2
3364   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3365 #if defined(HOST_IMM8)
3366     int ir=get_reg(i_regs->regmap,INVCP);
3367     assert(ir>=0);
3368     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3369 #else
3370     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3371 #endif
3372     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3373     emit_callne(invalidate_addr_reg[ar]);
3374     #else
3375     int jaddr3=(int)out;
3376     emit_jne(0);
3377     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3378     #endif
3379   }
3380   if (opcode[i]==0x32) { // LWC2
3381     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3382   }
3383 }
3384
3385 #ifndef multdiv_assemble
3386 void multdiv_assemble(int i,struct regstat *i_regs)
3387 {
3388   printf("Need multdiv_assemble for this architecture.\n");
3389   exit(1);
3390 }
3391 #endif
3392
3393 void mov_assemble(int i,struct regstat *i_regs)
3394 {
3395   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3396   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3397   if(rt1[i]) {
3398     signed char sh,sl,th,tl;
3399     th=get_reg(i_regs->regmap,rt1[i]|64);
3400     tl=get_reg(i_regs->regmap,rt1[i]);
3401     //assert(tl>=0);
3402     if(tl>=0) {
3403       sh=get_reg(i_regs->regmap,rs1[i]|64);
3404       sl=get_reg(i_regs->regmap,rs1[i]);
3405       if(sl>=0) emit_mov(sl,tl);
3406       else emit_loadreg(rs1[i],tl);
3407       if(th>=0) {
3408         if(sh>=0) emit_mov(sh,th);
3409         else emit_loadreg(rs1[i]|64,th);
3410       }
3411     }
3412   }
3413 }
3414
3415 #ifndef fconv_assemble
3416 void fconv_assemble(int i,struct regstat *i_regs)
3417 {
3418   printf("Need fconv_assemble for this architecture.\n");
3419   exit(1);
3420 }
3421 #endif
3422
3423 #if 0
3424 void float_assemble(int i,struct regstat *i_regs)
3425 {
3426   printf("Need float_assemble for this architecture.\n");
3427   exit(1);
3428 }
3429 #endif
3430
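// SYSCALL: pass the PC of this instruction, add the pending cycles to the
// cycle counter and leave the block through jump_syscall_hle.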
3431 void syscall_assemble(int i,struct regstat *i_regs)
3432 {
3433   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3434   assert(ccreg==HOST_CCREG);
3435   assert(!is_delayslot);
3436   (void)ccreg;
3437   emit_movimm(start+i*4,EAX); // Get PC
3438   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3439   emit_jmp((int)jump_syscall_hle); // XXX
3440 }
3441
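// HLECALL: pass the return PC and the BIOS HLE handler selected by the
// opcode's low 26 bits (psxNULL for out-of-range indices), then exit
// through jump_hlecall.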
3442 void hlecall_assemble(int i,struct regstat *i_regs)
3443 {
3444   extern void psxNULL();
3445   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3446   assert(ccreg==HOST_CCREG);
3447   assert(!is_delayslot);
3448   (void)ccreg;
3449   emit_movimm(start+i*4+4,0); // Get PC
3450   uint32_t hleCode = source[i] & 0x03ffffff;
3451   if (hleCode >= (sizeof(psxHLEt) / sizeof(psxHLEt[0])))
3452     emit_movimm((int)psxNULL,1);
3453   else
3454     emit_movimm((int)psxHLEt[hleCode],1);
3455   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
3456   emit_jmp((int)jump_hlecall);
3457 }
3458
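// INTCALL: exit to jump_intcall with the current PC and cycle count
// (used for instructions handled outside the recompiled code).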
3459 void intcall_assemble(int i,struct regstat *i_regs)
3460 {
3461   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3462   assert(ccreg==HOST_CCREG);
3463   assert(!is_delayslot);
3464   (void)ccreg;
3465   emit_movimm(start+i*4,0); // Get PC
3466   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3467   emit_jmp((int)jump_intcall);
3468 }
3469
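// Assemble the instruction sitting in a branch delay slot; is_delayslot is
// set around the per-type assemblers so they can apply delay-slot rules.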
3470 void ds_assemble(int i,struct regstat *i_regs)
3471 {
3472   speculate_register_values(i);
3473   is_delayslot=1;
3474   switch(itype[i]) {
3475     case ALU:
3476       alu_assemble(i,i_regs);break;
3477     case IMM16:
3478       imm16_assemble(i,i_regs);break;
3479     case SHIFT:
3480       shift_assemble(i,i_regs);break;
3481     case SHIFTIMM:
3482       shiftimm_assemble(i,i_regs);break;
3483     case LOAD:
3484       load_assemble(i,i_regs);break;
3485     case LOADLR:
3486       loadlr_assemble(i,i_regs);break;
3487     case STORE:
3488       store_assemble(i,i_regs);break;
3489     case STORELR:
3490       storelr_assemble(i,i_regs);break;
3491     case COP0:
3492       cop0_assemble(i,i_regs);break;
3493     case COP1:
3494       cop1_assemble(i,i_regs);break;
3495     case C1LS:
3496       c1ls_assemble(i,i_regs);break;
3497     case COP2:
3498       cop2_assemble(i,i_regs);break;
3499     case C2LS:
3500       c2ls_assemble(i,i_regs);break;
3501     case C2OP:
3502       c2op_assemble(i,i_regs);break;
3503     case FCONV:
3504       fconv_assemble(i,i_regs);break;
3505     case FLOAT:
3506       float_assemble(i,i_regs);break;
3507     case FCOMP:
3508       fcomp_assemble(i,i_regs);break;
3509     case MULTDIV:
3510       multdiv_assemble(i,i_regs);break;
3511     case MOV:
3512       mov_assemble(i,i_regs);break;
3513     case SYSCALL:
3514     case HLECALL:
3515     case INTCALL:
3516     case SPAN:
3517     case UJUMP:
3518     case RJUMP:
3519     case CJUMP:
3520     case SJUMP:
3521     case FJUMP:
3522       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
3523   }
3524   is_delayslot=0;
3525 }
3526
3527 // Is the branch target a valid internal jump?
3528 int internal_branch(uint64_t i_is32,int addr)
3529 {
3530   if(addr&1) return 0; // Indirect (register) jump
3531   if(addr>=start && addr<start+slen*4-4)
3532   {
3533     //int t=(addr-start)>>2;
3534     // Delay slots are not valid branch targets
3535     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3536     // 64 -> 32 bit transition requires a recompile
3537     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3538     {
3539       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3540       else printf("optimizable: yes\n");
3541     }*/
3542     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3543     return 1;
3544   }
3545   return 0;
3546 }
3547
3548 #ifndef wb_invalidate
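// Write back dirty registers whose mapping is about to change (pre vs entry)
// and would otherwise be lost, then handle values that simply move to a
// different host register.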
3549 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3550   uint64_t u,uint64_t uu)
3551 {
3552   int hr;
3553   for(hr=0;hr<HOST_REGS;hr++) {
3554     if(hr!=EXCLUDE_REG) {
3555       if(pre[hr]!=entry[hr]) {
3556         if(pre[hr]>=0) {
3557           if((dirty>>hr)&1) {
3558             if(get_reg(entry,pre[hr])<0) {
3559               if(pre[hr]<64) {
3560                 if(!((u>>pre[hr])&1)) {
3561                   emit_storereg(pre[hr],hr);
3562                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3563                     emit_sarimm(hr,31,hr);
3564                     emit_storereg(pre[hr]|64,hr);
3565                   }
3566                 }
3567               }else{
3568                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3569                   emit_storereg(pre[hr],hr);
3570                 }
3571               }
3572             }
3573           }
3574         }
3575       }
3576     }
3577   }
3578   // Move from one register to another (no writeback)
3579   for(hr=0;hr<HOST_REGS;hr++) {
3580     if(hr!=EXCLUDE_REG) {
3581       if(pre[hr]!=entry[hr]) {
3582         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3583           int nr;
3584           if((nr=get_reg(entry,pre[hr]))>=0) {
3585             emit_mov(hr,nr);
3586           }
3587         }
3588       }
3589     }
3590   }
3591 }
3592 #endif
3593
3594 // Load the specified registers
3595 // This only loads the registers given as arguments because
3596 // we don't want to load things that will be overwritten
3597 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3598 {
3599   int hr;
3600   // Load 32-bit regs
3601   for(hr=0;hr<HOST_REGS;hr++) {
3602     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3603       if(entry[hr]!=regmap[hr]) {
3604         if(regmap[hr]==rs1||regmap[hr]==rs2)
3605         {
3606           if(regmap[hr]==0) {
3607             emit_zeroreg(hr);
3608           }
3609           else
3610           {
3611             emit_loadreg(regmap[hr],hr);
3612           }
3613         }
3614       }
3615     }
3616   }
3617   // Load 64-bit regs
3618   for(hr=0;hr<HOST_REGS;hr++) {
3619     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3620       if(entry[hr]!=regmap[hr]) {
3621         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3622         {
3623           assert(regmap[hr]!=64);
3624           if((is32>>(regmap[hr]&63))&1) {
3625             int lr=get_reg(regmap,regmap[hr]-64);
3626             if(lr>=0)
3627               emit_sarimm(lr,31,hr);
3628             else
3629               emit_loadreg(regmap[hr],hr);
3630           }
3631           else
3632           {
3633             emit_loadreg(regmap[hr],hr);
3634           }
3635         }
3636       }
3637     }
3638   }
3639 }
3640
3641 // Load registers prior to the start of a loop
3642 // so that they are not loaded within the loop
3643 static void loop_preload(signed char pre[],signed char entry[])
3644 {
3645   int hr;
3646   for(hr=0;hr<HOST_REGS;hr++) {
3647     if(hr!=EXCLUDE_REG) {
3648       if(pre[hr]!=entry[hr]) {
3649         if(entry[hr]>=0) {
3650           if(get_reg(pre,entry[hr])<0) {
3651             assem_debug("loop preload:\n");
3652             //printf("loop preload: %d\n",hr);
3653             if(entry[hr]==0) {
3654               emit_zeroreg(hr);
3655             }
3656             else if(entry[hr]<TEMPREG)
3657             {
3658               emit_loadreg(entry[hr],hr);
3659             }
3660             else if(entry[hr]-64<TEMPREG)
3661             {
3662               emit_loadreg(entry[hr],hr);
3663             }
3664           }
3665         }
3666       }
3667     }
3668   }
3669 }
3670
3671 // Generate address for load/store instruction
3672 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
3673 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3674 {
3675   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3676     int ra=-1;
3677     int agr=AGEN1+(i&1);
3678     if(itype[i]==LOAD) {
3679       ra=get_reg(i_regs->regmap,rt1[i]);
3680       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3681       assert(ra>=0);
3682     }
3683     if(itype[i]==LOADLR) {
3684       ra=get_reg(i_regs->regmap,FTEMP);
3685     }
3686     if(itype[i]==STORE||itype[i]==STORELR) {
3687       ra=get_reg(i_regs->regmap,agr);
3688       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3689     }
3690     if(itype[i]==C1LS||itype[i]==C2LS) {
3691       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3692         ra=get_reg(i_regs->regmap,FTEMP);
3693       else { // SWC1/SDC1/SWC2/SDC2
3694         ra=get_reg(i_regs->regmap,agr);
3695         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3696       }
3697     }
3698     int rs=get_reg(i_regs->regmap,rs1[i]);
3699     if(ra>=0) {
3700       int offset=imm[i];
3701       int c=(i_regs->wasconst>>rs)&1;
3702       if(rs1[i]==0) {
3703         // Using r0 as a base address
3704         if(!entry||entry[ra]!=agr) {
3705           if (opcode[i]==0x22||opcode[i]==0x26) {
3706             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3707           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3708             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3709           }else{
3710             emit_movimm(offset,ra);
3711           }
3712         } // else did it in the previous cycle
3713       }
3714       else if(rs<0) {
3715         if(!entry||entry[ra]!=rs1[i])
3716           emit_loadreg(rs1[i],ra);
3717         //if(!entry||entry[ra]!=rs1[i])
3718         //  printf("poor load scheduling!\n");
3719       }
3720       else if(c) {
3721         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
3722           if(!entry||entry[ra]!=agr) {
3723             if (opcode[i]==0x22||opcode[i]==0x26) {
3724               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3725             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3726               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3727             }else{
3728               #ifdef HOST_IMM_ADDR32
3729               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3730               #endif
3731               emit_movimm(constmap[i][rs]+offset,ra);
3732               regs[i].loadedconst|=1<<ra;
3733             }
3734           } // else did it in the previous cycle
3735         } // else load_consts already did it
3736       }
3737       if(offset&&!c&&rs1[i]) {
3738         if(rs>=0) {
3739           emit_addimm(rs,offset,ra);
3740         }else{
3741           emit_addimm(ra,offset,ra);
3742         }
3743       }
3744     }
3745   }
3746   // Preload constants for next instruction
3747   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
3748     int agr,ra;
3749     // Actual address
3750     agr=AGEN1+((i+1)&1);
3751     ra=get_reg(i_regs->regmap,agr);
3752     if(ra>=0) {
3753       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
3754       int offset=imm[i+1];
3755       int c=(regs[i+1].wasconst>>rs)&1;
3756       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
3757         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3758           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3759         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3760           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3761         }else{
3762           #ifdef HOST_IMM_ADDR32
3763           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3764           #endif
3765           emit_movimm(constmap[i+1][rs]+offset,ra);
3766           regs[i+1].loadedconst|=1<<ra;
3767         }
3768       }
3769       else if(rs1[i+1]==0) {
3770         // Using r0 as a base address
3771         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3772           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3773         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3774           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3775         }else{
3776           emit_movimm(offset,ra);
3777         }
3778       }
3779     }
3780   }
3781 }
3782
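// Follow a constant forward while the host register keeps the same mapping;
// returns 0 if the value turns out to be unneeded (so the load can be
// skipped), otherwise stores the value that should be loaded in *value.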
3783 static int get_final_value(int hr, int i, int *value)
3784 {
3785   int reg=regs[i].regmap[hr];
3786   while(i<slen-1) {
3787     if(regs[i+1].regmap[hr]!=reg) break;
3788     if(!((regs[i+1].isconst>>hr)&1)) break;
3789     if(bt[i+1]) break;
3790     i++;
3791   }
3792   if(i<slen-1) {
3793     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
3794       *value=constmap[i][hr];
3795       return 1;
3796     }
3797     if(!bt[i+1]) {
3798       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
3799         // Load in delay slot, out-of-order execution
3800         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
3801         {
3802           // Precompute load address
3803           *value=constmap[i][hr]+imm[i+2];
3804           return 1;
3805         }
3806       }
3807       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
3808       {
3809         // Precompute load address
3810         *value=constmap[i][hr]+imm[i+1];
3811         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
3812         return 1;
3813       }
3814     }
3815   }
3816   *value=constmap[i][hr];
3817   //printf("c=%x\n",(int)constmap[i][hr]);
3818   if(i==slen-1) return 1;
3819   if(reg<64) {
3820     return !((unneeded_reg[i+1]>>reg)&1);
3821   }else{
3822     return !((unneeded_reg_upper[i+1]>>reg)&1);
3823   }
3824 }
3825
3826 // Load registers with known constants
3827 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
3828 {
3829   int hr,hr2;
3830   // propagate loaded constant flags
3831   if(i==0||bt[i])
3832     regs[i].loadedconst=0;
3833   else {
3834     for(hr=0;hr<HOST_REGS;hr++) {
3835       if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
3836          &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
3837       {
3838         regs[i].loadedconst|=1<<hr;
3839       }
3840     }
3841   }
3842   // Load 32-bit regs
3843   for(hr=0;hr<HOST_REGS;hr++) {
3844     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3845       //if(entry[hr]!=regmap[hr]) {
3846       if(!((regs[i].loadedconst>>hr)&1)) {
3847         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3848           int value,similar=0;
3849           if(get_final_value(hr,i,&value)) {
3850             // see if some other register has a similar value
3851             for(hr2=0;hr2<HOST_REGS;hr2++) {
3852               if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
3853                 if(is_similar_value(value,constmap[i][hr2])) {
3854                   similar=1;
3855                   break;
3856                 }
3857               }
3858             }
3859             if(similar) {
3860               int value2;
3861               if(get_final_value(hr2,i,&value2)) // is this needed?
3862                 emit_movimm_from(value2,hr2,value,hr);
3863               else
3864                 emit_movimm(value,hr);
3865             }
3866             else if(value==0) {
3867               emit_zeroreg(hr);
3868             }
3869             else {
3870               emit_movimm(value,hr);
3871             }
3872           }
3873           regs[i].loadedconst|=1<<hr;
3874         }
3875       }
3876     }
3877   }
3878   // Load 64-bit regs
3879   for(hr=0;hr<HOST_REGS;hr++) {
3880     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3881       //if(entry[hr]!=regmap[hr]) {
3882       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
3883         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3884           if((is32>>(regmap[hr]&63))&1) {
3885             int lr=get_reg(regmap,regmap[hr]-64);
3886             assert(lr>=0);
3887             emit_sarimm(lr,31,hr);
3888           }
3889           else
3890           {
3891             int value;
3892             if(get_final_value(hr,i,&value)) {
3893               if(value==0) {
3894                 emit_zeroreg(hr);
3895               }
3896               else {
3897                 emit_movimm(value,hr);
3898               }
3899             }
3900           }
3901         }
3902       }
3903     }
3904   }
3905 }
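// Load a constant into every mapped host register that is dirty and marked
// constant, so all registers hold their real values.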
3906 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
3907 {
3908   int hr;
3909   // Load 32-bit regs
3910   for(hr=0;hr<HOST_REGS;hr++) {
3911     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3912       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3913         int value=constmap[i][hr];
3914         if(value==0) {
3915           emit_zeroreg(hr);
3916         }
3917         else {
3918           emit_movimm(value,hr);
3919         }
3920       }
3921     }
3922   }
3923   // Load 64-bit regs
3924   for(hr=0;hr<HOST_REGS;hr++) {
3925     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3926       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3927         if((is32>>(regmap[hr]&63))&1) {
3928           int lr=get_reg(regmap,regmap[hr]-64);
3929           assert(lr>=0);
3930           emit_sarimm(lr,31,hr);
3931         }
3932         else
3933         {
3934           int value=constmap[i][hr];
3935           if(value==0) {
3936             emit_zeroreg(hr);
3937           }
3938           else {
3939             emit_movimm(value,hr);
3940           }
3941         }
3942       }
3943     }
3944   }
3945 }
3946
3947 // Write out all dirty registers (except cycle count)
3948 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
3949 {
3950   int hr;
3951   for(hr=0;hr<HOST_REGS;hr++) {
3952     if(hr!=EXCLUDE_REG) {
3953       if(i_regmap[hr]>0) {
3954         if(i_regmap[hr]!=CCREG) {
3955           if((i_dirty>>hr)&1) {
3956             if(i_regmap[hr]<64) {
3957               emit_storereg(i_regmap[hr],hr);
3958             }else{
3959               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3960                 emit_storereg(i_regmap[hr],hr);
3961               }
3962             }
3963           }
3964         }
3965       }
3966     }
3967   }
3968 }
3969 // Write out dirty registers that we need to reload (pair with load_needed_regs)
3970 // This writes the registers not written by store_regs_bt
3971 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
3972 {
3973   int hr;
3974   int t=(addr-start)>>2;
3975   for(hr=0;hr<HOST_REGS;hr++) {
3976     if(hr!=EXCLUDE_REG) {
3977       if(i_regmap[hr]>0) {
3978         if(i_regmap[hr]!=CCREG) {
3979           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
3980             if((i_dirty>>hr)&1) {
3981               if(i_regmap[hr]<64) {
3982                 emit_storereg(i_regmap[hr],hr);
3983               }else{
3984                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3985                   emit_storereg(i_regmap[hr],hr);
3986                 }
3987               }
3988             }
3989           }
3990         }
3991       }
3992     }
3993   }
3994 }
3995
3996 // Load all registers (except cycle count)
3997 void load_all_regs(signed char i_regmap[])
3998 {
3999   int hr;
4000   for(hr=0;hr<HOST_REGS;hr++) {
4001     if(hr!=EXCLUDE_REG) {
4002       if(i_regmap[hr]==0) {
4003         emit_zeroreg(hr);
4004       }
4005       else
4006       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4007       {
4008         emit_loadreg(i_regmap[hr],hr);
4009       }
4010     }
4011   }
4012 }
4013
4014 // Load all current registers that are also needed by the next instruction
4015 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4016 {
4017   int hr;
4018   for(hr=0;hr<HOST_REGS;hr++) {
4019     if(hr!=EXCLUDE_REG) {
4020       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4021         if(i_regmap[hr]==0) {
4022           emit_zeroreg(hr);
4023         }
4024         else
4025         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4026         {
4027           emit_loadreg(i_regmap[hr],hr);
4028         }
4029       }
4030     }
4031   }
4032 }
4033
4034 // Load all regs, storing cycle count if necessary
4035 void load_regs_entry(int t)
4036 {
4037   int hr;
4038   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4039   else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
4040   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4041     emit_storereg(CCREG,HOST_CCREG);
4042   }
4043   // Load 32-bit regs
4044   for(hr=0;hr<HOST_REGS;hr++) {
4045     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4046       if(regs[t].regmap_entry[hr]==0) {
4047         emit_zeroreg(hr);
4048       }
4049       else if(regs[t].regmap_entry[hr]!=CCREG)
4050       {
4051         emit_loadreg(regs[t].regmap_entry[hr],hr);
4052       }
4053     }
4054   }
4055   // Load 64-bit regs
4056   for(hr=0;hr<HOST_REGS;hr++) {
4057     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4058       assert(regs[t].regmap_entry[hr]!=64);
4059       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4060         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4061         if(lr<0) {
4062           emit_loadreg(regs[t].regmap_entry[hr],hr);
4063         }
4064         else
4065         {
4066           emit_sarimm(lr,31,hr);
4067         }
4068       }
4069       else
4070       {
4071         emit_loadreg(regs[t].regmap_entry[hr],hr);
4072       }
4073     }
4074   }
4075 }
4076
4077 // Store dirty registers prior to branch
4078 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4079 {
4080   if(internal_branch(i_is32,addr))
4081   {
4082     int t=(addr-start)>>2;
4083     int hr;
4084     for(hr=0;hr<HOST_REGS;hr++) {
4085       if(hr!=EXCLUDE_REG) {
4086         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4087           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4088             if((i_dirty>>hr)&1) {
4089               if(i_regmap[hr]<64) {
4090                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4091                   emit_storereg(i_regmap[hr],hr);
4092                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4093                     #ifdef DESTRUCTIVE_WRITEBACK
4094                     emit_sarimm(hr,31,hr);
4095                     emit_storereg(i_regmap[hr]|64,hr);
4096                     #else
4097                     emit_sarimm(hr,31,HOST_TEMPREG);
4098                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4099                     #endif
4100                   }
4101                 }
4102               }else{
4103                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4104                   emit_storereg(i_regmap[hr],hr);
4105                 }
4106               }
4107             }
4108           }
4109         }
4110       }
4111     }
4112   }
4113   else
4114   {
4115     // Branch out of this block, write out all dirty regs
4116     wb_dirtys(i_regmap,i_is32,i_dirty);
4117   }
4118 }
4119
4120 // Load all needed registers for branch target
4121 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4122 {
4123   //if(addr>=start && addr<(start+slen*4))
4124   if(internal_branch(i_is32,addr))
4125   {
4126     int t=(addr-start)>>2;
4127     int hr;
4128     // Store the cycle count before loading something else
4129     if(i_regmap[HOST_CCREG]!=CCREG) {
4130       assert(i_regmap[HOST_CCREG]==-1);
4131     }
4132     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4133       emit_storereg(CCREG,HOST_CCREG);
4134     }
4135     // Load 32-bit regs
4136     for(hr=0;hr<HOST_REGS;hr++) {
4137       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4138         #ifdef DESTRUCTIVE_WRITEBACK
4139         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4140         #else
4141         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4142         #endif
4143           if(regs[t].regmap_entry[hr]==0) {
4144             emit_zeroreg(hr);
4145           }
4146           else if(regs[t].regmap_entry[hr]!=CCREG)
4147           {
4148             emit_loadreg(regs[t].regmap_entry[hr],hr);
4149           }
4150         }
4151       }
4152     }
4153     // Load 64-bit regs
4154     for(hr=0;hr<HOST_REGS;hr++) {
4155       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4156         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4157           assert(regs[t].regmap_entry[hr]!=64);
4158           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4159             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4160             if(lr<0) {
4161               emit_loadreg(regs[t].regmap_entry[hr],hr);
4162             }
4163             else
4164             {
4165               emit_sarimm(lr,31,hr);
4166             }
4167           }
4168           else
4169           {
4170             emit_loadreg(regs[t].regmap_entry[hr],hr);
4171           }
4172         }
4173         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4174           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4175           assert(lr>=0);
4176           emit_sarimm(lr,31,hr);
4177         }
4178       }
4179     }
4180   }
4181 }
4182
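// Does the current register state match what the branch target expects?
// For internal targets the mapped registers, dirty bits and 32/64-bit state
// must agree with the recorded entry state (delay slots never match); for
// external targets everything must already be written back.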
4183 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4184 {
4185   if(addr>=start && addr<start+slen*4-4)
4186   {
4187     int t=(addr-start)>>2;
4188     int hr;
4189     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4190     for(hr=0;hr<HOST_REGS;hr++)
4191     {
4192       if(hr!=EXCLUDE_REG)
4193       {
4194         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4195         {
4196           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4197           {
4198             return 0;
4199           }
4200           else
4201           if((i_dirty>>hr)&1)
4202           {
4203             if(i_regmap[hr]<TEMPREG)
4204             {
4205               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4206                 return 0;
4207             }
4208             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4209             {
4210               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4211                 return 0;
4212             }
4213           }
4214         }
4215         else // Same register but is it 32-bit or dirty?
4216         if(i_regmap[hr]>=0)
4217         {
4218           if(!((regs[t].dirty>>hr)&1))
4219           {
4220             if((i_dirty>>hr)&1)
4221             {
4222               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4223               {
4224                 //printf("%x: dirty no match\n",addr);
4225                 return 0;
4226               }
4227             }
4228           }
4229           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4230           {
4231             //printf("%x: is32 no match\n",addr);
4232             return 0;
4233           }
4234         }
4235       }
4236     }
4237     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4238     // Delay slots are not valid branch targets
4239     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4240     // Delay slots require additional processing, so do not match
4241     if(is_ds[t]) return 0;
4242   }
4243   else
4244   {
4245     int hr;
4246     for(hr=0;hr<HOST_REGS;hr++)
4247     {
4248       if(hr!=EXCLUDE_REG)
4249       {
4250         if(i_regmap[hr]>=0)
4251         {
4252           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4253           {
4254             if((i_dirty>>hr)&1)
4255             {
4256               return 0;
4257             }
4258           }
4259         }
4260       }
4261     }
4262   }
4263   return 1;
4264 }
4265
4266 #ifdef DRC_DBG
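// Debug helper: spill the live registers, record the current PC in pcaddr,
// call do_insn_cmp to cross-check the dynarec state, then restore the
// registers.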
4267 static void drc_dbg_emit_do_cmp(int i)
4268 {
4269   extern void do_insn_cmp();
4270   extern int cycle;
4271   u_int hr,reglist=0;
4272
4273   for(hr=0;hr<HOST_REGS;hr++)
4274     if(regs[i].regmap[hr]>=0) reglist|=1<<hr;
4275   save_regs(reglist);
4276   emit_movimm(start+i*4,0);
4277   emit_writeword(0,(int)&pcaddr);
4278   emit_call((int)do_insn_cmp);
4279   //emit_readword((int)&cycle,0);
4280   //emit_addimm(0,2,0);
4281   //emit_writeword(0,(int)&cycle);
4282   restore_regs(reglist);
4283 }
4284 #else
4285 #define drc_dbg_emit_do_cmp(x)
4286 #endif
4287
4288 // Used when a branch jumps into the delay slot of another branch
4289 void ds_assemble_entry(int i)
4290 {
4291   int t=(ba[i]-start)>>2;
4292   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4293   assem_debug("Assemble delay slot at %x\n",ba[i]);
4294   assem_debug("<->\n");
4295   drc_dbg_emit_do_cmp(t);
4296   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4297     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4298   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4299   address_generation(t,&regs[t],regs[t].regmap_entry);
4300   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4301     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4302   cop1_usable=0;
4303   is_delayslot=0;
4304   switch(itype[t]) {
4305     case ALU:
4306       alu_assemble(t,&regs[t]);break;
4307     case IMM16:
4308       imm16_assemble(t,&regs[t]);break;
4309     case SHIFT:
4310       shift_assemble(t,&regs[t]);break;
4311     case SHIFTIMM:
4312       shiftimm_assemble(t,&regs[t]);break;
4313     case LOAD:
4314       load_assemble(t,&regs[t]);break;
4315     case LOADLR:
4316       loadlr_assemble(t,&regs[t]);break;
4317     case STORE:
4318       store_assemble(t,&regs[t]);break;
4319     case STORELR:
4320       storelr_assemble(t,&regs[t]);break;
4321     case COP0:
4322       cop0_assemble(t,&regs[t]);break;
4323     case COP1:
4324       cop1_assemble(t,&regs[t]);break;
4325     case C1LS:
4326       c1ls_assemble(t,&regs[t]);break;
4327     case COP2:
4328       cop2_assemble(t,&regs[t]);break;
4329     case C2LS:
4330       c2ls_assemble(t,&regs[t]);break;
4331     case C2OP:
4332       c2op_assemble(t,&regs[t]);break;
4333     case FCONV:
4334       fconv_assemble(t,&regs[t]);break;
4335     case FLOAT:
4336       float_assemble(t,&regs[t]);break;
4337     case FCOMP:
4338       fcomp_assemble(t,&regs[t]);break;
4339     case MULTDIV:
4340       multdiv_assemble(t,&regs[t]);break;
4341     case MOV:
4342       mov_assemble(t,&regs[t]);break;
4343     case SYSCALL:
4344     case HLECALL:
4345     case INTCALL:
4346     case SPAN:
4347     case UJUMP:
4348     case RJUMP:
4349     case CJUMP:
4350     case SJUMP:
4351     case FJUMP:
4352       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4353   }
4354   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4355   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4356   if(internal_branch(regs[t].is32,ba[i]+4))
4357     assem_debug("branch: internal\n");
4358   else
4359     assem_debug("branch: external\n");
4360   assert(internal_branch(regs[t].is32,ba[i]+4));
4361   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4362   emit_jmp(0);
4363 }
4364
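// Emit the cycle-counter update and overflow check for a branch. *adj returns
// the cycles already accounted for at an internal target; a branch to itself
// with a nop in the delay slot is treated as an idle loop.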
4365 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4366 {
4367   int count;
4368   int jaddr;
4369   int idle=0;
4370   int t=0;
4371   if(itype[i]==RJUMP)
4372   {
4373     *adj=0;
4374   }
4375   //if(ba[i]>=start && ba[i]<(start+slen*4))
4376   if(internal_branch(branch_regs[i].is32,ba[i]))
4377   {
4378     t=(ba[i]-start)>>2;
4379     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4380     else *adj=ccadj[t];
4381   }
4382   else
4383   {
4384     *adj=0;
4385   }
4386   count=ccadj[i];
4387   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4388     // Idle loop
4389     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4390     idle=(int)out;
4391     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4392     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4393     jaddr=(int)out;
4394     emit_jmp(0);
4395   }
4396   else if(*adj==0||invert) {
4397     int cycles=CLOCK_ADJUST(count+2);
4398     // faster loop HACK
4399     if (t&&*adj) {
4400       int rel=t-i;
4401       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4402         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4403     }
4404     emit_addimm_and_set_flags(cycles,HOST_CCREG);
4405     jaddr=(int)out;
4406     emit_jns(0);
4407   }
4408   else
4409   {
4410     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4411     jaddr=(int)out;
4412     emit_jns(0);
4413   }
4414   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4415 }
4416
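// Out-of-line path taken when the cycle counter overflows at a branch: write
// back dirty registers, work out the return PC (re-evaluating the condition
// for conditional branches), call cc_interrupt, then reload registers and
// return to the compiled code.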
4417 void do_ccstub(int n)
4418 {
4419   literal_pool(256);
4420   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4421   set_jump_target(stubs[n][1],(int)out);
4422   int i=stubs[n][4];
4423   if(stubs[n][6]==NULLDS) {
4424     // Delay slot instruction is nullified ("likely" branch)
4425     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4426   }
4427   else if(stubs[n][6]!=TAKEN) {
4428     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4429   }
4430   else {
4431     if(internal_branch(branch_regs[i].is32,ba[i]))
4432       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4433   }
4434   if(stubs[n][5]!=-1)
4435   {
4436     // Save PC as return address
4437     emit_movimm(stubs[n][5],EAX);
4438     emit_writeword(EAX,(int)&pcaddr);
4439   }
4440   else
4441   {
4442     // Return address depends on which way the branch goes
4443     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4444     {
4445       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4446       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4447       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4448       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4449       if(rs1[i]==0)
4450       {
4451         s1l=s2l;s1h=s2h;
4452         s2l=s2h=-1;
4453       }
4454       else if(rs2[i]==0)
4455       {
4456         s2l=s2h=-1;
4457       }
4458       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4459         s1h=s2h=-1;
4460       }
4461       assert(s1l>=0);
4462       #ifdef DESTRUCTIVE_WRITEBACK
4463       if(rs1[i]) {
4464         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4465           emit_loadreg(rs1[i],s1l);
4466       }
4467       else {
4468         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4469           emit_loadreg(rs2[i],s1l);
4470       }
4471       if(s2l>=0)
4472         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4473           emit_loadreg(rs2[i],s2l);
4474       #endif
4475       int hr=0;
4476       int addr=-1,alt=-1,ntaddr=-1;
4477       while(hr<HOST_REGS)
4478       {
4479         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4480            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4481            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4482         {
4483           addr=hr++;break;
4484         }
4485         hr++;
4486       }
4487       while(hr<HOST_REGS)
4488       {
4489         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4490            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4491            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4492         {
4493           alt=hr++;break;
4494         }
4495         hr++;
4496       }
4497       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4498       {
4499         while(hr<HOST_REGS)
4500         {
4501           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4502              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4503              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4504           {
4505             ntaddr=hr;break;
4506           }
4507           hr++;
4508         }
4509         assert(hr<HOST_REGS);
4510       }
4511       if((opcode[i]&0x2f)==4) // BEQ
4512       {
4513         #ifdef HAVE_CMOV_IMM
4514         if(s1h<0) {
4515           if(s2l>=0) emit_cmp(s1l,s2l);
4516           else emit_test(s1l,s1l);
4517           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4518         }
4519         else
4520         #endif
4521         {
4522           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4523           if(s1h>=0) {
4524             if(s2h>=0) emit_cmp(s1h,s2h);
4525             else emit_test(s1h,s1h);
4526             emit_cmovne_reg(alt,addr);
4527           }
4528           if(s2l>=0) emit_cmp(s1l,s2l);
4529           else emit_test(s1l,s1l);
4530           emit_cmovne_reg(alt,addr);
4531         }
4532       }
4533       if((opcode[i]&0x2f)==5) // BNE
4534       {
4535         #ifdef HAVE_CMOV_IMM
4536         if(s1h<0) {
4537           if(s2l>=0) emit_cmp(s1l,s2l);
4538           else emit_test(s1l,s1l);
4539           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4540         }
4541         else
4542         #endif
4543         {
4544           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4545           if(s1h>=0) {
4546             if(s2h>=0) emit_cmp(s1h,s2h);
4547             else emit_test(s1h,s1h);
4548             emit_cmovne_reg(alt,addr);
4549           }
4550           if(s2l>=0) emit_cmp(s1l,s2l);
4551           else emit_test(s1l,s1l);
4552           emit_cmovne_reg(alt,addr);
4553         }
4554       }
4555       if((opcode[i]&0x2f)==6) // BLEZ
4556       {
4557         //emit_movimm(ba[i],alt);
4558         //emit_movimm(start+i*4+8,addr);
4559         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4560         emit_cmpimm(s1l,1);
4561         if(s1h>=0) emit_mov(addr,ntaddr);
4562         emit_cmovl_reg(alt,addr);
4563         if(s1h>=0) {
4564           emit_test(s1h,s1h);
4565           emit_cmovne_reg(ntaddr,addr);
4566           emit_cmovs_reg(alt,addr);
4567         }
4568       }
4569       if((opcode[i]&0x2f)==7) // BGTZ
4570       {
4571         //emit_movimm(ba[i],addr);
4572         //emit_movimm(start+i*4+8,ntaddr);
4573         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4574         emit_cmpimm(s1l,1);
4575         if(s1h>=0) emit_mov(addr,alt);
4576         emit_cmovl_reg(ntaddr,addr);
4577         if(s1h>=0) {
4578           emit_test(s1h,s1h);
4579           emit_cmovne_reg(alt,addr);
4580           emit_cmovs_reg(ntaddr,addr);
4581         }
4582       }
4583       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4584       {
4585         //emit_movimm(ba[i],alt);
4586         //emit_movimm(start+i*4+8,addr);
4587         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4588         if(s1h>=0) emit_test(s1h,s1h);
4589         else emit_test(s1l,s1l);
4590         emit_cmovs_reg(alt,addr);
4591       }
4592       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4593       {
4594         //emit_movimm(ba[i],addr);
4595         //emit_movimm(start+i*4+8,alt);
4596         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4597         if(s1h>=0) emit_test(s1h,s1h);
4598         else emit_test(s1l,s1l);
4599         emit_cmovs_reg(alt,addr);
4600       }
4601       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4602         if(source[i]&0x10000) // BC1T
4603         {
4604           //emit_movimm(ba[i],alt);
4605           //emit_movimm(start+i*4+8,addr);
4606           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4607           emit_testimm(s1l,0x800000);
4608           emit_cmovne_reg(alt,addr);
4609         }
4610         else // BC1F
4611         {
4612           //emit_movimm(ba[i],addr);
4613           //emit_movimm(start+i*4+8,alt);
4614           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4615           emit_testimm(s1l,0x800000);
4616           emit_cmovne_reg(alt,addr);
4617         }
4618       }
4619       emit_writeword(addr,(int)&pcaddr);
4620     }
4621     else
4622     if(itype[i]==RJUMP)
4623     {
4624       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4625       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4626         r=get_reg(branch_regs[i].regmap,RTEMP);
4627       }
4628       emit_writeword(r,(int)&pcaddr);
4629     }
4630     else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
4631   }
4632   // Update cycle count
4633   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4634   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4635   emit_call((int)cc_interrupt);
4636   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4637   if(stubs[n][6]==TAKEN) {
4638     if(internal_branch(branch_regs[i].is32,ba[i]))
4639       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4640     else if(itype[i]==RJUMP) {
4641       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4642         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4643       else
4644         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4645     }
4646   }else if(stubs[n][6]==NOTTAKEN) {
4647     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4648     else load_all_regs(branch_regs[i].regmap);
4649   }else if(stubs[n][6]==NULLDS) {
4650     // Delay slot instruction is nullified ("likely" branch)
4651     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4652     else load_all_regs(regs[i].regmap);
4653   }else{
4654     load_all_regs(branch_regs[i].regmap);
4655   }
4656   emit_jmp(stubs[n][2]); // return address
4657
4658   /* This works but uses a lot of memory...
4659   emit_readword((int)&last_count,ECX);
4660   emit_add(HOST_CCREG,ECX,EAX);
4661   emit_writeword(EAX,(int)&Count);
4662   emit_call((int)gen_interupt);
4663   emit_readword((int)&Count,HOST_CCREG);
4664   emit_readword((int)&next_interupt,EAX);
4665   emit_readword((int)&pending_exception,EBX);
4666   emit_writeword(EAX,(int)&last_count);
4667   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4668   emit_test(EBX,EBX);
4669   int jne_instr=(int)out;
4670   emit_jne(0);
4671   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4672   load_all_regs(branch_regs[i].regmap);
4673   emit_jmp(stubs[n][2]); // return address
4674   set_jump_target(jne_instr,(int)out);
4675   emit_readword((int)&pcaddr,EAX);
4676   // Call get_addr_ht instead of doing the hash table here.
4677   // This code is executed infrequently and takes up a lot of space
4678   // so smaller is better.
4679   emit_storereg(CCREG,HOST_CCREG);
4680   emit_pushreg(EAX);
4681   emit_call((int)get_addr_ht);
4682   emit_loadreg(CCREG,HOST_CCREG);
4683   emit_addimm(ESP,4,ESP);
4684   emit_jmpreg(EAX);*/
4685 }
4686
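// Record a direct jump (its location, target and a flag) in link_addr[] so
// the block linker can patch it once assembly of the block finishes.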
4687 static void add_to_linker(int addr,int target,int ext)
4688 {
4689   link_addr[linkcount][0]=addr;
4690   link_addr[linkcount][1]=target;
4691   link_addr[linkcount][2]=ext;
4692   linkcount++;
4693 }
4694
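// JAL: write the return address (PC+8) into the host register holding $31;
// with USE_MINI_HT it may also be inserted into the mini hash table.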
4695 static void ujump_assemble_write_ra(int i)
4696 {
4697   int rt;
4698   unsigned int return_address;
4699   rt=get_reg(branch_regs[i].regmap,31);
4700   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4701   //assert(rt>=0);
4702   return_address=start+i*4+8;
4703   if(rt>=0) {
4704     #ifdef USE_MINI_HT
4705     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
4706       int temp=-1; // note: must be ds-safe
4707       #ifdef HOST_TEMPREG
4708       temp=HOST_TEMPREG;
4709       #endif
4710       if(temp>=0) do_miniht_insert(return_address,rt,temp);
4711       else emit_movimm(return_address,rt);
4712     }
4713     else
4714     #endif
4715     {
4716       #ifdef REG_PREFETCH
4717       if(temp>=0)
4718       {
4719         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4720       }
4721       #endif
4722       emit_movimm(return_address,rt); // PC into link register
4723       #ifdef IMM_PREFETCH
4724       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4725       #endif
4726     }
4727   }
4728 }
4729
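// J/JAL: assemble the delay slot, write the return address if needed, update
// the cycle count and emit the (possibly block-internal) jump.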
4730 void ujump_assemble(int i,struct regstat *i_regs)
4731 {
4732   int ra_done=0;
4733   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4734   address_generation(i+1,i_regs,regs[i].regmap_entry);
4735   #ifdef REG_PREFETCH
4736   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4737   if(rt1[i]==31&&temp>=0)
4738   {
4739     signed char *i_regmap=i_regs->regmap;
4740     int return_address=start+i*4+8;
4741     if(get_reg(branch_regs[i].regmap,31)>0)
4742     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4743   }
4744   #endif
4745   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4746     ujump_assemble_write_ra(i); // writeback ra for DS
4747     ra_done=1;
4748   }
4749   ds_assemble(i+1,i_regs);
4750   uint64_t bc_unneeded=branch_regs[i].u;
4751   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4752   bc_unneeded|=1|(1LL<<rt1[i]);
4753   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4754   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4755                 bc_unneeded,bc_unneeded_upper);
4756   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4757   if(!ra_done&&rt1[i]==31)
4758     ujump_assemble_write_ra(i);
4759   int cc,adj;
4760   cc=get_reg(branch_regs[i].regmap,CCREG);
4761   assert(cc==HOST_CCREG);
4762   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4763   #ifdef REG_PREFETCH
4764   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4765   #endif
4766   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4767   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4768   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4769   if(internal_branch(branch_regs[i].is32,ba[i]))
4770     assem_debug("branch: internal\n");
4771   else
4772     assem_debug("branch: external\n");
4773   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
4774     ds_assemble_entry(i);
4775   }
4776   else {
4777     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
4778     emit_jmp(0);
4779   }
4780 }
4781
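// JALR: write the return address (PC+8) into the link register selected by
// the instruction.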
4782 static void rjump_assemble_write_ra(int i)
4783 {
4784   int rt,return_address;
4785   assert(rt1[i+1]!=rt1[i]);
4786   assert(rt2[i+1]!=rt1[i]);
4787   rt=get_reg(branch_regs[i].regmap,rt1[i]);
4788   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4789   assert(rt>=0);
4790   return_address=start+i*4+8;
4791   #ifdef REG_PREFETCH
4792   if(temp>=0)
4793   {
4794     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4795   }
4796   #endif
4797   emit_movimm(return_address,rt); // PC into link register
4798   #ifdef IMM_PREFETCH
4799   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4800   #endif
4801 }
4802
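// JR/JALR: indirect jump through a register; the target is resolved at run
// time through jump_vaddr (or the mini hash table when enabled).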
4803 void rjump_assemble(int i,struct regstat *i_regs)
4804 {
4805   int temp;
4806   int rs,cc;
4807   int ra_done=0;
4808   rs=get_reg(branch_regs[i].regmap,rs1[i]);
4809   assert(rs>=0);
4810   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4811     // Delay slot abuse, make a copy of the branch address register
4812     temp=get_reg(branch_regs[i].regmap,RTEMP);
4813     assert(temp>=0);
4814     assert(regs[i].regmap[temp]==RTEMP);
4815     emit_mov(rs,temp);
4816     rs=temp;
4817   }
4818   address_generation(i+1,i_regs,regs[i].regmap_entry);
4819   #ifdef REG_PREFETCH
4820   if(rt1[i]==31)
4821   {
4822     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
4823       signed char *i_regmap=i_regs->regmap;
4824       int return_address=start+i*4+8;
4825       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4826     }
4827   }
4828   #endif
4829   #ifdef USE_MINI_HT
4830   if(rs1[i]==31) {
4831     int rh=get_reg(regs[i].regmap,RHASH);
4832     if(rh>=0) do_preload_rhash(rh);
4833   }
4834   #endif
4835   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4836     rjump_assemble_write_ra(i);
4837     ra_done=1;
4838   }
4839   ds_assemble(i+1,i_regs);
4840   uint64_t bc_unneeded=branch_regs[i].u;
4841   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4842   bc_unneeded|=1|(1LL<<rt1[i]);
4843   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4844   bc_unneeded&=~(1LL<<rs1[i]);
4845   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4846                 bc_unneeded,bc_unneeded_upper);
4847   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
4848   if(!ra_done&&rt1[i]!=0)
4849     rjump_assemble_write_ra(i);
4850   cc=get_reg(branch_regs[i].regmap,CCREG);
4851   assert(cc==HOST_CCREG);
4852   (void)cc;
4853   #ifdef USE_MINI_HT
4854   int rh=get_reg(branch_regs[i].regmap,RHASH);
4855   int ht=get_reg(branch_regs[i].regmap,RHTBL);
4856   if(rs1[i]==31) {
4857     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
4858     do_preload_rhtbl(ht);
4859     do_rhash(rs,rh);
4860   }
4861   #endif
4862   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4863   #ifdef DESTRUCTIVE_WRITEBACK
4864   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
4865     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
4866       emit_loadreg(rs1[i],rs);
4867     }
4868   }
4869   #endif
4870   #ifdef REG_PREFETCH
4871   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4872   #endif
4873   #ifdef USE_MINI_HT
4874   if(rs1[i]==31) {
4875     do_miniht_load(ht,rh);
4876   }
4877   #endif
4878   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
4879   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
4880   //assert(adj==0);
4881   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4882   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
4883   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
4884     // special case for RFE
4885     emit_jmp(0);
4886   else
4887     emit_jns(0);
4888   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4889   #ifdef USE_MINI_HT
4890   if(rs1[i]==31) {
4891     do_miniht_jump(rs,rh,ht);
4892   }
4893   else
4894   #endif
4895   {
4896     //if(rs!=EAX) emit_mov(rs,EAX);
4897     //emit_jmp((int)jump_vaddr_eax);
4898     emit_jmp(jump_vaddr_reg[rs]);
4899   }
4900   /* Check hash table
4901   temp=!rs;
4902   emit_mov(rs,temp);
4903   emit_shrimm(rs,16,rs);
4904   emit_xor(temp,rs,rs);
4905   emit_movzwl_reg(rs,rs);
4906   emit_shlimm(rs,4,rs);
4907   emit_cmpmem_indexed((int)hash_table,rs,temp);
4908   emit_jne((int)out+14);
4909   emit_readword_indexed((int)hash_table+4,rs,rs);
4910   emit_jmpreg(rs);
4911   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
4912   emit_addimm_no_flags(8,rs);
4913   emit_jeq((int)out-17);
4914   // No hit on hash table, call compiler
4915   emit_pushreg(temp);
4916 //DEBUG >
4917 #ifdef DEBUG_CYCLE_COUNT
4918   emit_readword((int)&last_count,ECX);
4919   emit_add(HOST_CCREG,ECX,HOST_CCREG);
4920   emit_readword((int)&next_interupt,ECX);
4921   emit_writeword(HOST_CCREG,(int)&Count);
4922   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
4923   emit_writeword(ECX,(int)&last_count);
4924 #endif
4925 //DEBUG <
4926   emit_storereg(CCREG,HOST_CCREG);
4927   emit_call((int)get_addr);
4928   emit_loadreg(CCREG,HOST_CCREG);
4929   emit_addimm(ESP,4,ESP);
4930   emit_jmpreg(EAX);*/
4931   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4932   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
4933   #endif
4934 }
4935
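// BEQ/BNE/BLEZ/BGTZ: emit the compare and the taken/not-taken paths; the
// delay slot is assembled before the compare when the branch is marked
// out-of-order (ooo[i]).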
4936 void cjump_assemble(int i,struct regstat *i_regs)
4937 {
4938   signed char *i_regmap=i_regs->regmap;
4939   int cc;
4940   int match;
4941   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4942   assem_debug("match=%d\n",match);
4943   int s1h,s1l,s2h,s2l;
4944   int prev_cop1_usable=cop1_usable;
4945   int unconditional=0,nop=0;
4946   int only32=0;
4947   int invert=0;
4948   int internal=internal_branch(branch_regs[i].is32,ba[i]);
4949   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4950   if(!match) invert=1;
4951   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4952   if(i>(ba[i]-start)>>2) invert=1;
4953   #endif
4954
4955   if(ooo[i]) {
4956     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4957     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4958     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4959     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4960   }
4961   else {
4962     s1l=get_reg(i_regmap,rs1[i]);
4963     s1h=get_reg(i_regmap,rs1[i]|64);
4964     s2l=get_reg(i_regmap,rs2[i]);
4965     s2h=get_reg(i_regmap,rs2[i]|64);
4966   }
4967   if(rs1[i]==0&&rs2[i]==0)
4968   {
4969     if(opcode[i]&1) nop=1;
4970     else unconditional=1;
4971     //assert(opcode[i]!=5);
4972     //assert(opcode[i]!=7);
4973     //assert(opcode[i]!=0x15);
4974     //assert(opcode[i]!=0x17);
4975   }
4976   else if(rs1[i]==0)
4977   {
4978     s1l=s2l;s1h=s2h;
4979     s2l=s2h=-1;
4980     only32=(regs[i].was32>>rs2[i])&1;
4981   }
4982   else if(rs2[i]==0)
4983   {
4984     s2l=s2h=-1;
4985     only32=(regs[i].was32>>rs1[i])&1;
4986   }
4987   else {
4988     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
4989   }
4990
4991   if(ooo[i]) {
4992     // Out of order execution (delay slot first)
4993     //printf("OOOE\n");
4994     address_generation(i+1,i_regs,regs[i].regmap_entry);
4995     ds_assemble(i+1,i_regs);
4996     int adj;
4997     uint64_t bc_unneeded=branch_regs[i].u;
4998     uint64_t bc_unneeded_upper=branch_regs[i].uu;
4999     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5000     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5001     bc_unneeded|=1;
5002     bc_unneeded_upper|=1;
5003     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5004                   bc_unneeded,bc_unneeded_upper);
5005     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5006     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5007     cc=get_reg(branch_regs[i].regmap,CCREG);
5008     assert(cc==HOST_CCREG);
5009     if(unconditional)
5010       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5011     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5012     //assem_debug("cycle count (adj)\n");
5013     if(unconditional) {
5014       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5015       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5016         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5017         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5018         if(internal)
5019           assem_debug("branch: internal\n");
5020         else
5021           assem_debug("branch: external\n");
5022         if(internal&&is_ds[(ba[i]-start)>>2]) {
5023           ds_assemble_entry(i);
5024         }
5025         else {
5026           add_to_linker((int)out,ba[i],internal);
5027           emit_jmp(0);
5028         }
5029         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5030         if(((u_int)out)&7) emit_addnop(0);
5031         #endif
5032       }
5033     }
5034     else if(nop) {
5035       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5036       int jaddr=(int)out;
5037       emit_jns(0);
5038       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5039     }
5040     else {
5041       int taken=0,nottaken=0,nottaken1=0;
5042       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5043       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5044       if(!only32)
5045       {
5046         assert(s1h>=0);
5047         if(opcode[i]==4) // BEQ
5048         {
5049           if(s2h>=0) emit_cmp(s1h,s2h);
5050           else emit_test(s1h,s1h);
5051           nottaken1=(int)out;
5052           emit_jne(1);
5053         }
5054         if(opcode[i]==5) // BNE
5055         {
5056           if(s2h>=0) emit_cmp(s1h,s2h);
5057           else emit_test(s1h,s1h);
5058           if(invert) taken=(int)out;
5059           else add_to_linker((int)out,ba[i],internal);
5060           emit_jne(0);
5061         }
5062         if(opcode[i]==6) // BLEZ
5063         {
5064           emit_test(s1h,s1h);
5065           if(invert) taken=(int)out;
5066           else add_to_linker((int)out,ba[i],internal);
5067           emit_js(0);
5068           nottaken1=(int)out;
5069           emit_jne(1);
5070         }
5071         if(opcode[i]==7) // BGTZ
5072         {
5073           emit_test(s1h,s1h);
5074           nottaken1=(int)out;
5075           emit_js(1);
5076           if(invert) taken=(int)out;
5077           else add_to_linker((int)out,ba[i],internal);
5078           emit_jne(0);
5079         }
5080       } // if(!only32)
5081
5082       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5083       assert(s1l>=0);
5084       if(opcode[i]==4) // BEQ
5085       {
5086         if(s2l>=0) emit_cmp(s1l,s2l);
5087         else emit_test(s1l,s1l);
5088         if(invert){
5089           nottaken=(int)out;
5090           emit_jne(1);
5091         }else{
5092           add_to_linker((int)out,ba[i],internal);
5093           emit_jeq(0);
5094         }
5095       }
5096       if(opcode[i]==5) // BNE
5097       {
5098         if(s2l>=0) emit_cmp(s1l,s2l);
5099         else emit_test(s1l,s1l);
5100         if(invert){
5101           nottaken=(int)out;
5102           emit_jeq(1);
5103         }else{
5104           add_to_linker((int)out,ba[i],internal);
5105           emit_jne(0);
5106         }
5107       }
5108       if(opcode[i]==6) // BLEZ
5109       {
5110         emit_cmpimm(s1l,1);
5111         if(invert){
5112           nottaken=(int)out;
5113           emit_jge(1);
5114         }else{
5115           add_to_linker((int)out,ba[i],internal);
5116           emit_jl(0);
5117         }
5118       }
5119       if(opcode[i]==7) // BGTZ
5120       {
5121         emit_cmpimm(s1l,1);
5122         if(invert){
5123           nottaken=(int)out;
5124           emit_jl(1);
5125         }else{
5126           add_to_linker((int)out,ba[i],internal);
5127           emit_jge(0);
5128         }
5129       }
5130       if(invert) {
5131         if(taken) set_jump_target(taken,(int)out);
5132         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5133         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5134           if(adj) {
5135             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5136             add_to_linker((int)out,ba[i],internal);
5137           }else{
5138             emit_addnop(13);
5139             add_to_linker((int)out,ba[i],internal*2);
5140           }
5141           emit_jmp(0);
5142         }else
5143         #endif
5144         {
5145           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5146           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5147           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5148           if(internal)
5149             assem_debug("branch: internal\n");
5150           else
5151             assem_debug("branch: external\n");
5152           if(internal&&is_ds[(ba[i]-start)>>2]) {
5153             ds_assemble_entry(i);
5154           }
5155           else {
5156             add_to_linker((int)out,ba[i],internal);
5157             emit_jmp(0);
5158           }
5159         }
5160         set_jump_target(nottaken,(int)out);
5161       }
5162
5163       if(nottaken1) set_jump_target(nottaken1,(int)out);
5164       if(adj) {
5165         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5166       }
5167     } // (!unconditional)
5168   } // if(ooo)
5169   else
5170   {
5171     // In-order execution (branch first)
5172     //if(likely[i]) printf("IOL\n");
5173     //else
5174     //printf("IOE\n");
5175     int taken=0,nottaken=0,nottaken1=0;
5176     if(!unconditional&&!nop) {
5177       if(!only32)
5178       {
5179         assert(s1h>=0);
5180         if((opcode[i]&0x2f)==4) // BEQ
5181         {
5182           if(s2h>=0) emit_cmp(s1h,s2h);
5183           else emit_test(s1h,s1h);
5184           nottaken1=(int)out;
5185           emit_jne(2);
5186         }
5187         if((opcode[i]&0x2f)==5) // BNE
5188         {
5189           if(s2h>=0) emit_cmp(s1h,s2h);
5190           else emit_test(s1h,s1h);
5191           taken=(int)out;
5192           emit_jne(1);
5193         }
5194         if((opcode[i]&0x2f)==6) // BLEZ
5195         {
5196           emit_test(s1h,s1h);
5197           taken=(int)out;
5198           emit_js(1);
5199           nottaken1=(int)out;
5200           emit_jne(2);
5201         }
5202         if((opcode[i]&0x2f)==7) // BGTZ
5203         {
5204           emit_test(s1h,s1h);
5205           nottaken1=(int)out;
5206           emit_js(2);
5207           taken=(int)out;
5208           emit_jne(1);
5209         }
5210       } // if(!only32)
5211
5212       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5213       assert(s1l>=0);
5214       if((opcode[i]&0x2f)==4) // BEQ
5215       {
5216         if(s2l>=0) emit_cmp(s1l,s2l);
5217         else emit_test(s1l,s1l);
5218         nottaken=(int)out;
5219         emit_jne(2);
5220       }
5221       if((opcode[i]&0x2f)==5) // BNE
5222       {
5223         if(s2l>=0) emit_cmp(s1l,s2l);
5224         else emit_test(s1l,s1l);
5225         nottaken=(int)out;
5226         emit_jeq(2);
5227       }
5228       if((opcode[i]&0x2f)==6) // BLEZ
5229       {
5230         emit_cmpimm(s1l,1);
5231         nottaken=(int)out;
5232         emit_jge(2);
5233       }
5234       if((opcode[i]&0x2f)==7) // BGTZ
5235       {
5236         emit_cmpimm(s1l,1);
5237         nottaken=(int)out;
5238         emit_jl(2);
5239       }
5240     } // if(!unconditional)
5241     int adj;
5242     uint64_t ds_unneeded=branch_regs[i].u;
5243     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5244     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5245     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5246     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5247     ds_unneeded|=1;
5248     ds_unneeded_upper|=1;
5249     // branch taken
5250     if(!nop) {
5251       if(taken) set_jump_target(taken,(int)out);
5252       assem_debug("1:\n");
5253       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5254                     ds_unneeded,ds_unneeded_upper);
5255       // load regs
5256       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5257       address_generation(i+1,&branch_regs[i],0);
5258       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5259       ds_assemble(i+1,&branch_regs[i]);
5260       cc=get_reg(branch_regs[i].regmap,CCREG);
5261       if(cc==-1) {
5262         emit_loadreg(CCREG,cc=HOST_CCREG);
5263         // CHECK: Is the following instruction (fall thru) allocated ok?
5264       }
5265       assert(cc==HOST_CCREG);
5266       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5267       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5268       assem_debug("cycle count (adj)\n");
5269       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5270       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5271       if(internal)
5272         assem_debug("branch: internal\n");
5273       else
5274         assem_debug("branch: external\n");
5275       if(internal&&is_ds[(ba[i]-start)>>2]) {
5276         ds_assemble_entry(i);
5277       }
5278       else {
5279         add_to_linker((int)out,ba[i],internal);
5280         emit_jmp(0);
5281       }
5282     }
5283     // branch not taken
5284     cop1_usable=prev_cop1_usable;
5285     if(!unconditional) {
5286       if(nottaken1) set_jump_target(nottaken1,(int)out);
5287       set_jump_target(nottaken,(int)out);
5288       assem_debug("2:\n");
5289       if(!likely[i]) {
5290         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5291                       ds_unneeded,ds_unneeded_upper);
5292         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5293         address_generation(i+1,&branch_regs[i],0);
5294         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5295         ds_assemble(i+1,&branch_regs[i]);
5296       }
5297       cc=get_reg(branch_regs[i].regmap,CCREG);
5298       if(cc==-1&&!likely[i]) {
5299         // Cycle count isn't in a register, temporarily load it then write it out
5300         emit_loadreg(CCREG,HOST_CCREG);
5301         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5302         int jaddr=(int)out;
5303         emit_jns(0);
5304         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5305         emit_storereg(CCREG,HOST_CCREG);
5306       }
5307       else{
5308         cc=get_reg(i_regmap,CCREG);
5309         assert(cc==HOST_CCREG);
5310         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5311         int jaddr=(int)out;
5312         emit_jns(0);
5313         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5314       }
5315     }
5316   }
5317 }
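/*
 * A rough summary of the scheme above (no new behavior implied): when the
 * register mapping at the branch target is compatible with the current one
 * ("match"), the condition is emitted directly and the taken path jumps
 * straight out through the linker.  Otherwise invert=1 and the inverse
 * condition is emitted to a local not-taken label, so the straight-line
 * code can write back / reload registers for the target before jumping.
 * Roughly, for BEQ rs,rt:
 *
 *   match:                      invert:
 *     cmp  rs,rt                  cmp  rs,rt
 *     beq  <target>               bne  1f
 *     ...fall through             <store/load regs for target>
 *                                 b    <target>
 *                               1:
 *
 * The emit_addimm(cc,CLOCK_ADJUST(...)) calls along both paths keep the
 * cycle counter in HOST_CCREG consistent with whichever path was emitted.
 */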
5318
5319 void sjump_assemble(int i,struct regstat *i_regs)
5320 {
5321   signed char *i_regmap=i_regs->regmap;
5322   int cc;
5323   int match;
5324   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5325   assem_debug("smatch=%d\n",match);
5326   int s1h,s1l;
5327   int prev_cop1_usable=cop1_usable;
5328   int unconditional=0,nevertaken=0;
5329   int only32=0;
5330   int invert=0;
5331   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5332   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5333   if(!match) invert=1;
5334   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5335   if(i>(ba[i]-start)>>2) invert=1;
5336   #endif
5337
5338   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5339   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5340
5341   if(ooo[i]) {
5342     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5343     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5344   }
5345   else {
5346     s1l=get_reg(i_regmap,rs1[i]);
5347     s1h=get_reg(i_regmap,rs1[i]|64);
5348   }
5349   if(rs1[i]==0)
5350   {
5351     if(opcode2[i]&1) unconditional=1;
5352     else nevertaken=1;
5353     // These are never taken (r0 is never less than zero)
5354     //assert(opcode2[i]!=0);
5355     //assert(opcode2[i]!=2);
5356     //assert(opcode2[i]!=0x10);
5357     //assert(opcode2[i]!=0x12);
5358   }
5359   else {
5360     only32=(regs[i].was32>>rs1[i])&1;
5361   }
5362
5363   if(ooo[i]) {
5364     // Out of order execution (delay slot first)
5365     //printf("OOOE\n");
5366     address_generation(i+1,i_regs,regs[i].regmap_entry);
5367     ds_assemble(i+1,i_regs);
5368     int adj;
5369     uint64_t bc_unneeded=branch_regs[i].u;
5370     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5371     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5372     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5373     bc_unneeded|=1;
5374     bc_unneeded_upper|=1;
5375     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5376                   bc_unneeded,bc_unneeded_upper);
5377     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5378     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5379     if(rt1[i]==31) {
5380       int rt,return_address;
5381       rt=get_reg(branch_regs[i].regmap,31);
5382       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5383       if(rt>=0) {
5384         // Save the PC even if the branch is not taken
5385         return_address=start+i*4+8;
5386         emit_movimm(return_address,rt); // PC into link register
5387         #ifdef IMM_PREFETCH
5388         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5389         #endif
5390       }
5391     }
5392     cc=get_reg(branch_regs[i].regmap,CCREG);
5393     assert(cc==HOST_CCREG);
5394     if(unconditional)
5395       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5396     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5397     assem_debug("cycle count (adj)\n");
5398     if(unconditional) {
5399       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5400       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5401         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5402         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5403         if(internal)
5404           assem_debug("branch: internal\n");
5405         else
5406           assem_debug("branch: external\n");
5407         if(internal&&is_ds[(ba[i]-start)>>2]) {
5408           ds_assemble_entry(i);
5409         }
5410         else {
5411           add_to_linker((int)out,ba[i],internal);
5412           emit_jmp(0);
5413         }
5414         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5415         if(((u_int)out)&7) emit_addnop(0);
5416         #endif
5417       }
5418     }
5419     else if(nevertaken) {
5420       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5421       int jaddr=(int)out;
5422       emit_jns(0);
5423       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5424     }
5425     else {
5426       int nottaken=0;
5427       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5428       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5429       if(!only32)
5430       {
5431         assert(s1h>=0);
5432         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5433         {
5434           emit_test(s1h,s1h);
5435           if(invert){
5436             nottaken=(int)out;
5437             emit_jns(1);
5438           }else{
5439             add_to_linker((int)out,ba[i],internal);
5440             emit_js(0);
5441           }
5442         }
5443         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5444         {
5445           emit_test(s1h,s1h);
5446           if(invert){
5447             nottaken=(int)out;
5448             emit_js(1);
5449           }else{
5450             add_to_linker((int)out,ba[i],internal);
5451             emit_jns(0);
5452           }
5453         }
5454       } // if(!only32)
5455       else
5456       {
5457         assert(s1l>=0);
5458         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5459         {
5460           emit_test(s1l,s1l);
5461           if(invert){
5462             nottaken=(int)out;
5463             emit_jns(1);
5464           }else{
5465             add_to_linker((int)out,ba[i],internal);
5466             emit_js(0);
5467           }
5468         }
5469         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5470         {
5471           emit_test(s1l,s1l);
5472           if(invert){
5473             nottaken=(int)out;
5474             emit_js(1);
5475           }else{
5476             add_to_linker((int)out,ba[i],internal);
5477             emit_jns(0);
5478           }
5479         }
5480       } // if(only32)
5481
5482       if(invert) {
5483         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5484         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5485           if(adj) {
5486             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5487             add_to_linker((int)out,ba[i],internal);
5488           }else{
5489             emit_addnop(13);
5490             add_to_linker((int)out,ba[i],internal*2);
5491           }
5492           emit_jmp(0);
5493         }else
5494         #endif
5495         {
5496           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5497           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5498           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5499           if(internal)
5500             assem_debug("branch: internal\n");
5501           else
5502             assem_debug("branch: external\n");
5503           if(internal&&is_ds[(ba[i]-start)>>2]) {
5504             ds_assemble_entry(i);
5505           }
5506           else {
5507             add_to_linker((int)out,ba[i],internal);
5508             emit_jmp(0);
5509           }
5510         }
5511         set_jump_target(nottaken,(int)out);
5512       }
5513
5514       if(adj) {
5515         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5516       }
5517     } // (!unconditional)
5518   } // if(ooo)
5519   else
5520   {
5521     // In-order execution (branch first)
5522     //printf("IOE\n");
5523     int nottaken=0;
5524     if(rt1[i]==31) {
5525       int rt,return_address;
5526       rt=get_reg(branch_regs[i].regmap,31);
5527       if(rt>=0) {
5528         // Save the PC even if the branch is not taken
5529         return_address=start+i*4+8;
5530         emit_movimm(return_address,rt); // PC into link register
5531         #ifdef IMM_PREFETCH
5532         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5533         #endif
5534       }
5535     }
5536     if(!unconditional) {
5537       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5538       if(!only32)
5539       {
5540         assert(s1h>=0);
5541         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5542         {
5543           emit_test(s1h,s1h);
5544           nottaken=(int)out;
5545           emit_jns(1);
5546         }
5547         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5548         {
5549           emit_test(s1h,s1h);
5550           nottaken=(int)out;
5551           emit_js(1);
5552         }
5553       } // if(!only32)
5554       else
5555       {
5556         assert(s1l>=0);
5557         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5558         {
5559           emit_test(s1l,s1l);
5560           nottaken=(int)out;
5561           emit_jns(1);
5562         }
5563         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5564         {
5565           emit_test(s1l,s1l);
5566           nottaken=(int)out;
5567           emit_js(1);
5568         }
5569       }
5570     } // if(!unconditional)
5571     int adj;
5572     uint64_t ds_unneeded=branch_regs[i].u;
5573     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5574     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5575     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5576     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5577     ds_unneeded|=1;
5578     ds_unneeded_upper|=1;
5579     // branch taken
5580     if(!nevertaken) {
5581       //assem_debug("1:\n");
5582       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5583                     ds_unneeded,ds_unneeded_upper);
5584       // load regs
5585       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5586       address_generation(i+1,&branch_regs[i],0);
5587       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5588       ds_assemble(i+1,&branch_regs[i]);
5589       cc=get_reg(branch_regs[i].regmap,CCREG);
5590       if(cc==-1) {
5591         emit_loadreg(CCREG,cc=HOST_CCREG);
5592         // CHECK: Is the following instruction (fall thru) allocated ok?
5593       }
5594       assert(cc==HOST_CCREG);
5595       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5596       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5597       assem_debug("cycle count (adj)\n");
5598       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5599       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5600       if(internal)
5601         assem_debug("branch: internal\n");
5602       else
5603         assem_debug("branch: external\n");
5604       if(internal&&is_ds[(ba[i]-start)>>2]) {
5605         ds_assemble_entry(i);
5606       }
5607       else {
5608         add_to_linker((int)out,ba[i],internal);
5609         emit_jmp(0);
5610       }
5611     }
5612     // branch not taken
5613     cop1_usable=prev_cop1_usable;
5614     if(!unconditional) {
5615       set_jump_target(nottaken,(int)out);
5616       assem_debug("1:\n");
5617       if(!likely[i]) {
5618         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5619                       ds_unneeded,ds_unneeded_upper);
5620         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5621         address_generation(i+1,&branch_regs[i],0);
5622         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5623         ds_assemble(i+1,&branch_regs[i]);
5624       }
5625       cc=get_reg(branch_regs[i].regmap,CCREG);
5626       if(cc==-1&&!likely[i]) {
5627         // Cycle count isn't in a register, temporarily load it then write it out
5628         emit_loadreg(CCREG,HOST_CCREG);
5629         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5630         int jaddr=(int)out;
5631         emit_jns(0);
5632         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5633         emit_storereg(CCREG,HOST_CCREG);
5634       }
5635       else{
5636         cc=get_reg(i_regmap,CCREG);
5637         assert(cc==HOST_CCREG);
5638         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5639         int jaddr=(int)out;
5640         emit_jns(0);
5641         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5642       }
5643     }
5644   }
5645 }
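/*
 * sjump covers the REGIMM branches (BLTZ/BGEZ and their AL/likely forms).
 * For the linking variants (rt1[i]==31) the return address start+i*4+8,
 * i.e. the instruction after the delay slot, is written to $ra before the
 * test, matching MIPS semantics where the link register is updated even if
 * the branch is not taken.  For example, a BLTZAL at 0x80010000 leaves
 * 0x80010008 in $ra whatever the outcome of the comparison.
 */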
5646
5647 void fjump_assemble(int i,struct regstat *i_regs)
5648 {
5649   signed char *i_regmap=i_regs->regmap;
5650   int cc;
5651   int match;
5652   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5653   assem_debug("fmatch=%d\n",match);
5654   int fs,cs;
5655   int eaddr;
5656   int invert=0;
5657   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5658   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5659   if(!match) invert=1;
5660   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5661   if(i>(ba[i]-start)>>2) invert=1;
5662   #endif
5663
5664   if(ooo[i]) {
5665     fs=get_reg(branch_regs[i].regmap,FSREG);
5666     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5667   }
5668   else {
5669     fs=get_reg(i_regmap,FSREG);
5670   }
5671
5672   // Check cop1 unusable
5673   if(!cop1_usable) {
5674     cs=get_reg(i_regmap,CSREG);
5675     assert(cs>=0);
5676     emit_testimm(cs,0x20000000);
5677     eaddr=(int)out;
5678     emit_jeq(0);
5679     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5680     cop1_usable=1;
5681   }
5682
5683   if(ooo[i]) {
5684     // Out of order execution (delay slot first)
5685     //printf("OOOE\n");
5686     ds_assemble(i+1,i_regs);
5687     int adj;
5688     uint64_t bc_unneeded=branch_regs[i].u;
5689     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5690     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5691     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5692     bc_unneeded|=1;
5693     bc_unneeded_upper|=1;
5694     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5695                   bc_unneeded,bc_unneeded_upper);
5696     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5697     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5698     cc=get_reg(branch_regs[i].regmap,CCREG);
5699     assert(cc==HOST_CCREG);
5700     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5701     assem_debug("cycle count (adj)\n");
5702     if(1) {
5703       int nottaken=0;
5704       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5705       if(1) {
5706         assert(fs>=0);
5707         emit_testimm(fs,0x800000);
5708         if(source[i]&0x10000) // BC1T
5709         {
5710           if(invert){
5711             nottaken=(int)out;
5712             emit_jeq(1);
5713           }else{
5714             add_to_linker((int)out,ba[i],internal);
5715             emit_jne(0);
5716           }
5717         }
5718         else // BC1F
5719         {
5720           if(invert){
5721             nottaken=(int)out;
5722             emit_jne(1);
5723           }else{
5724             add_to_linker((int)out,ba[i],internal);
5725             emit_jeq(0);
5726           }
5727         }
5728       } // if(1)
5729
5730       if(invert) {
5731         if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5732         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5733         else if(match) emit_addnop(13);
5734         #endif
5735         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5736         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5737         if(internal)
5738           assem_debug("branch: internal\n");
5739         else
5740           assem_debug("branch: external\n");
5741         if(internal&&is_ds[(ba[i]-start)>>2]) {
5742           ds_assemble_entry(i);
5743         }
5744         else {
5745           add_to_linker((int)out,ba[i],internal);
5746           emit_jmp(0);
5747         }
5748         set_jump_target(nottaken,(int)out);
5749       }
5750
5751       if(adj) {
5752         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5753       }
5754     } // if(1)
5755   } // if(ooo)
5756   else
5757   {
5758     // In-order execution (branch first)
5759     //printf("IOE\n");
5760     int nottaken=0;
5761     if(1) {
5762       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5763       if(1) {
5764         assert(fs>=0);
5765         emit_testimm(fs,0x800000);
5766         if(source[i]&0x10000) // BC1T
5767         {
5768           nottaken=(int)out;
5769           emit_jeq(1);
5770         }
5771         else // BC1F
5772         {
5773           nottaken=(int)out;
5774           emit_jne(1);
5775         }
5776       }
5777     } // if(1)
5778     int adj;
5779     uint64_t ds_unneeded=branch_regs[i].u;
5780     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5781     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5782     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5783     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5784     ds_unneeded|=1;
5785     ds_unneeded_upper|=1;
5786     // branch taken
5787     //assem_debug("1:\n");
5788     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5789                   ds_unneeded,ds_unneeded_upper);
5790     // load regs
5791     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5792     address_generation(i+1,&branch_regs[i],0);
5793     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5794     ds_assemble(i+1,&branch_regs[i]);
5795     cc=get_reg(branch_regs[i].regmap,CCREG);
5796     if(cc==-1) {
5797       emit_loadreg(CCREG,cc=HOST_CCREG);
5798       // CHECK: Is the following instruction (fall thru) allocated ok?
5799     }
5800     assert(cc==HOST_CCREG);
5801     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5802     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5803     assem_debug("cycle count (adj)\n");
5804     if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5805     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5806     if(internal)
5807       assem_debug("branch: internal\n");
5808     else
5809       assem_debug("branch: external\n");
5810     if(internal&&is_ds[(ba[i]-start)>>2]) {
5811       ds_assemble_entry(i);
5812     }
5813     else {
5814       add_to_linker((int)out,ba[i],internal);
5815       emit_jmp(0);
5816     }
5817
5818     // branch not taken
5819     if(1) { // <- FIXME (don't need this)
5820       set_jump_target(nottaken,(int)out);
5821       assem_debug("1:\n");
5822       if(!likely[i]) {
5823         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5824                       ds_unneeded,ds_unneeded_upper);
5825         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5826         address_generation(i+1,&branch_regs[i],0);
5827         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5828         ds_assemble(i+1,&branch_regs[i]);
5829       }
5830       cc=get_reg(branch_regs[i].regmap,CCREG);
5831       if(cc==-1&&!likely[i]) {
5832         // Cycle count isn't in a register, temporarily load it then write it out
5833         emit_loadreg(CCREG,HOST_CCREG);
5834         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5835         int jaddr=(int)out;
5836         emit_jns(0);
5837         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5838         emit_storereg(CCREG,HOST_CCREG);
5839       }
5840       else{
5841         cc=get_reg(i_regmap,CCREG);
5842         assert(cc==HOST_CCREG);
5843         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5844         int jaddr=(int)out;
5845         emit_jns(0);
5846         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5847       }
5848     }
5849   }
5850 }
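/*
 * fjump covers BC1F/BC1T.  The coprocessor 1 condition flag is cached in
 * the host register mapped to FSREG, and emit_testimm(fs,0x800000) checks
 * bit 23, where the condition bit lives in FCR31.  Bit 16 of the
 * instruction word (source[i]&0x10000) selects BC1T (branch if set) versus
 * BC1F (branch if clear).  When cop1_usable has not yet been established
 * for this block, a Status.CU1 test (0x20000000) is emitted first and
 * failures go through an FP_STUB.
 */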
5851
5852 static void pagespan_assemble(int i,struct regstat *i_regs)
5853 {
5854   int s1l=get_reg(i_regs->regmap,rs1[i]);
5855   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
5856   int s2l=get_reg(i_regs->regmap,rs2[i]);
5857   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
5858   int taken=0;
5859   int nottaken=0;
5860   int unconditional=0;
5861   if(rs1[i]==0)
5862   {
5863     s1l=s2l;s1h=s2h;
5864     s2l=s2h=-1;
5865   }
5866   else if(rs2[i]==0)
5867   {
5868     s2l=s2h=-1;
5869   }
5870   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
5871     s1h=s2h=-1;
5872   }
5873   int hr=0;
5874   int addr=-1,alt=-1,ntaddr=-1;
5875   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
5876   else {
5877     while(hr<HOST_REGS)
5878     {
5879       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5880          (i_regs->regmap[hr]&63)!=rs1[i] &&
5881          (i_regs->regmap[hr]&63)!=rs2[i] )
5882       {
5883         addr=hr++;break;
5884       }
5885       hr++;
5886     }
5887   }
5888   while(hr<HOST_REGS)
5889   {
5890     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5891        (i_regs->regmap[hr]&63)!=rs1[i] &&
5892        (i_regs->regmap[hr]&63)!=rs2[i] )
5893     {
5894       alt=hr++;break;
5895     }
5896     hr++;
5897   }
5898   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5899   {
5900     while(hr<HOST_REGS)
5901     {
5902       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5903          (i_regs->regmap[hr]&63)!=rs1[i] &&
5904          (i_regs->regmap[hr]&63)!=rs2[i] )
5905       {
5906         ntaddr=hr;break;
5907       }
5908       hr++;
5909     }
5910   }
5911   assert(hr<HOST_REGS);
5912   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
5913     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
5914   }
5915   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5916   if(opcode[i]==2) // J
5917   {
5918     unconditional=1;
5919   }
5920   if(opcode[i]==3) // JAL
5921   {
5922     // TODO: mini_ht
5923     int rt=get_reg(i_regs->regmap,31);
5924     emit_movimm(start+i*4+8,rt);
5925     unconditional=1;
5926   }
5927   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
5928   {
5929     emit_mov(s1l,addr);
5930     if(opcode2[i]==9) // JALR
5931     {
5932       int rt=get_reg(i_regs->regmap,rt1[i]);
5933       emit_movimm(start+i*4+8,rt);
5934     }
5935   }
5936   if((opcode[i]&0x3f)==4) // BEQ
5937   {
5938     if(rs1[i]==rs2[i])
5939     {
5940       unconditional=1;
5941     }
5942     else
5943     #ifdef HAVE_CMOV_IMM
5944     if(s1h<0) {
5945       if(s2l>=0) emit_cmp(s1l,s2l);
5946       else emit_test(s1l,s1l);
5947       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5948     }
5949     else
5950     #endif
5951     {
5952       assert(s1l>=0);
5953       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5954       if(s1h>=0) {
5955         if(s2h>=0) emit_cmp(s1h,s2h);
5956         else emit_test(s1h,s1h);
5957         emit_cmovne_reg(alt,addr);
5958       }
5959       if(s2l>=0) emit_cmp(s1l,s2l);
5960       else emit_test(s1l,s1l);
5961       emit_cmovne_reg(alt,addr);
5962     }
5963   }
5964   if((opcode[i]&0x3f)==5) // BNE
5965   {
5966     #ifdef HAVE_CMOV_IMM
5967     if(s1h<0) {
5968       if(s2l>=0) emit_cmp(s1l,s2l);
5969       else emit_test(s1l,s1l);
5970       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5971     }
5972     else
5973     #endif
5974     {
5975       assert(s1l>=0);
5976       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5977       if(s1h>=0) {
5978         if(s2h>=0) emit_cmp(s1h,s2h);
5979         else emit_test(s1h,s1h);
5980         emit_cmovne_reg(alt,addr);
5981       }
5982       if(s2l>=0) emit_cmp(s1l,s2l);
5983       else emit_test(s1l,s1l);
5984       emit_cmovne_reg(alt,addr);
5985     }
5986   }
5987   if((opcode[i]&0x3f)==0x14) // BEQL
5988   {
5989     if(s1h>=0) {
5990       if(s2h>=0) emit_cmp(s1h,s2h);
5991       else emit_test(s1h,s1h);
5992       nottaken=(int)out;
5993       emit_jne(0);
5994     }
5995     if(s2l>=0) emit_cmp(s1l,s2l);
5996     else emit_test(s1l,s1l);
5997     if(nottaken) set_jump_target(nottaken,(int)out);
5998     nottaken=(int)out;
5999     emit_jne(0);
6000   }
6001   if((opcode[i]&0x3f)==0x15) // BNEL
6002   {
6003     if(s1h>=0) {
6004       if(s2h>=0) emit_cmp(s1h,s2h);
6005       else emit_test(s1h,s1h);
6006       taken=(int)out;
6007       emit_jne(0);
6008     }
6009     if(s2l>=0) emit_cmp(s1l,s2l);
6010     else emit_test(s1l,s1l);
6011     nottaken=(int)out;
6012     emit_jeq(0);
6013     if(taken) set_jump_target(taken,(int)out);
6014   }
6015   if((opcode[i]&0x3f)==6) // BLEZ
6016   {
6017     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6018     emit_cmpimm(s1l,1);
6019     if(s1h>=0) emit_mov(addr,ntaddr);
6020     emit_cmovl_reg(alt,addr);
6021     if(s1h>=0) {
6022       emit_test(s1h,s1h);
6023       emit_cmovne_reg(ntaddr,addr);
6024       emit_cmovs_reg(alt,addr);
6025     }
6026   }
6027   if((opcode[i]&0x3f)==7) // BGTZ
6028   {
6029     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6030     emit_cmpimm(s1l,1);
6031     if(s1h>=0) emit_mov(addr,alt);
6032     emit_cmovl_reg(ntaddr,addr);
6033     if(s1h>=0) {
6034       emit_test(s1h,s1h);
6035       emit_cmovne_reg(alt,addr);
6036       emit_cmovs_reg(ntaddr,addr);
6037     }
6038   }
6039   if((opcode[i]&0x3f)==0x16) // BLEZL
6040   {
6041     assert((opcode[i]&0x3f)!=0x16);
6042   }
6043   if((opcode[i]&0x3f)==0x17) // BGTZL
6044   {
6045     assert((opcode[i]&0x3f)!=0x17);
6046   }
6047   assert(opcode[i]!=1); // BLTZ/BGEZ
6048
6049   //FIXME: Check CSREG
6050   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6051     if((source[i]&0x30000)==0) // BC1F
6052     {
6053       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6054       emit_testimm(s1l,0x800000);
6055       emit_cmovne_reg(alt,addr);
6056     }
6057     if((source[i]&0x30000)==0x10000) // BC1T
6058     {
6059       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6060       emit_testimm(s1l,0x800000);
6061       emit_cmovne_reg(alt,addr);
6062     }
6063     if((source[i]&0x30000)==0x20000) // BC1FL
6064     {
6065       emit_testimm(s1l,0x800000);
6066       nottaken=(int)out;
6067       emit_jne(0);
6068     }
6069     if((source[i]&0x30000)==0x30000) // BC1TL
6070     {
6071       emit_testimm(s1l,0x800000);
6072       nottaken=(int)out;
6073       emit_jeq(0);
6074     }
6075   }
6076
6077   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6078   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6079   if(likely[i]||unconditional)
6080   {
6081     emit_movimm(ba[i],HOST_BTREG);
6082   }
6083   else if(addr!=HOST_BTREG)
6084   {
6085     emit_mov(addr,HOST_BTREG);
6086   }
6087   void *branch_addr=out;
6088   emit_jmp(0);
6089   int target_addr=start+i*4+5;
6090   void *stub=out;
6091   void *compiled_target_addr=check_addr(target_addr);
6092   emit_extjump_ds((int)branch_addr,target_addr);
6093   if(compiled_target_addr) {
6094     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6095     add_link(target_addr,stub);
6096   }
6097   else set_jump_target((int)branch_addr,(int)stub);
6098   if(likely[i]) {
6099     // Not-taken path
6100     set_jump_target((int)nottaken,(int)out);
6101     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6102     void *branch_addr=out;
6103     emit_jmp(0);
6104     int target_addr=start+i*4+8;
6105     void *stub=out;
6106     void *compiled_target_addr=check_addr(target_addr);
6107     emit_extjump_ds((int)branch_addr,target_addr);
6108     if(compiled_target_addr) {
6109       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6110       add_link(target_addr,stub);
6111     }
6112     else set_jump_target((int)branch_addr,(int)stub);
6113   }
6114 }
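/*
 * pagespan_assemble handles a branch that is the last instruction of a
 * page, so its delay slot lives in the next page and the target cannot be
 * resolved inside this block.  Rather than emitting a conditional jump,
 * the taken / not-taken address is selected with conditional moves, parked
 * in HOST_BTREG, and the block exits through an external jump; check_addr()
 * is consulted so an already-compiled target can be linked in directly.
 */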
6115
6116 // Assemble the delay slot for the above
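// The entry is registered under start+1; the low bit appears to mark it as
// a delay-slot-only entry so ordinary lookups of the block's start address
// do not land here.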
6117 static void pagespan_ds()
6118 {
6119   assem_debug("initial delay slot:\n");
6120   u_int vaddr=start+1;
6121   u_int page=get_page(vaddr);
6122   u_int vpage=get_vpage(vaddr);
6123   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6124   do_dirty_stub_ds();
6125   ll_add(jump_in+page,vaddr,(void *)out);
6126   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6127   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6128     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6129   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6130     emit_writeword(HOST_BTREG,(int)&branch_target);
6131   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6132   address_generation(0,&regs[0],regs[0].regmap_entry);
6133   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6134     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6135   cop1_usable=0;
6136   is_delayslot=0;
6137   switch(itype[0]) {
6138     case ALU:
6139       alu_assemble(0,&regs[0]);break;
6140     case IMM16:
6141       imm16_assemble(0,&regs[0]);break;
6142     case SHIFT:
6143       shift_assemble(0,&regs[0]);break;
6144     case SHIFTIMM:
6145       shiftimm_assemble(0,&regs[0]);break;
6146     case LOAD:
6147       load_assemble(0,&regs[0]);break;
6148     case LOADLR:
6149       loadlr_assemble(0,&regs[0]);break;
6150     case STORE:
6151       store_assemble(0,&regs[0]);break;
6152     case STORELR:
6153       storelr_assemble(0,&regs[0]);break;
6154     case COP0:
6155       cop0_assemble(0,&regs[0]);break;
6156     case COP1:
6157       cop1_assemble(0,&regs[0]);break;
6158     case C1LS:
6159       c1ls_assemble(0,&regs[0]);break;
6160     case COP2:
6161       cop2_assemble(0,&regs[0]);break;
6162     case C2LS:
6163       c2ls_assemble(0,&regs[0]);break;
6164     case C2OP:
6165       c2op_assemble(0,&regs[0]);break;
6166     case FCONV:
6167       fconv_assemble(0,&regs[0]);break;
6168     case FLOAT:
6169       float_assemble(0,&regs[0]);break;
6170     case FCOMP:
6171       fcomp_assemble(0,&regs[0]);break;
6172     case MULTDIV:
6173       multdiv_assemble(0,&regs[0]);break;
6174     case MOV:
6175       mov_assemble(0,&regs[0]);break;
6176     case SYSCALL:
6177     case HLECALL:
6178     case INTCALL:
6179     case SPAN:
6180     case UJUMP:
6181     case RJUMP:
6182     case CJUMP:
6183     case SJUMP:
6184     case FJUMP:
6185       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
6186   }
6187   int btaddr=get_reg(regs[0].regmap,BTREG);
6188   if(btaddr<0) {
6189     btaddr=get_reg(regs[0].regmap,-1);
6190     emit_readword((int)&branch_target,btaddr);
6191   }
6192   assert(btaddr!=HOST_CCREG);
6193   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6194 #ifdef HOST_IMM8
6195   emit_movimm(start+4,HOST_TEMPREG);
6196   emit_cmp(btaddr,HOST_TEMPREG);
6197 #else
6198   emit_cmpimm(btaddr,start+4);
6199 #endif
6200   int branch=(int)out;
6201   emit_jeq(0);
6202   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6203   emit_jmp(jump_vaddr_reg[btaddr]);
6204   set_jump_target(branch,(int)out);
6205   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6206   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6207 }
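/*
 * After the delay slot above runs, branch_target (BTREG) holds whatever
 * address the preceding page-spanning branch selected.  If it equals
 * start+4, execution simply continues into this block; otherwise control
 * goes through jump_vaddr_reg[] to dispatch to the real target.
 */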
6208
6209 // Basic liveness analysis for MIPS registers
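/*
 * One bit per MIPS register: bit r set in unneeded_reg[i] means the value
 * r holds at instruction i is overwritten before it is read again, so it
 * need not be kept live or written back.  A sketch of the per-instruction
 * update performed in the loop below (illustrative only):
 *
 *   u  = unneeded_reg[i+1];                 // inherit the successor's set
 *   u |=  (1LL<<rt1[i]) | (1LL<<rt2[i]);    // written: old value is dead
 *   u &= ~((1LL<<rs1[i]) | (1LL<<rs2[i]));  // read: value is needed
 *   u |= 1;                                 // r0 is always unneeded
 *
 * Branches additionally fold in their delay slot, and "likely" branches
 * also merge the fall-through successor i+2, since the slot is skipped
 * when such a branch is not taken.
 */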
6210 void unneeded_registers(int istart,int iend,int r)
6211 {
6212   int i;
6213   uint64_t u,uu,gte_u,b,bu,gte_bu;
6214   uint64_t temp_u,temp_uu,temp_gte_u=0;
6215   uint64_t tdep;
6216   uint64_t gte_u_unknown=0;
6217   if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
6218     gte_u_unknown=~0ll;
6219   if(iend==slen-1) {
6220     u=1;uu=1;
6221     gte_u=gte_u_unknown;
6222   }else{
6223     u=unneeded_reg[iend+1];
6224     uu=unneeded_reg_upper[iend+1];
6225     u=1;uu=1;
6226     gte_u=gte_unneeded[iend+1];
6227   }
6228
6229   for (i=iend;i>=istart;i--)
6230   {
6231     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6232     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6233     {
6234       // If subroutine call, flag return address as a possible branch target
6235       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6236
6237       if(ba[i]<start || ba[i]>=(start+slen*4))
6238       {
6239         // Branch out of this block, flush all regs
6240         u=1;
6241         uu=1;
6242         gte_u=gte_u_unknown;
6243         /* Hexagon hack
6244         if(itype[i]==UJUMP&&rt1[i]==31)
6245         {
6246           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6247         }
6248         if(itype[i]==RJUMP&&rs1[i]==31)
6249         {
6250           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6251         }
6252         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6253           if(itype[i]==UJUMP&&rt1[i]==31)
6254           {
6255             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6256             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6257           }
6258           if(itype[i]==RJUMP&&rs1[i]==31)
6259           {
6260             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6261             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6262           }
6263         }*/
6264         branch_unneeded_reg[i]=u;
6265         branch_unneeded_reg_upper[i]=uu;
6266         // Merge in delay slot
6267         tdep=(~uu>>rt1[i+1])&1;
6268         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6269         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6270         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6271         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6272         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6273         u|=1;uu|=1;
6274         gte_u|=gte_rt[i+1];
6275         gte_u&=~gte_rs[i+1];
6276         // If branch is "likely" (and conditional)
6277         // then we skip the delay slot on the fall-thru path
6278         if(likely[i]) {
6279           if(i<slen-1) {
6280             u&=unneeded_reg[i+2];
6281             uu&=unneeded_reg_upper[i+2];
6282             gte_u&=gte_unneeded[i+2];
6283           }
6284           else
6285           {
6286             u=1;
6287             uu=1;
6288             gte_u=gte_u_unknown;
6289           }
6290         }
6291       }
6292       else
6293       {
6294         // Internal branch, flag target
6295         bt[(ba[i]-start)>>2]=1;
6296         if(ba[i]<=start+i*4) {
6297           // Backward branch
6298           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6299           {
6300             // Unconditional branch
6301             temp_u=1;temp_uu=1;
6302             temp_gte_u=0;
6303           } else {
6304             // Conditional branch (not taken case)
6305             temp_u=unneeded_reg[i+2];
6306             temp_uu=unneeded_reg_upper[i+2];
6307             temp_gte_u&=gte_unneeded[i+2];
6308           }
6309           // Merge in delay slot
6310           tdep=(~temp_uu>>rt1[i+1])&1;
6311           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6312           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6313           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6314           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6315           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6316           temp_u|=1;temp_uu|=1;
6317           temp_gte_u|=gte_rt[i+1];
6318           temp_gte_u&=~gte_rs[i+1];
6319           // If branch is "likely" (and conditional)
6320           // then we skip the delay slot on the fall-thru path
6321           if(likely[i]) {
6322             if(i<slen-1) {
6323               temp_u&=unneeded_reg[i+2];
6324               temp_uu&=unneeded_reg_upper[i+2];
6325               temp_gte_u&=gte_unneeded[i+2];
6326             }
6327             else
6328             {
6329               temp_u=1;
6330               temp_uu=1;
6331               temp_gte_u=gte_u_unknown;
6332             }
6333           }
6334           tdep=(~temp_uu>>rt1[i])&1;
6335           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6336           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6337           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6338           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6339           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6340           temp_u|=1;temp_uu|=1;
6341           temp_gte_u|=gte_rt[i];
6342           temp_gte_u&=~gte_rs[i];
6343           unneeded_reg[i]=temp_u;
6344           unneeded_reg_upper[i]=temp_uu;
6345           gte_unneeded[i]=temp_gte_u;
6346           // Only go three levels deep.  This recursion can take an
6347           // excessive amount of time if there are a lot of nested loops.
6348           if(r<2) {
6349             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6350           }else{
6351             unneeded_reg[(ba[i]-start)>>2]=1;
6352             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6353             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
6354           }
6355         } /*else*/ if(1) {
6356           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6357           {
6358             // Unconditional branch
6359             u=unneeded_reg[(ba[i]-start)>>2];
6360             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6361             gte_u=gte_unneeded[(ba[i]-start)>>2];
6362             branch_unneeded_reg[i]=u;
6363             branch_unneeded_reg_upper[i]=uu;
6364         //u=1;
6365         //uu=1;
6366         //branch_unneeded_reg[i]=u;
6367         //branch_unneeded_reg_upper[i]=uu;
6368             // Merge in delay slot
6369             tdep=(~uu>>rt1[i+1])&1;
6370             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6371             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6372             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6373             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6374             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6375             u|=1;uu|=1;
6376             gte_u|=gte_rt[i+1];
6377             gte_u&=~gte_rs[i+1];
6378           } else {
6379             // Conditional branch
6380             b=unneeded_reg[(ba[i]-start)>>2];
6381             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6382             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6383             branch_unneeded_reg[i]=b;
6384             branch_unneeded_reg_upper[i]=bu;
6385         //b=1;
6386         //bu=1;
6387         //branch_unneeded_reg[i]=b;
6388         //branch_unneeded_reg_upper[i]=bu;
6389             // Branch delay slot
6390             tdep=(~uu>>rt1[i+1])&1;
6391             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6392             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6393             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6394             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6395             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6396             b|=1;bu|=1;
6397             gte_bu|=gte_rt[i+1];
6398             gte_bu&=~gte_rs[i+1];
6399             // If branch is "likely" then we skip the
6400             // delay slot on the fall-thru path
6401             if(likely[i]) {
6402               u=b;
6403               uu=bu;
6404               gte_u=gte_bu;
6405               if(i<slen-1) {
6406                 u&=unneeded_reg[i+2];
6407                 uu&=unneeded_reg_upper[i+2];
6408                 gte_u&=gte_unneeded[i+2];
6409         //u=1;
6410         //uu=1;
6411               }
6412             } else {
6413               u&=b;
6414               uu&=bu;
6415               gte_u&=gte_bu;
6416         //u=1;
6417         //uu=1;
6418             }
6419             if(i<slen-1) {
6420               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6421               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6422         //branch_unneeded_reg[i]=1;
6423         //branch_unneeded_reg_upper[i]=1;
6424             } else {
6425               branch_unneeded_reg[i]=1;
6426               branch_unneeded_reg_upper[i]=1;
6427             }
6428           }
6429         }
6430       }
6431     }
6432     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6433     {
6434       // SYSCALL instruction (software interrupt)
6435       u=1;
6436       uu=1;
6437     }
6438     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6439     {
6440       // ERET instruction (return from interrupt)
6441       u=1;
6442       uu=1;
6443     }
6444     //u=uu=1; // DEBUG
6445     tdep=(~uu>>rt1[i])&1;
6446     // Written registers are unneeded
6447     u|=1LL<<rt1[i];
6448     u|=1LL<<rt2[i];
6449     uu|=1LL<<rt1[i];
6450     uu|=1LL<<rt2[i];
6451     gte_u|=gte_rt[i];
6452     // Accessed registers are needed
6453     u&=~(1LL<<rs1[i]);
6454     u&=~(1LL<<rs2[i]);
6455     uu&=~(1LL<<us1[i]);
6456     uu&=~(1LL<<us2[i]);
6457     gte_u&=~gte_rs[i];
6458     if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
6459       gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
6460     // Source-target dependencies
6461     uu&=~(tdep<<dep1[i]);
6462     uu&=~(tdep<<dep2[i]);
6463     // R0 is always unneeded
6464     u|=1;uu|=1;
6465     // Save it
6466     unneeded_reg[i]=u;
6467     unneeded_reg_upper[i]=uu;
6468     gte_unneeded[i]=gte_u;
6469     /*
6470     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6471     printf("U:");
6472     int r;
6473     for(r=1;r<=CCREG;r++) {
6474       if((unneeded_reg[i]>>r)&1) {
6475         if(r==HIREG) printf(" HI");
6476         else if(r==LOREG) printf(" LO");
6477         else printf(" r%d",r);
6478       }
6479     }
6480     printf(" UU:");
6481     for(r=1;r<=CCREG;r++) {
6482       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6483         if(r==HIREG) printf(" HI");
6484         else if(r==LOREG) printf(" LO");
6485         else printf(" r%d",r);
6486       }
6487     }
6488     printf("\n");*/
6489   }
6490   for (i=iend;i>=istart;i--)
6491   {
6492     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6493   }
6494 }
6495
6496 // Write back dirty registers as soon as we will no longer modify them,
6497 // so that we don't end up with lots of writes at the branches.
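// A rough reading of the two bitmasks used below (one bit per host reg):
// will_dirty marks registers that are still going to be written further
// along, so flushing them here would be wasted work; wont_dirty marks
// registers with no writes remaining, which is where the early writeback
// can happen.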
6498 void clean_registers(int istart,int iend,int wr)
6499 {
6500   int i;
6501   int r;
6502   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
6503   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
6504   if(iend==slen-1) {
6505     will_dirty_i=will_dirty_next=0;
6506     wont_dirty_i=wont_dirty_next=0;
6507   }else{
6508     will_dirty_i=will_dirty_next=will_dirty[iend+1];
6509     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
6510   }
6511   for (i=iend;i>=istart;i--)
6512   {
6513     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6514     {
6515       if(ba[i]<start || ba[i]>=(start+slen*4))
6516       {
6517         // Branch out of this block, flush all regs
6518         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6519         {
6520           // Unconditional branch
6521           will_dirty_i=0;
6522           wont_dirty_i=0;
6523           // Merge in delay slot (will dirty)
6524           for(r=0;r<HOST_REGS;r++) {
6525             if(r!=EXCLUDE_REG) {
6526               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6527               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6528               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6529               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6530               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6531               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6532               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6533               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6534               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6535               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6536               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6537               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6538               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6539               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6540             }
6541           }
6542         }
6543         else
6544         {
6545           // Conditional branch
6546           will_dirty_i=0;
6547           wont_dirty_i=wont_dirty_next;
6548           // Merge in delay slot (will dirty)
6549           for(r=0;r<HOST_REGS;r++) {
6550             if(r!=EXCLUDE_REG) {
6551               if(!likely[i]) {
6552                 // Might not dirty if likely branch is not taken
6553                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6554                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6555                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6556                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6557                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6558                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
6559                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6560                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6561                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6562                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6563                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6564                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6565                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6566                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6567               }
6568             }
6569           }
6570         }
6571         // Merge in delay slot (wont dirty)
6572         for(r=0;r<HOST_REGS;r++) {
6573           if(r!=EXCLUDE_REG) {
6574             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6575             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6576             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6577             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6578             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6579             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6580             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6581             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6582             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6583             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6584           }
6585         }
6586         if(wr) {
6587           #ifndef DESTRUCTIVE_WRITEBACK
6588           branch_regs[i].dirty&=wont_dirty_i;
6589           #endif
6590           branch_regs[i].dirty|=will_dirty_i;
6591         }
6592       }
6593       else
6594       {
6595         // Internal branch
6596         if(ba[i]<=start+i*4) {
6597           // Backward branch
6598           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6599           {
6600             // Unconditional branch
6601             temp_will_dirty=0;
6602             temp_wont_dirty=0;
6603             // Merge in delay slot (will dirty)
6604             for(r=0;r<HOST_REGS;r++) {
6605               if(r!=EXCLUDE_REG) {
6606                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6607                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6608                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6609                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6610                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6611                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6612                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6613                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6614                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6615                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6616                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6617                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6618                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6619                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6620               }
6621             }
6622           } else {
6623             // Conditional branch (not taken case)
6624             temp_will_dirty=will_dirty_next;
6625             temp_wont_dirty=wont_dirty_next;
6626             // Merge in delay slot (will dirty)
6627             for(r=0;r<HOST_REGS;r++) {
6628               if(r!=EXCLUDE_REG) {
6629                 if(!likely[i]) {
6630                   // Will not dirty if likely branch is not taken
6631                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6632                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6633                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6634                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6635                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6636                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
6637                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6638                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6639                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6640                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6641                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6642                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6643                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6644                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6645                 }
6646               }
6647             }
6648           }
6649           // Merge in delay slot (won't dirty)
6650           for(r=0;r<HOST_REGS;r++) {
6651             if(r!=EXCLUDE_REG) {
6652               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6653               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6654               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6655               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6656               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6657               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6658               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6659               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6660               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6661               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6662             }
6663           }
6664           // Deal with changed mappings
6665           if(i<iend) {
6666             for(r=0;r<HOST_REGS;r++) {
6667               if(r!=EXCLUDE_REG) {
6668                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
6669                   temp_will_dirty&=~(1<<r);
6670                   temp_wont_dirty&=~(1<<r);
6671                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6672                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6673                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6674                   } else {
6675                     temp_will_dirty|=1<<r;
6676                     temp_wont_dirty|=1<<r;
6677                   }
6678                 }
6679               }
6680             }
6681           }
6682           if(wr) {
6683             will_dirty[i]=temp_will_dirty;
6684             wont_dirty[i]=temp_wont_dirty;
6685             clean_registers((ba[i]-start)>>2,i-1,0);
6686           }else{
6687             // Limit recursion.  It can take an excessive amount
6688             // of time if there are a lot of nested loops.
6689             will_dirty[(ba[i]-start)>>2]=0;
6690             wont_dirty[(ba[i]-start)>>2]=-1;
6691           }
6692         }
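        // Merge in the dirty state recorded at the (internal) branch target:
        // a host reg can only stay dirty across the branch if the target's
        // entry mapping agrees, or if the MIPS reg is unneeded at the target.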
6693         /*else*/ if(1)
6694         {
6695           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6696           {
6697             // Unconditional branch
6698             will_dirty_i=0;
6699             wont_dirty_i=0;
6700           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6701             for(r=0;r<HOST_REGS;r++) {
6702               if(r!=EXCLUDE_REG) {
6703                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6704                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
6705                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6706                 }
6707                 if(branch_regs[i].regmap[r]>=0) {
6708                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6709                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6710                 }
6711               }
6712             }
6713           //}
6714             // Merge in delay slot
6715             for(r=0;r<HOST_REGS;r++) {
6716               if(r!=EXCLUDE_REG) {
6717                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6718                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6719                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6720                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6721                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6722                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6723                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6724                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6725                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6726                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6727                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6728                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6729                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6730                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6731               }
6732             }
6733           } else {
6734             // Conditional branch
6735             will_dirty_i=will_dirty_next;
6736             wont_dirty_i=wont_dirty_next;
6737           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6738             for(r=0;r<HOST_REGS;r++) {
6739               if(r!=EXCLUDE_REG) {
6740                 signed char target_reg=branch_regs[i].regmap[r];
6741                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6742                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6743                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6744                 }
6745                 else if(target_reg>=0) {
6746                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6747                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6748                 }
6749                 // Treat delay slot as part of branch too
6750                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6751                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6752                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6753                 }
6754                 else
6755                 {
6756                   will_dirty[i+1]&=~(1<<r);
6757                 }*/
6758               }
6759             }
6760           //}
6761             // Merge in delay slot
6762             for(r=0;r<HOST_REGS;r++) {
6763               if(r!=EXCLUDE_REG) {
6764                 if(!likely[i]) {
6765                   // Might not dirty if likely branch is not taken
6766                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6767                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6768                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6769                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6770                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6771                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6772                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6773                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6774                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6775                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6776                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6777                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6778                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6779                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6780                 }
6781               }
6782             }
6783           }
6784           // Merge in delay slot (won't dirty)
6785           for(r=0;r<HOST_REGS;r++) {
6786             if(r!=EXCLUDE_REG) {
6787               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6788               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6789               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6790               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6791               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6792               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6793               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6794               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6795               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6796               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6797             }
6798           }
6799           if(wr) {
6800             #ifndef DESTRUCTIVE_WRITEBACK
6801             branch_regs[i].dirty&=wont_dirty_i;
6802             #endif
6803             branch_regs[i].dirty|=will_dirty_i;
6804           }
6805         }
6806       }
6807     }
6808     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6809     {
6810       // SYSCALL instruction (software interrupt)
6811       will_dirty_i=0;
6812       wont_dirty_i=0;
6813     }
6814     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6815     {
6816       // ERET instruction (return from interrupt)
6817       will_dirty_i=0;
6818       wont_dirty_i=0;
6819     }
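    // clean_registers scans the block backwards (iend down to istart), so
    // will_dirty_next / wont_dirty_next carry the state just computed for
    // the following instruction (i+1).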
6820     will_dirty_next=will_dirty_i;
6821     wont_dirty_next=wont_dirty_i;
6822     for(r=0;r<HOST_REGS;r++) {
6823       if(r!=EXCLUDE_REG) {
6824         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6825         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6826         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6827         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6828         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6829         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6830         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6831         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6832         if(i>istart) {
6833           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
6834           {
6835             // Don't store a register immediately after writing it,
6836             // as doing so may prevent dual-issue.
6837             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
6838             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
6839           }
6840         }
6841       }
6842     }
6843     // Save it
6844     will_dirty[i]=will_dirty_i;
6845     wont_dirty[i]=wont_dirty_i;
6846     // Mark registers that won't be dirtied as not dirty
6847     if(wr) {
6848       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
6849       for(r=0;r<HOST_REGS;r++) {
6850         if((will_dirty_i>>r)&1) {
6851           printf(" r%d",r);
6852         }
6853       }
6854       printf("\n");*/
6855
6856       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
6857         regs[i].dirty|=will_dirty_i;
6858         #ifndef DESTRUCTIVE_WRITEBACK
6859         regs[i].dirty&=wont_dirty_i;
6860         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6861         {
6862           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
6863             for(r=0;r<HOST_REGS;r++) {
6864               if(r!=EXCLUDE_REG) {
6865                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
6866                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
6867                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6868               }
6869             }
6870           }
6871         }
6872         else
6873         {
6874           if(i<iend) {
6875             for(r=0;r<HOST_REGS;r++) {
6876               if(r!=EXCLUDE_REG) {
6877                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
6878                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
6879                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6880               }
6881             }
6882           }
6883         }
6884         #endif
6885       //}
6886     }
6887     // Deal with changed mappings
6888     temp_will_dirty=will_dirty_i;
6889     temp_wont_dirty=wont_dirty_i;
6890     for(r=0;r<HOST_REGS;r++) {
6891       if(r!=EXCLUDE_REG) {
6892         int nr;
6893         if(regs[i].regmap[r]==regmap_pre[i][r]) {
6894           if(wr) {
6895             #ifndef DESTRUCTIVE_WRITEBACK
6896             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6897             #endif
6898             regs[i].wasdirty|=will_dirty_i&(1<<r);
6899           }
6900         }
6901         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
6902           // Register moved to a different register
6903           will_dirty_i&=~(1<<r);
6904           wont_dirty_i&=~(1<<r);
6905           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
6906           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
6907           if(wr) {
6908             #ifndef DESTRUCTIVE_WRITEBACK
6909             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6910             #endif
6911             regs[i].wasdirty|=will_dirty_i&(1<<r);
6912           }
6913         }
6914         else {
6915           will_dirty_i&=~(1<<r);
6916           wont_dirty_i&=~(1<<r);
6917           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6918             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6919             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6920           } else {
6921             wont_dirty_i|=1<<r;
6922             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
6923           }
6924         }
6925       }
6926     }
6927   }
6928 }
6929
6930 #ifdef DISASM
6931   /* disassembly */
6932 void disassemble_inst(int i)
6933 {
6934     if (bt[i]) printf("*"); else printf(" ");
6935     switch(itype[i]) {
6936       case UJUMP:
6937         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6938       case CJUMP:
6939         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
6940       case SJUMP:
6941         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6942       case FJUMP:
6943         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6944       case RJUMP:
6945         if (opcode[i]==0x9&&rt1[i]!=31)
6946           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
6947         else
6948           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6949         break;
6950       case SPAN:
6951         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
6952       case IMM16:
6953         if(opcode[i]==0xf) //LUI
6954           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
6955         else
6956           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6957         break;
6958       case LOAD:
6959       case LOADLR:
6960         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6961         break;
6962       case STORE:
6963       case STORELR:
6964         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
6965         break;
6966       case ALU:
6967       case SHIFT:
6968         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
6969         break;
6970       case MULTDIV:
6971         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
6972         break;
6973       case SHIFTIMM:
6974         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6975         break;
6976       case MOV:
6977         if((opcode2[i]&0x1d)==0x10)
6978           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
6979         else if((opcode2[i]&0x1d)==0x11)
6980           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6981         else
6982           printf (" %x: %s\n",start+i*4,insn[i]);
6983         break;
6984       case COP0:
6985         if(opcode2[i]==0)
6986           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
6987         else if(opcode2[i]==4)
6988           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
6989         else printf (" %x: %s\n",start+i*4,insn[i]);
6990         break;
6991       case COP1:
6992         if(opcode2[i]<3)
6993           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
6994         else if(opcode2[i]>3)
6995           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
6996         else printf (" %x: %s\n",start+i*4,insn[i]);
6997         break;
6998       case COP2:
6999         if(opcode2[i]<3)
7000           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7001         else if(opcode2[i]>3)
7002           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7003         else printf (" %x: %s\n",start+i*4,insn[i]);
7004         break;
7005       case C1LS:
7006         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7007         break;
7008       case C2LS:
7009         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7010         break;
7011       case INTCALL:
7012         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7013         break;
7014       default:
7015         //printf (" %s %8x\n",insn[i],source[i]);
7016         printf (" %x: %s\n",start+i*4,insn[i]);
7017     }
7018 }
7019 #else
7020 static void disassemble_inst(int i) {}
7021 #endif // DISASM
7022
7023 #define DRC_TEST_VAL 0x74657374
7024
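// Self-test: emit a tiny stub into the translation cache (load DRC_TEST_VAL
// into host reg 0 and jump back through host reg 14, the return register),
// then run it to verify that the cache memory is actually executable.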
7025 static int new_dynarec_test(void)
7026 {
7027   int (*testfunc)(void) = (void *)out;
7028   void *beginning;
7029   int ret;
7030
7031   beginning = start_block();
7032   emit_movimm(DRC_TEST_VAL,0); // test
7033   emit_jmpreg(14);
7034   literal_pool(0);
7035   end_block(beginning);
7036   SysPrintf("testing if we can run recompiled code..\n");
7037   ret = testfunc();
7038   if (ret == DRC_TEST_VAL)
7039     SysPrintf("test passed.\n");
7040   else
7041     SysPrintf("test failed: %08x\n", ret);
7042   out=(u_char *)BASE_ADDR;
7043   return ret == DRC_TEST_VAL;
7044 }
7045
7046 // clear the state completely, instead of just marking
7047 // things invalid like invalidate_all_pages() does
7048 void new_dynarec_clear_full()
7049 {
7050   int n;
7051   out=(u_char *)BASE_ADDR;
7052   memset(invalid_code,1,sizeof(invalid_code));
7053   memset(hash_table,0xff,sizeof(hash_table));
7054   memset(mini_ht,-1,sizeof(mini_ht));
7055   memset(restore_candidate,0,sizeof(restore_candidate));
7056   memset(shadow,0,sizeof(shadow));
7057   copy=shadow;
7058   expirep=16384; // Expiry pointer, +2 blocks
7059   pending_exception=0;
7060   literalcount=0;
7061   stop_after_jal=0;
7062   inv_code_start=inv_code_end=~0;
7063   // clear the block lookup tables
7064   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7065   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7066   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7067 }
7068
7069 void new_dynarec_init()
7070 {
7071   SysPrintf("Init new dynarec\n");
7072
7073   // allocate/prepare a buffer for translation cache
7074   // see assem_arm.h for some explanation
7075 #if   defined(BASE_ADDR_FIXED)
7076   if (mmap (translation_cache, 1 << TARGET_SIZE_2,
7077             PROT_READ | PROT_WRITE | PROT_EXEC,
7078             MAP_PRIVATE | MAP_ANONYMOUS,
7079             -1, 0) != translation_cache) {
7080     SysPrintf("mmap() failed: %s\n", strerror(errno));
7081     SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
7082     abort();
7083   }
7084 #elif defined(BASE_ADDR_DYNAMIC)
7085   #ifdef VITA
7086   sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
7087   if (sceBlock < 0)
7088     SysPrintf("sceKernelAllocMemBlockForVM failed\n");
7089   int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
7090   if (ret < 0)
7091     SysPrintf("sceKernelGetMemBlockBase failed\n");
7092   #else
7093   translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
7094             PROT_READ | PROT_WRITE | PROT_EXEC,
7095             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
7096   if (translation_cache == MAP_FAILED) {
7097     SysPrintf("mmap() failed: %s\n", strerror(errno));
7098     abort();
7099   }
7100   #endif
7101 #else
7102   #ifndef NO_WRITE_EXEC
7103   // not all systems allow execute in data segment by default
7104   if (mprotect((void *)BASE_ADDR, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
7105     SysPrintf("mprotect() failed: %s\n", strerror(errno));
7106   #endif
7107 #endif
7108   out=(u_char *)BASE_ADDR;
7109   cycle_multiplier=200;
7110   new_dynarec_clear_full();
7111 #ifdef HOST_IMM8
7112   // Copy this into local area so we don't have to put it in every literal pool
7113   invc_ptr=invalid_code;
7114 #endif
7115   arch_init();
7116   new_dynarec_test();
7117 #ifndef RAM_FIXED
7118   ram_offset=(u_int)rdram-0x80000000;
7119 #endif
7120   if (ram_offset!=0)
7121     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
7122 }
7123
7124 void new_dynarec_cleanup()
7125 {
7126   int n;
7127 #if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
7128   #ifdef VITA
7129   sceKernelFreeMemBlock(sceBlock);
7130   sceBlock = -1;
7131   #else
7132   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0)
7133     SysPrintf("munmap() failed\n");
7134   #endif
7135 #endif
7136   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7137   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7138   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7139   #ifdef ROM_COPY
7140   if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
7141   #endif
7142 }
7143
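// Translate a PSX code address to a host pointer into the loaded RAM or BIOS
// image, and report in *limit the end of the contiguous region so the caller
// knows how far a block may be scanned.  Returns NULL for unmapped addresses.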
7144 static u_int *get_source_start(u_int addr, u_int *limit)
7145 {
7146   if (addr < 0x00200000 ||
7147     (0xa0000000 <= addr && addr < 0xa0200000)) {
7148     // used for BIOS calls mostly?
7149     *limit = (addr&0xa0000000)|0x00200000;
7150     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7151   }
7152   else if (!Config.HLE && (
7153     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7154     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7155     // BIOS
7156     *limit = (addr & 0xfff00000) | 0x80000;
7157     return (u_int *)((u_int)psxR + (addr&0x7ffff));
7158   }
7159   else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
7160     *limit = (addr & 0x80600000) + 0x00200000;
7161     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7162   }
7163   return NULL;
7164 }
7165
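// Scan forward (at most 0x1000 bytes) for a "jr $ra" and return the address
// just past its delay slot; used as a rough estimate of where the function
// containing 'addr' ends.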
7166 static u_int scan_for_ret(u_int addr)
7167 {
7168   u_int limit = 0;
7169   u_int *mem;
7170
7171   mem = get_source_start(addr, &limit);
7172   if (mem == NULL)
7173     return addr;
7174
7175   if (limit > addr + 0x1000)
7176     limit = addr + 0x1000;
7177   for (; addr < limit; addr += 4, mem++) {
7178     if (*mem == 0x03e00008) // jr $ra
7179       return addr + 8;
7180   }
7181   return addr;
7182 }
7183
7184 struct savestate_block {
7185   uint32_t addr;
7186   uint32_t regflags;
7187 };
7188
7189 static int addr_cmp(const void *p1_, const void *p2_)
7190 {
7191   const struct savestate_block *p1 = p1_, *p2 = p2_;
7192   return p1->addr - p2->addr;
7193 }
7194
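// Collect block entry points for savestates: walk the jump_in hash buckets,
// record each block's start address and speculation register flags, sort by
// address, and drop entries that fall inside an already-covered function
// (bounded by scan_for_ret).  Returns the number of bytes written to 'save'.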
7195 int new_dynarec_save_blocks(void *save, int size)
7196 {
7197   struct savestate_block *blocks = save;
7198   int maxcount = size / sizeof(blocks[0]);
7199   struct savestate_block tmp_blocks[1024];
7200   struct ll_entry *head;
7201   int p, s, d, o, bcnt;
7202   u_int addr;
7203
7204   o = 0;
7205   for (p = 0; p < sizeof(jump_in) / sizeof(jump_in[0]); p++) {
7206     bcnt = 0;
7207     for (head = jump_in[p]; head != NULL; head = head->next) {
7208       tmp_blocks[bcnt].addr = head->vaddr;
7209       tmp_blocks[bcnt].regflags = head->reg_sv_flags;
7210       bcnt++;
7211     }
7212     if (bcnt < 1)
7213       continue;
7214     qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
7215
7216     addr = tmp_blocks[0].addr;
7217     for (s = d = 0; s < bcnt; s++) {
7218       if (tmp_blocks[s].addr < addr)
7219         continue;
7220       if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
7221         tmp_blocks[d++] = tmp_blocks[s];
7222       addr = scan_for_ret(tmp_blocks[s].addr);
7223     }
7224
7225     if (o + d > maxcount)
7226       d = maxcount - o;
7227     memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
7228     o += d;
7229   }
7230
7231   return o * sizeof(blocks[0]);
7232 }
7233
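// Precompile the blocks recorded in a savestate.  GPRs are temporarily seeded
// with 0x80000000 (RAM) or 0x1f800000 (scratchpad, per the saved regflags) so
// that address speculation during recompilation roughly matches the state the
// blocks were originally compiled under.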
7234 void new_dynarec_load_blocks(const void *save, int size)
7235 {
7236   const struct savestate_block *blocks = save;
7237   int count = size / sizeof(blocks[0]);
7238   u_int regs_save[32];
7239   uint32_t f;
7240   int i, b;
7241
7242   get_addr(psxRegs.pc);
7243
7244   // change GPRs for speculation to at least partially work..
7245   memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
7246   for (i = 1; i < 32; i++)
7247     psxRegs.GPR.r[i] = 0x80000000;
7248
7249   for (b = 0; b < count; b++) {
7250     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7251       if (f & 1)
7252         psxRegs.GPR.r[i] = 0x1f800000;
7253     }
7254
7255     get_addr(blocks[b].addr);
7256
7257     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7258       if (f & 1)
7259         psxRegs.GPR.r[i] = 0x80000000;
7260     }
7261   }
7262
7263   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
7264 }
7265
7266 int new_recompile_block(int addr)
7267 {
7268   u_int pagelimit = 0;
7269   u_int state_rflags = 0;
7270   int i;
7271
7272   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7273   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7274   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7275   //if(debug)
7276   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7277   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7278   /*if(Count>=312978186) {
7279     rlist();
7280   }*/
7281   //rlist();
7282
7283   // this is just for speculation
7284   for (i = 1; i < 32; i++) {
7285     if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
7286       state_rflags |= 1 << i;
7287   }
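  // state_rflags marks GPRs that currently point at the scratchpad; it is
  // stored alongside the block entry (see ll_add_flags below) so that blocks
  // compiled under different speculation assumptions can be told apart.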
7288
7289   start = (u_int)addr&~3;
7290   //assert(((u_int)addr&1)==0);
7291   new_dynarec_did_compile=1;
7292   if (Config.HLE && start == 0x80001000) // hlecall
7293   {
7294     // XXX: is this enough? Maybe check hleSoftCall?
7295     void *beginning=start_block();
7296     u_int page=get_page(start);
7297
7298     invalid_code[start>>12]=0;
7299     emit_movimm(start,0);
7300     emit_writeword(0,(int)&pcaddr);
7301     emit_jmp((int)new_dyna_leave);
7302     literal_pool(0);
7303     end_block(beginning);
7304     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
7305     return 0;
7306   }
7307
7308   source = get_source_start(start, &pagelimit);
7309   if (source == NULL) {
7310     SysPrintf("Compile at bogus memory address: %08x\n", addr);
7311     exit(1);
7312   }
7313
7314   /* Pass 1: disassemble */
7315   /* Pass 2: register dependencies, branch targets */
7316   /* Pass 3: register allocation */
7317   /* Pass 4: branch dependencies */
7318   /* Pass 5: pre-alloc */
7319   /* Pass 6: optimize clean/dirty state */
7320   /* Pass 7: flag 32-bit registers */
7321   /* Pass 8: assembly */
7322   /* Pass 9: linker */
7323   /* Pass 10: garbage collection / free memory */
7324
7325   int j;
7326   int done=0;
7327   unsigned int type,op,op2;
7328
7329   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7330
7331   /* Pass 1 disassembly */
7332
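  // Decode one instruction per iteration into the per-index tables
  // (insn/itype/opcode/opcode2, rs1/rs2/rt1/rt2, imm, gte_rs/gte_rt, ba, ...);
  // the loop ends once a block-terminating condition below sets 'done'.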
7333   for(i=0;!done;i++) {
7334     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
7335     minimum_free_regs[i]=0;
7336     opcode[i]=op=source[i]>>26;
7337     switch(op)
7338     {
7339       case 0x00: strcpy(insn[i],"special"); type=NI;
7340         op2=source[i]&0x3f;
7341         switch(op2)
7342         {
7343           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7344           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7345           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7346           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7347           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7348           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7349           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7350           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7351           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7352           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7353           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7354           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7355           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7356           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7357           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7358           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7359           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7360           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7361           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7362           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7363           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7364           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7365           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7366           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7367           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7368           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7369           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7370           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7371           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7372           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7373           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7374           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7375           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7376           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7377           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7378 #if 0
7379           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7380           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7381           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7382           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7383           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7384           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7385           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7386           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7387           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7388           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7389           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7390           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7391           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7392           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7393           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7394           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7395           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7396 #endif
7397         }
7398         break;
7399       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7400         op2=(source[i]>>16)&0x1f;
7401         switch(op2)
7402         {
7403           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7404           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7405           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7406           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7407           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7408           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7409           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7410           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7411           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7412           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7413           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7414           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7415           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7416           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7417         }
7418         break;
7419       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7420       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7421       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7422       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7423       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7424       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7425       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7426       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7427       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7428       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7429       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7430       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7431       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7432       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7433       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7434         op2=(source[i]>>21)&0x1f;
7435         switch(op2)
7436         {
7437           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7438           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7439           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7440           switch(source[i]&0x3f)
7441           {
7442             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7443             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7444             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7445             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7446             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7447             //case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7448           }
7449         }
7450         break;
7451       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7452         op2=(source[i]>>21)&0x1f;
7453         switch(op2)
7454         {
7455           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7456           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7457           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7458           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7459           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7460           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7461           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7462           switch((source[i]>>16)&0x3)
7463           {
7464             case 0x00: strcpy(insn[i],"BC1F"); break;
7465             case 0x01: strcpy(insn[i],"BC1T"); break;
7466             case 0x02: strcpy(insn[i],"BC1FL"); break;
7467             case 0x03: strcpy(insn[i],"BC1TL"); break;
7468           }
7469           break;
7470           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7471           switch(source[i]&0x3f)
7472           {
7473             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7474             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7475             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7476             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7477             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7478             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7479             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7480             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7481             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7482             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7483             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7484             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7485             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7486             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7487             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7488             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7489             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7490             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7491             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7492             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7493             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7494             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7495             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7496             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7497             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7498             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7499             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7500             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7501             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7502             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7503             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7504             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7505             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7506             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
7507             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
7508           }
7509           break;
7510           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
7511           switch(source[i]&0x3f)
7512           {
7513             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
7514             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
7515             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
7516             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
7517             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
7518             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
7519             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
7520             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
7521             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
7522             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
7523             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
7524             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
7525             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
7526             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
7527             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
7528             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
7529             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
7530             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
7531             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
7532             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
7533             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
7534             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
7535             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
7536             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
7537             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
7538             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
7539             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
7540             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
7541             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
7542             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
7543             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
7544             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
7545             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
7546             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
7547             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
7548           }
7549           break;
7550           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
7551           switch(source[i]&0x3f)
7552           {
7553             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
7554             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
7555           }
7556           break;
7557           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
7558           switch(source[i]&0x3f)
7559           {
7560             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
7561             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
7562           }
7563           break;
7564         }
7565         break;
7566 #if 0
7567       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
7568       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
7569       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
7570       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
7571       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
7572       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
7573       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
7574       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
7575 #endif
7576       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
7577       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
7578       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
7579       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
7580       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
7581       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
7582       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
7583 #if 0
7584       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
7585 #endif
7586       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
7587       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
7588       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
7589       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
7590 #if 0
7591       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
7592       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
7593 #endif
7594       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
7595       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
7596       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
7597       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
7598 #if 0
7599       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
7600       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
7601       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
7602 #endif
7603       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
7604       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
7605 #if 0
7606       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
7607       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
7608       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
7609 #endif
7610       case 0x12: strcpy(insn[i],"COP2"); type=NI;
7611         op2=(source[i]>>21)&0x1f;
7612         //if (op2 & 0x10) {
7613         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
7614           if (gte_handlers[source[i]&0x3f]!=NULL) {
7615             if (gte_regnames[source[i]&0x3f]!=NULL)
7616               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
7617             else
7618               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
7619             type=C2OP;
7620           }
7621         }
7622         else switch(op2)
7623         {
7624           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
7625           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
7626           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
7627           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
7628         }
7629         break;
7630       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
7631       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
7632       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
7633       default: strcpy(insn[i],"???"); type=NI;
7634         SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
7635         break;
7636     }
7637     itype[i]=type;
7638     opcode2[i]=op2;
7639     /* Get registers/immediates */
7640     lt1[i]=0;
7641     us1[i]=0;
7642     us2[i]=0;
7643     dep1[i]=0;
7644     dep2[i]=0;
7645     gte_rs[i]=gte_rt[i]=0;
7646     switch(type) {
7647       case LOAD:
7648         rs1[i]=(source[i]>>21)&0x1f;
7649         rs2[i]=0;
7650         rt1[i]=(source[i]>>16)&0x1f;
7651         rt2[i]=0;
7652         imm[i]=(short)source[i];
7653         break;
7654       case STORE:
7655       case STORELR:
7656         rs1[i]=(source[i]>>21)&0x1f;
7657         rs2[i]=(source[i]>>16)&0x1f;
7658         rt1[i]=0;
7659         rt2[i]=0;
7660         imm[i]=(short)source[i];
7661         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
7662         break;
7663       case LOADLR:
7664         // LWL/LWR only load part of the register,
7665         // therefore the target register must be treated as a source too
7666         rs1[i]=(source[i]>>21)&0x1f;
7667         rs2[i]=(source[i]>>16)&0x1f;
7668         rt1[i]=(source[i]>>16)&0x1f;
7669         rt2[i]=0;
7670         imm[i]=(short)source[i];
7671         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
7672         if(op==0x26) dep1[i]=rt1[i]; // LWR
7673         break;
7674       case IMM16:
7675         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
7676         else rs1[i]=(source[i]>>21)&0x1f;
7677         rs2[i]=0;
7678         rt1[i]=(source[i]>>16)&0x1f;
7679         rt2[i]=0;
7680         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
7681           imm[i]=(unsigned short)source[i];
7682         }else{
7683           imm[i]=(short)source[i];
7684         }
7685         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
7686         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
7687         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
7688         break;
7689       case UJUMP:
7690         rs1[i]=0;
7691         rs2[i]=0;
7692         rt1[i]=0;
7693         rt2[i]=0;
7694         // The JAL instruction writes to r31.
7695         if (op&1) {
7696           rt1[i]=31;
7697         }
7698         rs2[i]=CCREG;
7699         break;
7700       case RJUMP:
7701         rs1[i]=(source[i]>>21)&0x1f;
7702         rs2[i]=0;
7703         rt1[i]=0;
7704         rt2[i]=0;
7705         // The JALR instruction writes to rd.
7706         if (op2&1) {
7707           rt1[i]=(source[i]>>11)&0x1f;
7708         }
7709         rs2[i]=CCREG;
7710         break;
7711       case CJUMP:
7712         rs1[i]=(source[i]>>21)&0x1f;
7713         rs2[i]=(source[i]>>16)&0x1f;
7714         rt1[i]=0;
7715         rt2[i]=0;
7716         if(op&2) { // BGTZ/BLEZ
7717           rs2[i]=0;
7718         }
7719         us1[i]=rs1[i];
7720         us2[i]=rs2[i];
7721         likely[i]=op>>4;
7722         break;
7723       case SJUMP:
7724         rs1[i]=(source[i]>>21)&0x1f;
7725         rs2[i]=CCREG;
7726         rt1[i]=0;
7727         rt2[i]=0;
7728         us1[i]=rs1[i];
7729         if(op2&0x10) { // BxxAL
7730           rt1[i]=31;
7731           // NOTE: If the branch is not taken, r31 is still overwritten
7732         }
7733         likely[i]=(op2&2)>>1;
7734         break;
7735       case FJUMP:
7736         rs1[i]=FSREG;
7737         rs2[i]=CSREG;
7738         rt1[i]=0;
7739         rt2[i]=0;
7740         likely[i]=((source[i])>>17)&1;
7741         break;
7742       case ALU:
7743         rs1[i]=(source[i]>>21)&0x1f; // source
7744         rs2[i]=(source[i]>>16)&0x1f; // second operand
7745         rt1[i]=(source[i]>>11)&0x1f; // destination
7746         rt2[i]=0;
7747         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7748           us1[i]=rs1[i];us2[i]=rs2[i];
7749         }
7750         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7751           dep1[i]=rs1[i];dep2[i]=rs2[i];
7752         }
7753         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
7754           dep1[i]=rs1[i];dep2[i]=rs2[i];
7755         }
7756         break;
7757       case MULTDIV:
7758         rs1[i]=(source[i]>>21)&0x1f; // source
7759         rs2[i]=(source[i]>>16)&0x1f; // multiplier/divisor
7760         rt1[i]=HIREG;
7761         rt2[i]=LOREG;
7762         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7763           us1[i]=rs1[i];us2[i]=rs2[i];
7764         }
7765         break;
7766       case MOV:
7767         rs1[i]=0;
7768         rs2[i]=0;
7769         rt1[i]=0;
7770         rt2[i]=0;
7771         if(op2==0x10) rs1[i]=HIREG; // MFHI
7772         if(op2==0x11) rt1[i]=HIREG; // MTHI
7773         if(op2==0x12) rs1[i]=LOREG; // MFLO
7774         if(op2==0x13) rt1[i]=LOREG; // MTLO
7775         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
7776         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
7777         dep1[i]=rs1[i];
7778         break;
7779       case SHIFT:
7780         rs1[i]=(source[i]>>16)&0x1f; // target of shift
7781         rs2[i]=(source[i]>>21)&0x1f; // shift amount
7782         rt1[i]=(source[i]>>11)&0x1f; // destination
7783         rt2[i]=0;
7784         // DSLLV/DSRLV/DSRAV are 64-bit
7785         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
7786         break;
7787       case SHIFTIMM:
7788         rs1[i]=(source[i]>>16)&0x1f;
7789         rs2[i]=0;
7790         rt1[i]=(source[i]>>11)&0x1f;
7791         rt2[i]=0;
7792         imm[i]=(source[i]>>6)&0x1f;
7793         // DSxx32 instructions
7794         if(op2>=0x3c) imm[i]|=0x20;
7795         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
7796         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
7797         break;
7798       case COP0:
7799         rs1[i]=0;
7800         rs2[i]=0;
7801         rt1[i]=0;
7802         rt2[i]=0;
7803         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
7804         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
7805         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
7806         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
7807         break;
7808       case COP1:
7809         rs1[i]=0;
7810         rs2[i]=0;
7811         rt1[i]=0;
7812         rt2[i]=0;
7813         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
7814         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
7815         if(op2==5) us1[i]=rs1[i]; // DMTC1
7816         rs2[i]=CSREG;
7817         break;
7818       case COP2:
7819         rs1[i]=0;
7820         rs2[i]=0;
7821         rt1[i]=0;
7822         rt2[i]=0;
7823         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
7824         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
7825         rs2[i]=CSREG;
7826         int gr=(source[i]>>11)&0x1F;
7827         switch(op2)
7828         {
7829           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
7830           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
7831           case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
7832           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
7833         }
7834         break;
7835       case C1LS:
7836         rs1[i]=(source[i]>>21)&0x1F;
7837         rs2[i]=CSREG;
7838         rt1[i]=0;
7839         rt2[i]=0;
7840         imm[i]=(short)source[i];
7841         break;
7842       case C2LS:
7843         rs1[i]=(source[i]>>21)&0x1F;
7844         rs2[i]=0;
7845         rt1[i]=0;
7846         rt2[i]=0;
7847         imm[i]=(short)source[i];
7848         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
7849         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
7850         break;
7851       case C2OP:
7852         rs1[i]=0;
7853         rs2[i]=0;
7854         rt1[i]=0;
7855         rt2[i]=0;
7856         gte_rs[i]=gte_reg_reads[source[i]&0x3f];
7857         gte_rt[i]=gte_reg_writes[source[i]&0x3f];
7858         gte_rt[i]|=1ll<<63; // every op changes flags
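        // MVMVA reads only the source vector selected by bits 15-16 of the
        // opcode: V0/V1/V2 (data regs 0-5, two per vector) or, for v==3,
        // IR1-IR3 (regs 9-11); narrow the generic read mask accordingly.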
7859         if((source[i]&0x3f)==GTE_MVMVA) {
7860           int v = (source[i] >> 15) & 3;
7861           gte_rs[i]&=~0xe3fll;
7862           if(v==3) gte_rs[i]|=0xe00ll;
7863           else gte_rs[i]|=3ll<<(v*2);
7864         }
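             // MVMVA: bits 15-16 select the multiply vector (0-2 = V0/V1/V2,
             // i.e. data regs 0-1/2-3/4-5; 3 = IR1-IR3, data regs 9-11).
             // 0xe3f covers exactly those registers, so the read set above
             // keeps only the vector that is actually used.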
7865         break;
7866       case FLOAT:
7867       case FCONV:
7868         rs1[i]=0;
7869         rs2[i]=CSREG;
7870         rt1[i]=0;
7871         rt2[i]=0;
7872         break;
7873       case FCOMP:
7874         rs1[i]=FSREG;
7875         rs2[i]=CSREG;
7876         rt1[i]=FSREG;
7877         rt2[i]=0;
7878         break;
7879       case SYSCALL:
7880       case HLECALL:
7881       case INTCALL:
7882         rs1[i]=CCREG;
7883         rs2[i]=0;
7884         rt1[i]=0;
7885         rt2[i]=0;
7886         break;
7887       default:
7888         rs1[i]=0;
7889         rs2[i]=0;
7890         rt1[i]=0;
7891         rt2[i]=0;
7892     }
7893     /* Calculate branch target addresses */
7894     if(type==UJUMP)
7895       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
7896     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
7897       ba[i]=start+i*4+8; // Ignore never taken branch
7898     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
7899       ba[i]=start+i*4+8; // Ignore never taken branch
7900     else if(type==CJUMP||type==SJUMP||type==FJUMP)
7901       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
7902     else ba[i]=-1;
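         // The target arithmetic follows the MIPS encodings:
         // - J/JAL: (source[i]<<6)>>4 isolates the 26-bit instr_index and
         //   shifts it left by 2; the top nibble comes from the delay slot
         //   address (start+i*4+4).
         // - conditional branches: (source[i]<<16)>>14 sign-extends the
         //   16-bit offset and multiplies it by 4, relative to the delay
         //   slot address, e.g. offset -1 (0xffff) gives ba[i]=start+i*4,
         //   a branch to itself.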
7903     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
7904       int do_in_intrp=0;
7905       // branch in delay slot?
7906       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7907         // branches in delay slots aren't supported; convert the first branch to an interpreter call in case it's hit
7908         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
7909         do_in_intrp=1;
7910       }
7911       // basic load delay detection
7912       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
7913         int t=(ba[i-1]-start)/4;
7914         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
7915           // jump target wants DS result - potential load delay effect
7916           SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
7917           do_in_intrp=1;
7918           bt[t+1]=1; // expected return from interpreter
7919         }
7920         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
7921               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
7922           // v0 overwrite like this is a sign of trouble, bail out
7923           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
7924           do_in_intrp=1;
7925         }
7926       }
7927       if(do_in_intrp) {
7928         rs1[i-1]=CCREG;
7929         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
7930         ba[i-1]=-1;
7931         itype[i-1]=INTCALL;
7932         done=2;
7933         i--; // don't compile the DS
7934       }
7935     }
7936     /* Is this the end of the block? */
7937     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
7938       if(rt1[i-1]==0) { // J/JR (no link): end block here; after JAL/JALR we continue past the call
7939         done=2;
7940       }
7941       else {
7942         if(stop_after_jal) done=1;
7943         // Stop on BREAK
7944         if((source[i+1]&0xfc00003f)==0x0d) done=1;
7945       }
7946       // Don't recompile stuff that's already compiled
7947       if(check_addr(start+i*4+4)) done=1;
7948       // Don't get too close to the limit
7949       if(i>MAXBLOCK/2) done=1;
7950     }
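         // Note: (source[i-1]>>16)==0x1000 matches "beq $zero,$zero,...",
         // an always-taken branch, so it is treated like an unconditional
         // jump; after JAL/JALR (rt1!=0) the callee returns here, which is
         // why compilation normally continues past the call.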
7951     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
7952     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
7953     if(done==2) {
7954       // Does the block continue due to a branch?
7955       for(j=i-1;j>=0;j--)
7956       {
7957         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
7958         if(ba[j]==start+i*4+4) done=j=0;
7959         if(ba[j]==start+i*4+8) done=j=0;
7960       }
7961     }
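         // done==1 forces the block to end here; done==2 is tentative and
         // is cleared above when an already-decoded branch targets this
         // point or the instructions just after it, so the block keeps
         // going in that case.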
7962     //assert(i<MAXBLOCK-1);
7963     if(start+i*4==pagelimit-4) done=1;
7964     assert(start+i*4<pagelimit);
7965     if (i==MAXBLOCK-1) done=1;
7966     // Stop if we're compiling junk
7967     if(itype[i]==NI&&opcode[i]==0x11) {
7968       done=stop_after_jal=1;
7969       SysPrintf("Disabled speculative precompilation\n");
7970     }
7971   }
7972   slen=i;
7973   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
7974     if(start+i*4==pagelimit) {
7975       itype[i-1]=SPAN;
7976     }
7977   }
7978   assert(slen>0);
7979
7980   /* Pass 2 - Register dependencies and branch targets */
7981
7982   unneeded_registers(0,slen-1,0);
7983
7984   /* Pass 3 - Register allocation */
7985
7986   struct regstat current; // Current register allocations/status
7987   current.is32=1;
7988   current.dirty=0;
7989   current.u=unneeded_reg[0];
7990   current.uu=unneeded_reg_upper[0];
7991   clear_all_regs(current.regmap);
7992   alloc_reg(&current,0,CCREG);
7993   dirty_reg(&current,CCREG);
7994   current.isconst=0;
7995   current.wasconst=0;
7996   current.waswritten=0;
7997   int ds=0;
7998   int cc=0;
7999   int hr=-1;
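       // "current" carries the allocator state from one instruction to the
       // next: regmap maps each host register to the MIPS register it holds
       // (-1 = free), dirty marks host registers whose value hasn't been
       // written back to the register file in memory, and isconst marks
       // registers holding a known constant (values kept in constmap).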
8000
8001   if((u_int)addr&1) {
8002     // First instruction is delay slot
8003     cc=-1;
8004     bt[1]=1;
8005     ds=1;
8006     unneeded_reg[0]=1;
8007     unneeded_reg_upper[0]=1;
8008     current.regmap[HOST_BTREG]=BTREG;
8009   }
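       // An odd addr flags a block that starts inside a branch delay slot;
       // BTREG (in HOST_BTREG) then carries the branch target to jump to
       // once the delay-slot instruction has executed.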
8010
8011   for(i=0;i<slen;i++)
8012   {
8013     if(bt[i])
8014     {
8015       int hr;
8016       for(hr=0;hr<HOST_REGS;hr++)
8017       {
8018         // Is this really necessary?
8019         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8020       }
8021       current.isconst=0;
8022       current.waswritten=0;
8023     }
8024     if(i>1)
8025     {
8026       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8027       {
8028         if(rs1[i-2]==0||rs2[i-2]==0)
8029         {
8030           if(rs1[i-2]) {
8031             current.is32|=1LL<<rs1[i-2];
8032             int hr=get_reg(current.regmap,rs1[i-2]|64);
8033             if(hr>=0) current.regmap[hr]=-1;
8034           }
8035           if(rs2[i-2]) {
8036             current.is32|=1LL<<rs2[i-2];
8037             int hr=get_reg(current.regmap,rs2[i-2]|64);
8038             if(hr>=0) current.regmap[hr]=-1;
8039           }
8040         }
8041       }
8042     }
8043     current.is32=-1LL;
8044
8045     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8046     regs[i].wasconst=current.isconst;
8047     regs[i].was32=current.is32;
8048     regs[i].wasdirty=current.dirty;
8049     regs[i].loadedconst=0;
8050     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8051       if(i+1<slen) {
8052         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8053         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8054         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8055         current.u|=1;
8056         current.uu|=1;
8057       } else {
8058         current.u=1;
8059         current.uu=1;
8060       }
8061     } else {
8062       if(i+1<slen) {
8063         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8064         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8065         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8066         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8067         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8068         current.u|=1;
8069         current.uu|=1;
8070       } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
8071     }
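         // u/uu are bitmasks of MIPS registers whose lower/upper 32 bits
         // are no longer needed past this point; us1/us2 are sources that
         // need their full 64-bit value and dep1/dep2 are registers a
         // result's upper half depends on.  Bit 0 stays set since $zero
         // never needs to be preserved.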
8072     is_ds[i]=ds;
8073     if(ds) {
8074       ds=0; // Skip delay slot, already allocated as part of branch
8075       // ...but we need to alloc it in case something jumps here
8076       if(i+1<slen) {
8077         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8078         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8079       }else{
8080         current.u=branch_unneeded_reg[i-1];
8081         current.uu=branch_unneeded_reg_upper[i-1];
8082       }
8083       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8084       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8085       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8086       current.u|=1;
8087       current.uu|=1;
8088       struct regstat temp;
8089       memcpy(&temp,&current,sizeof(current));
8090       temp.wasdirty=temp.dirty;
8091       temp.was32=temp.is32;
8092       // TODO: Take into account unconditional branches, as below
8093       delayslot_alloc(&temp,i);
8094       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8095       regs[i].wasdirty=temp.wasdirty;
8096       regs[i].was32=temp.was32;
8097       regs[i].dirty=temp.dirty;
8098       regs[i].is32=temp.is32;
8099       regs[i].isconst=0;
8100       regs[i].wasconst=0;
8101       current.isconst=0;
8102       // Create entry (branch target) regmap
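           // regmap_entry is the mapping a jump into this instruction must
           // provide (what the generated code expects on entry); regmap is
           // the mapping after this instruction's own allocation.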
8103       for(hr=0;hr<HOST_REGS;hr++)
8104       {
8105         int r=temp.regmap[hr];
8106         if(r>=0) {
8107           if(r!=regmap_pre[i][hr]) {
8108             regs[i].regmap_entry[hr]=-1;
8109           }
8110           else
8111           {
8112             if(r<64){
8113               if((current.u>>r)&1) {
8114                 regs[i].regmap_entry[hr]=-1;
8115                 regs[i].regmap[hr]=-1;
8116                 //Don't clear regs in the delay slot as the branch might need them
8117                 //current.regmap[hr]=-1;
8118               }else
8119                 regs[i].regmap_entry[hr]=r;
8120             }
8121             else {
8122               if((current.uu>>(r&63))&1) {
8123                 regs[i].regmap_entry[hr]=-1;
8124                 regs[i].regmap[hr]=-1;
8125                 //Don't clear regs in the delay slot as the branch might need them
8126                 //current.regmap[hr]=-1;
8127               }else
8128                 regs[i].regmap_entry[hr]=r;
8129             }
8130           }
8131         } else {
8132           // First instruction expects CCREG to be allocated
8133           if(i==0&&hr==HOST_CCREG)
8134             regs[i].regmap_entry[hr]=CCREG;
8135           else
8136             regs[i].regmap_entry[hr]=-1;
8137         }
8138       }
8139     }
8140     else { // Not delay slot
8141       switch(itype[i]) {
8142         case UJUMP:
8143           //current.isconst=0; // DEBUG
8144           //current.wasconst=0; // DEBUG
8145           //regs[i].wasconst=0; // DEBUG
8146           clear_const(&current,rt1[i]);
8147           alloc_cc(&current,i);
8148           dirty_reg(&current,CCREG);
8149           if (rt1[i]==31) {
8150             alloc_reg(&current,i,31);
8151             dirty_reg(&current,31);
8152             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8153             //assert(rt1[i+1]!=rt1[i]);
8154             #ifdef REG_PREFETCH
8155             alloc_reg(&current,i,PTEMP);
8156             #endif
8157             //current.is32|=1LL<<rt1[i];
8158           }
8159           ooo[i]=1;
8160           delayslot_alloc(&current,i+1);
8161           //current.isconst=0; // DEBUG
8162           ds=1;
8163           //printf("i=%d, isconst=%x\n",i,current.isconst);
8164           break;
8165         case RJUMP:
8166           //current.isconst=0;
8167           //current.wasconst=0;
8168           //regs[i].wasconst=0;
8169           clear_const(&current,rs1[i]);
8170           clear_const(&current,rt1[i]);
8171           alloc_cc(&current,i);
8172           dirty_reg(&current,CCREG);
8173           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8174             alloc_reg(&current,i,rs1[i]);
8175             if (rt1[i]!=0) {
8176               alloc_reg(&current,i,rt1[i]);
8177               dirty_reg(&current,rt1[i]);
8178               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
8179               assert(rt1[i+1]!=rt1[i]);
8180               #ifdef REG_PREFETCH
8181               alloc_reg(&current,i,PTEMP);
8182               #endif
8183             }
8184             #ifdef USE_MINI_HT
8185             if(rs1[i]==31) { // JALR
8186               alloc_reg(&current,i,RHASH);
8187               #ifndef HOST_IMM_ADDR32
8188               alloc_reg(&current,i,RHTBL);
8189               #endif
8190             }
8191             #endif
8192             delayslot_alloc(&current,i+1);
8193           } else {
8194             // The delay slot overwrites our source register,
8195             // allocate a temporary register to hold the old value.
8196             current.isconst=0;
8197             current.wasconst=0;
8198             regs[i].wasconst=0;
8199             delayslot_alloc(&current,i+1);
8200             current.isconst=0;
8201             alloc_reg(&current,i,RTEMP);
8202           }
8203           //current.isconst=0; // DEBUG
8204           ooo[i]=1;
8205           ds=1;
8206           break;
8207         case CJUMP:
8208           //current.isconst=0;
8209           //current.wasconst=0;
8210           //regs[i].wasconst=0;
8211           clear_const(&current,rs1[i]);
8212           clear_const(&current,rs2[i]);
8213           if((opcode[i]&0x3E)==4) // BEQ/BNE
8214           {
8215             alloc_cc(&current,i);
8216             dirty_reg(&current,CCREG);
8217             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8218             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8219             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8220             {
8221               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8222               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8223             }
8224             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8225                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8226               // The delay slot overwrites one of our conditions.
8227               // Allocate the branch condition registers instead.
8228               current.isconst=0;
8229               current.wasconst=0;
8230               regs[i].wasconst=0;
8231               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8232               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8233               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8234               {
8235                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8236                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8237               }
8238             }
8239             else
8240             {
8241               ooo[i]=1;
8242               delayslot_alloc(&current,i+1);
8243             }
8244           }
8245           else
8246           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8247           {
8248             alloc_cc(&current,i);
8249             dirty_reg(&current,CCREG);
8250             alloc_reg(&current,i,rs1[i]);
8251             if(!(current.is32>>rs1[i]&1))
8252             {
8253               alloc_reg64(&current,i,rs1[i]);
8254             }
8255             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8256               // The delay slot overwrites one of our conditions.
8257               // Allocate the branch condition registers instead.
8258               current.isconst=0;
8259               current.wasconst=0;
8260               regs[i].wasconst=0;
8261               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8262               if(!((current.is32>>rs1[i])&1))
8263               {
8264                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8265               }
8266             }
8267             else
8268             {
8269               ooo[i]=1;
8270               delayslot_alloc(&current,i+1);
8271             }
8272           }
8273           else
8274           // Don't alloc the delay slot yet because we might not execute it
8275           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8276           {
8277             current.isconst=0;
8278             current.wasconst=0;
8279             regs[i].wasconst=0;
8280             alloc_cc(&current,i);
8281             dirty_reg(&current,CCREG);
8282             alloc_reg(&current,i,rs1[i]);
8283             alloc_reg(&current,i,rs2[i]);
8284             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8285             {
8286               alloc_reg64(&current,i,rs1[i]);
8287               alloc_reg64(&current,i,rs2[i]);
8288             }
8289           }
8290           else
8291           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8292           {
8293             current.isconst=0;
8294             current.wasconst=0;
8295             regs[i].wasconst=0;
8296             alloc_cc(&current,i);
8297             dirty_reg(&current,CCREG);
8298             alloc_reg(&current,i,rs1[i]);
8299             if(!(current.is32>>rs1[i]&1))
8300             {
8301               alloc_reg64(&current,i,rs1[i]);
8302             }
8303           }
8304           ds=1;
8305           //current.isconst=0;
8306           break;
8307         case SJUMP:
8308           //current.isconst=0;
8309           //current.wasconst=0;
8310           //regs[i].wasconst=0;
8311           clear_const(&current,rs1[i]);
8312           clear_const(&current,rt1[i]);
8313           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8314           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8315           {
8316             alloc_cc(&current,i);
8317             dirty_reg(&current,CCREG);
8318             alloc_reg(&current,i,rs1[i]);
8319             if(!(current.is32>>rs1[i]&1))
8320             {
8321               alloc_reg64(&current,i,rs1[i]);
8322             }
8323             if (rt1[i]==31) { // BLTZAL/BGEZAL
8324               alloc_reg(&current,i,31);
8325               dirty_reg(&current,31);
8326               //#ifdef REG_PREFETCH
8327               //alloc_reg(&current,i,PTEMP);
8328               //#endif
8329               //current.is32|=1LL<<rt1[i];
8330             }
8331             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
8332                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
8333               // Allocate the branch condition registers instead.
8334               current.isconst=0;
8335               current.wasconst=0;
8336               regs[i].wasconst=0;
8337               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8338               if(!((current.is32>>rs1[i])&1))
8339               {
8340                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8341               }
8342             }
8343             else
8344             {
8345               ooo[i]=1;
8346               delayslot_alloc(&current,i+1);
8347             }
8348           }
8349           else
8350           // Don't alloc the delay slot yet because we might not execute it
8351           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8352           {
8353             current.isconst=0;
8354             current.wasconst=0;
8355             regs[i].wasconst=0;
8356             alloc_cc(&current,i);
8357             dirty_reg(&current,CCREG);
8358             alloc_reg(&current,i,rs1[i]);
8359             if(!(current.is32>>rs1[i]&1))
8360             {
8361               alloc_reg64(&current,i,rs1[i]);
8362             }
8363           }
8364           ds=1;
8365           //current.isconst=0;
8366           break;
8367         case FJUMP:
8368           current.isconst=0;
8369           current.wasconst=0;
8370           regs[i].wasconst=0;
8371           if(likely[i]==0) // BC1F/BC1T
8372           {
8373             // TODO: Theoretically we can run out of registers here on x86.
8374             // The delay slot can allocate up to six, and we need to check
8375             // CSREG before executing the delay slot.  Possibly we can drop
8376             // the cycle count and then reload it after checking that the
8377             // FPU is in a usable state, or don't do out-of-order execution.
8378             alloc_cc(&current,i);
8379             dirty_reg(&current,CCREG);
8380             alloc_reg(&current,i,FSREG);
8381             alloc_reg(&current,i,CSREG);
8382             if(itype[i+1]==FCOMP) {
8383               // The delay slot overwrites the branch condition.
8384               // Allocate the branch condition registers instead.
8385               alloc_cc(&current,i);
8386               dirty_reg(&current,CCREG);
8387               alloc_reg(&current,i,CSREG);
8388               alloc_reg(&current,i,FSREG);
8389             }
8390             else {
8391               ooo[i]=1;
8392               delayslot_alloc(&current,i+1);
8393               alloc_reg(&current,i+1,CSREG);
8394             }
8395           }
8396           else
8397           // Don't alloc the delay slot yet because we might not execute it
8398           if(likely[i]) // BC1FL/BC1TL
8399           {
8400             alloc_cc(&current,i);
8401             dirty_reg(&current,CCREG);
8402             alloc_reg(&current,i,CSREG);
8403             alloc_reg(&current,i,FSREG);
8404           }
8405           ds=1;
8406           current.isconst=0;
8407           break;
8408         case IMM16:
8409           imm16_alloc(&current,i);
8410           break;
8411         case LOAD:
8412         case LOADLR:
8413           load_alloc(&current,i);
8414           break;
8415         case STORE:
8416         case STORELR:
8417           store_alloc(&current,i);
8418           break;
8419         case ALU:
8420           alu_alloc(&current,i);
8421           break;
8422         case SHIFT:
8423           shift_alloc(&current,i);
8424           break;
8425         case MULTDIV:
8426           multdiv_alloc(&current,i);
8427           break;
8428         case SHIFTIMM:
8429           shiftimm_alloc(&current,i);
8430           break;
8431         case MOV:
8432           mov_alloc(&current,i);
8433           break;
8434         case COP0:
8435           cop0_alloc(&current,i);
8436           break;
8437         case COP1:
8438         case COP2:
8439           cop1_alloc(&current,i);
8440           break;
8441         case C1LS:
8442           c1ls_alloc(&current,i);
8443           break;
8444         case C2LS:
8445           c2ls_alloc(&current,i);
8446           break;
8447         case C2OP:
8448           c2op_alloc(&current,i);
8449           break;
8450         case FCONV:
8451           fconv_alloc(&current,i);
8452           break;
8453         case FLOAT:
8454           float_alloc(&current,i);
8455           break;
8456         case FCOMP:
8457           fcomp_alloc(&current,i);
8458           break;
8459         case SYSCALL:
8460         case HLECALL:
8461         case INTCALL:
8462           syscall_alloc(&current,i);
8463           break;
8464         case SPAN:
8465           pagespan_alloc(&current,i);
8466           break;
8467       }
8468
8469       // Drop the upper half of registers that have become 32-bit
8470       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8471       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8472         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8473         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8474         current.uu|=1;
8475       } else {
8476         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
8477         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8478         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8479         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8480         current.uu|=1;
8481       }
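           // is32 tracks MIPS registers that currently hold properly
           // sign-extended 32-bit values; marking their upper halves as
           // unneeded (uu) keeps host registers from being wasted on them.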
8482
8483       // Create entry (branch target) regmap
8484       for(hr=0;hr<HOST_REGS;hr++)
8485       {
8486         int r,or;
8487         r=current.regmap[hr];
8488         if(r>=0) {
8489           if(r!=regmap_pre[i][hr]) {
8490             // TODO: delay slot (?)
8491             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
8492             if(or<0||(r&63)>=TEMPREG){
8493               regs[i].regmap_entry[hr]=-1;
8494             }
8495             else
8496             {
8497               // Just move it to a different register
8498               regs[i].regmap_entry[hr]=r;
8499               // If it was dirty before, it's still dirty
8500               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
8501             }
8502           }
8503           else
8504           {
8505             // Unneeded
8506             if(r==0){
8507               regs[i].regmap_entry[hr]=0;
8508             }
8509             else
8510             if(r<64){
8511               if((current.u>>r)&1) {
8512                 regs[i].regmap_entry[hr]=-1;
8513                 //regs[i].regmap[hr]=-1;
8514                 current.regmap[hr]=-1;
8515               }else
8516                 regs[i].regmap_entry[hr]=r;
8517             }
8518             else {
8519               if((current.uu>>(r&63))&1) {
8520                 regs[i].regmap_entry[hr]=-1;
8521                 //regs[i].regmap[hr]=-1;
8522                 current.regmap[hr]=-1;
8523               }else
8524                 regs[i].regmap_entry[hr]=r;
8525             }
8526           }
8527         } else {
8528           // Branches expect CCREG to be allocated at the target
8529           if(regmap_pre[i][hr]==CCREG)
8530             regs[i].regmap_entry[hr]=CCREG;
8531           else
8532             regs[i].regmap_entry[hr]=-1;
8533         }
8534       }
8535       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
8536     }
8537
8538     if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
8539       current.waswritten|=1<<rs1[i-1];
8540     current.waswritten&=~(1<<rt1[i]);
8541     current.waswritten&=~(1<<rt2[i]);
8542     if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
8543       current.waswritten&=~(1<<rs1[i]);
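           // waswritten tracks registers recently used as a store base with
           // a small (<0x800) offset; stores through such a base can
           // presumably skip part of the self-modifying-code checks later.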
8544
8545     /* Branch post-alloc */
8546     if(i>0)
8547     {
8548       current.was32=current.is32;
8549       current.wasdirty=current.dirty;
8550       switch(itype[i-1]) {
8551         case UJUMP:
8552           memcpy(&branch_regs[i-1],&current,sizeof(current));
8553           branch_regs[i-1].isconst=0;
8554           branch_regs[i-1].wasconst=0;
8555           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8556           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8557           alloc_cc(&branch_regs[i-1],i-1);
8558           dirty_reg(&branch_regs[i-1],CCREG);
8559           if(rt1[i-1]==31) { // JAL
8560             alloc_reg(&branch_regs[i-1],i-1,31);
8561             dirty_reg(&branch_regs[i-1],31);
8562             branch_regs[i-1].is32|=1LL<<31;
8563           }
8564           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8565           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8566           break;
8567         case RJUMP:
8568           memcpy(&branch_regs[i-1],&current,sizeof(current));
8569           branch_regs[i-1].isconst=0;
8570           branch_regs[i-1].wasconst=0;
8571           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8572           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8573           alloc_cc(&branch_regs[i-1],i-1);
8574           dirty_reg(&branch_regs[i-1],CCREG);
8575           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
8576           if(rt1[i-1]!=0) { // JALR
8577             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
8578             dirty_reg(&branch_regs[i-1],rt1[i-1]);
8579             branch_regs[i-1].is32|=1LL<<rt1[i-1];
8580           }
8581           #ifdef USE_MINI_HT
8582           if(rs1[i-1]==31) { // JALR
8583             alloc_reg(&branch_regs[i-1],i-1,RHASH);
8584             #ifndef HOST_IMM_ADDR32
8585             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
8586             #endif
8587           }
8588           #endif
8589           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8590           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8591           break;
8592         case CJUMP:
8593           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
8594           {
8595             alloc_cc(&current,i-1);
8596             dirty_reg(&current,CCREG);
8597             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
8598                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
8599               // The delay slot overwrote one of our conditions
8600               // Delay slot goes after the test (in order)
8601               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8602               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8603               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8604               current.u|=1;
8605               current.uu|=1;
8606               delayslot_alloc(&current,i);
8607               current.isconst=0;
8608             }
8609             else
8610             {
8611               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8612               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8613               // Alloc the branch condition registers
8614               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
8615               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
8616               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
8617               {
8618                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
8619                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
8620               }
8621             }
8622             memcpy(&branch_regs[i-1],&current,sizeof(current));
8623             branch_regs[i-1].isconst=0;
8624             branch_regs[i-1].wasconst=0;
8625             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8626             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8627           }
8628           else
8629           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
8630           {
8631             alloc_cc(&current,i-1);
8632             dirty_reg(&current,CCREG);
8633             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8634               // The delay slot overwrote the branch condition
8635               // Delay slot goes after the test (in order)
8636               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8637               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8638               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8639               current.u|=1;
8640               current.uu|=1;
8641               delayslot_alloc(&current,i);
8642               current.isconst=0;
8643             }
8644             else
8645             {
8646               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8647               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8648               // Alloc the branch condition register
8649               alloc_reg(&current,i-1,rs1[i-1]);
8650               if(!(current.is32>>rs1[i-1]&1))
8651               {
8652                 alloc_reg64(&current,i-1,rs1[i-1]);
8653               }
8654             }
8655             memcpy(&branch_regs[i-1],&current,sizeof(current));
8656             branch_regs[i-1].isconst=0;
8657             branch_regs[i-1].wasconst=0;
8658             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8659             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8660           }
8661           else
8662           // Alloc the delay slot in case the branch is taken
8663           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
8664           {
8665             memcpy(&branch_regs[i-1],&current,sizeof(current));
8666             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8667             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8668             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8669             alloc_cc(&branch_regs[i-1],i);
8670             dirty_reg(&branch_regs[i-1],CCREG);
8671             delayslot_alloc(&branch_regs[i-1],i);
8672             branch_regs[i-1].isconst=0;
8673             alloc_reg(&current,i,CCREG); // Not taken path
8674             dirty_reg(&current,CCREG);
8675             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8676           }
8677           else
8678           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
8679           {
8680             memcpy(&branch_regs[i-1],&current,sizeof(current));
8681             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8682             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8683             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8684             alloc_cc(&branch_regs[i-1],i);
8685             dirty_reg(&branch_regs[i-1],CCREG);
8686             delayslot_alloc(&branch_regs[i-1],i);
8687             branch_regs[i-1].isconst=0;
8688             alloc_reg(&current,i,CCREG); // Not taken path
8689             dirty_reg(&current,CCREG);
8690             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8691           }
8692           break;
8693         case SJUMP:
8694           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
8695           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
8696           {
8697             alloc_cc(&current,i-1);
8698             dirty_reg(&current,CCREG);
8699             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8700               // The delay slot overwrote the branch condition
8701               // Delay slot goes after the test (in order)
8702               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8703               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8704               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8705               current.u|=1;
8706               current.uu|=1;
8707               delayslot_alloc(&current,i);
8708               current.isconst=0;
8709             }
8710             else
8711             {
8712               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8713               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8714               // Alloc the branch condition register
8715               alloc_reg(&current,i-1,rs1[i-1]);
8716               if(!(current.is32>>rs1[i-1]&1))
8717               {
8718                 alloc_reg64(&current,i-1,rs1[i-1]);
8719               }
8720             }
8721             memcpy(&branch_regs[i-1],&current,sizeof(current));
8722             branch_regs[i-1].isconst=0;
8723             branch_regs[i-1].wasconst=0;
8724             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8725             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8726           }
8727           else
8728           // Alloc the delay slot in case the branch is taken
8729           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
8730           {
8731             memcpy(&branch_regs[i-1],&current,sizeof(current));
8732             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8733             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8734             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8735             alloc_cc(&branch_regs[i-1],i);
8736             dirty_reg(&branch_regs[i-1],CCREG);
8737             delayslot_alloc(&branch_regs[i-1],i);
8738             branch_regs[i-1].isconst=0;
8739             alloc_reg(&current,i,CCREG); // Not taken path
8740             dirty_reg(&current,CCREG);
8741             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8742           }
8743           // FIXME: BLTZAL/BGEZAL
8744           if(opcode2[i-1]&0x10) { // BxxZAL
8745             alloc_reg(&branch_regs[i-1],i-1,31);
8746             dirty_reg(&branch_regs[i-1],31);
8747             branch_regs[i-1].is32|=1LL<<31;
8748           }
8749           break;
8750         case FJUMP:
8751           if(likely[i-1]==0) // BC1F/BC1T
8752           {
8753             alloc_cc(&current,i-1);
8754             dirty_reg(&current,CCREG);
8755             if(itype[i]==FCOMP) {
8756               // The delay slot overwrote the branch condition
8757               // Delay slot goes after the test (in order)
8758               delayslot_alloc(&current,i);
8759               current.isconst=0;
8760             }
8761             else
8762             {
8763               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8764               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8765               // Alloc the branch condition register
8766               alloc_reg(&current,i-1,FSREG);
8767             }
8768             memcpy(&branch_regs[i-1],&current,sizeof(current));
8769             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8770           }
8771           else // BC1FL/BC1TL
8772           {
8773             // Alloc the delay slot in case the branch is taken
8774             memcpy(&branch_regs[i-1],&current,sizeof(current));
8775             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8776             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8777             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8778             alloc_cc(&branch_regs[i-1],i);
8779             dirty_reg(&branch_regs[i-1],CCREG);
8780             delayslot_alloc(&branch_regs[i-1],i);
8781             branch_regs[i-1].isconst=0;
8782             alloc_reg(&current,i,CCREG); // Not taken path
8783             dirty_reg(&current,CCREG);
8784             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8785           }
8786           break;
8787       }
8788
8789       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
8790       {
8791         if(rt1[i-1]==31) // JAL/JALR
8792         {
8793           // Subroutine call will return here, don't alloc any registers
8794           current.is32=1;
8795           current.dirty=0;
8796           clear_all_regs(current.regmap);
8797           alloc_reg(&current,i,CCREG);
8798           dirty_reg(&current,CCREG);
8799         }
8800         else if(i+1<slen)
8801         {
8802           // Internal branch will jump here, match registers to caller
8803           current.is32=0x3FFFFFFFFLL;
8804           current.dirty=0;
8805           clear_all_regs(current.regmap);
8806           alloc_reg(&current,i,CCREG);
8807           dirty_reg(&current,CCREG);
8808           for(j=i-1;j>=0;j--)
8809           {
8810             if(ba[j]==start+i*4+4) {
8811               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
8812               current.is32=branch_regs[j].is32;
8813               current.dirty=branch_regs[j].dirty;
8814               break;
8815             }
8816           }
8817           while(j>=0) {
8818             if(ba[j]==start+i*4+4) {
8819               for(hr=0;hr<HOST_REGS;hr++) {
8820                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
8821                   current.regmap[hr]=-1;
8822                 }
8823                 current.is32&=branch_regs[j].is32;
8824                 current.dirty&=branch_regs[j].dirty;
8825               }
8826             }
8827             j--;
8828           }
8829         }
8830       }
8831     }
8832
8833     // Count cycles in between branches
8834     ccadj[i]=cc;
8835     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
8836     {
8837       cc=0;
8838     }
8839 #if !defined(DRC_DBG)
8840     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
8841     {
8842       // GTE runs in parallel until accessed, divide by 2 for a rough guess
8843       cc+=gte_cycletab[source[i]&0x3f]/2;
8844     }
8845     else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // load,store causes weird timing issues
8846     {
8847       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
8848     }
8849     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
8850     {
8851       cc+=4;
8852     }
8853     else if(itype[i]==C2LS)
8854     {
8855       cc+=4;
8856     }
8857 #endif
8858     else
8859     {
8860       cc++;
8861     }
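         // ccadj[i] is the cycle total accumulated since the last branch or
         // syscall; CCREG is only adjusted at those points, with rough
         // extra penalties for GTE ops and coprocessor loads/stores added
         // above.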
8862
8863     flush_dirty_uppers(&current);
8864     if(!is_ds[i]) {
8865       regs[i].is32=current.is32;
8866       regs[i].dirty=current.dirty;
8867       regs[i].isconst=current.isconst;
8868       memcpy(constmap[i],current_constmap,sizeof(current_constmap));
8869     }
8870     for(hr=0;hr<HOST_REGS;hr++) {
8871       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
8872         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
8873           regs[i].wasconst&=~(1<<hr);
8874         }
8875       }
8876     }
8877     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
8878     regs[i].waswritten=current.waswritten;
8879   }
8880
8881   /* Pass 4 - Cull unused host registers */
8882
8883   uint64_t nr=0;
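       // nr is a bitmask over host registers.  Walking the block backwards,
       // a host register is needed if the MIPS register it holds is read by
       // a later instruction or must match a branch target's entry map;
       // everything else is deallocated below.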
8884
8885   for (i=slen-1;i>=0;i--)
8886   {
8887     int hr;
8888     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
8889     {
8890       if(ba[i]<start || ba[i]>=(start+slen*4))
8891       {
8892         // Branch out of this block, don't need anything
8893         nr=0;
8894       }
8895       else
8896       {
8897         // Internal branch
8898         // Need whatever matches the target
8899         nr=0;
8900         int t=(ba[i]-start)>>2;
8901         for(hr=0;hr<HOST_REGS;hr++)
8902         {
8903           if(regs[i].regmap_entry[hr]>=0) {
8904             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
8905           }
8906         }
8907       }
8908       // Conditional branch may need registers for following instructions
8909       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
8910       {
8911         if(i<slen-2) {
8912           nr|=needed_reg[i+2];
8913           for(hr=0;hr<HOST_REGS;hr++)
8914           {
8915             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
8916             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
8917           }
8918         }
8919       }
8920       // Don't need stuff which is overwritten
8921       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8922       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8923       // Merge in delay slot
8924       for(hr=0;hr<HOST_REGS;hr++)
8925       {
8926         if(!likely[i]) {
8927           // These are overwritten unless the branch is "likely"
8928           // and the delay slot is nullified if not taken
8929           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8930           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8931         }
8932         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8933         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8934         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8935         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8936         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8937         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8938         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8939         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8940         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
8941           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8942           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8943         }
8944         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
8945           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8946           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8947         }
8948         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
8949           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8950           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8951         }
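             // INVCP appears to hold a pointer to the invalid_code table
             // used for self-modifying-code detection; the opcode masks
             // match SWC1/SDC1 and SWC2/SDC2, so it stays live whenever the
             // delay slot is any kind of store.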
8952       }
8953     }
8954     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
8955     {
8956       // SYSCALL instruction (software interrupt)
8957       nr=0;
8958     }
8959     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
8960     {
8961       // ERET instruction (return from interrupt)
8962       nr=0;
8963     }
8964     else // Non-branch
8965     {
8966       if(i<slen-1) {
8967         for(hr=0;hr<HOST_REGS;hr++) {
8968           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
8969           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
8970           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8971           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8972         }
8973       }
8974     }
8975     for(hr=0;hr<HOST_REGS;hr++)
8976     {
8977       // Overwritten registers are not needed
8978       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8979       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8980       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8981       // Source registers are needed
8982       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8983       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8984       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
8985       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
8986       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8987       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8988       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8989       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8990       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
8991         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8992         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8993       }
8994       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
8995         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8996         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8997       }
8998       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
8999         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9000         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9001       }
9002       // Don't store a register immediately after writing it,
9003       // as that may prevent dual-issue.
9004       // But do so if this is a branch target, otherwise we
9005       // might have to load the register before the branch.
9006       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9007         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9008            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9009           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9010           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9011         }
9012         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9013            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9014           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9015           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9016         }
9017       }
9018     }
9019     // Cycle count is needed at branches.  Assume it is needed at the target too.
9020     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9021       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9022       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9023     }
9024     // Save it
9025     needed_reg[i]=nr;
9026
9027     // Deallocate unneeded registers
9028     for(hr=0;hr<HOST_REGS;hr++)
9029     {
9030       if(!((nr>>hr)&1)) {
9031         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9032         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9033            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9034            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9035         {
9036           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9037           {
9038             if(likely[i]) {
9039               regs[i].regmap[hr]=-1;
9040               regs[i].isconst&=~(1<<hr);
9041               if(i<slen-2) {
9042                 regmap_pre[i+2][hr]=-1;
9043                 regs[i+2].wasconst&=~(1<<hr);
9044               }
9045             }
9046           }
9047         }
9048         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9049         {
9050           int d1=0,d2=0,map=0,temp=0;
9051           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9052           {
9053             d1=dep1[i+1];
9054             d2=dep2[i+1];
9055           }
9056           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9057              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9058             map=INVCP;
9059           }
9060           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9061              itype[i+1]==C1LS || itype[i+1]==C2LS)
9062             temp=FTEMP;
9063           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9064              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9065              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9066              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9067              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9068              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9069              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9070              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9071              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9072              regs[i].regmap[hr]!=map )
9073           {
9074             regs[i].regmap[hr]=-1;
9075             regs[i].isconst&=~(1<<hr);
9076             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9077                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9078                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9079                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9080                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9081                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9082                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9083                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9084                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9085                branch_regs[i].regmap[hr]!=map)
9086             {
9087               branch_regs[i].regmap[hr]=-1;
9088               branch_regs[i].regmap_entry[hr]=-1;
9089               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9090               {
9091                 if(!likely[i]&&i<slen-2) {
9092                   regmap_pre[i+2][hr]=-1;
9093                   regs[i+2].wasconst&=~(1<<hr);
9094                 }
9095               }
9096             }
9097           }
9098         }
9099         else
9100         {
9101           // Non-branch
9102           if(i>0)
9103           {
9104             int d1=0,d2=0,map=-1,temp=-1;
9105             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9106             {
9107               d1=dep1[i];
9108               d2=dep2[i];
9109             }
9110             if(itype[i]==STORE || itype[i]==STORELR ||
9111                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9112               map=INVCP;
9113             }
9114             if(itype[i]==LOADLR || itype[i]==STORELR ||
9115                itype[i]==C1LS || itype[i]==C2LS)
9116               temp=FTEMP;
9117             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9118                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9119                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9120                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9121                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9122                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9123             {
9124               if(i<slen-1&&!is_ds[i]) {
9125                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9126                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9127                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9128                 {
9129                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9130                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9131                 }
9132                 regmap_pre[i+1][hr]=-1;
9133                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9134                 regs[i+1].wasconst&=~(1<<hr);
9135               }
9136               regs[i].regmap[hr]=-1;
9137               regs[i].isconst&=~(1<<hr);
9138             }
9139           }
9140         }
9141       }
9142     }
9143   }
9144
9145   /* Pass 5 - Pre-allocate registers */
9146
9147   // If a register is allocated during a loop, try to allocate it for the
9148   // entire loop, if possible.  This avoids loading/storing registers
9149   // inside of the loop.
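       // For example, if the backwards branch at the bottom of a loop has a
       // register mapped but the loop head (the branch target) does not,
       // the code below tries to extend that mapping over the whole loop
       // body so the value doesn't get reloaded on every iteration.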
9150
9151   signed char f_regmap[HOST_REGS];
9152   clear_all_regs(f_regmap);
9153   for(i=0;i<slen-1;i++)
9154   {
9155     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9156     {
9157       if(ba[i]>=start && ba[i]<(start+i*4))
9158       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9159       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9160       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9161       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9162       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9163       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9164       {
9165         int t=(ba[i]-start)>>2;
9166         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9167         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
9168         for(hr=0;hr<HOST_REGS;hr++)
9169         {
9170           if(regs[i].regmap[hr]>64) {
9171             if(!((regs[i].dirty>>hr)&1))
9172               f_regmap[hr]=regs[i].regmap[hr];
9173             else f_regmap[hr]=-1;
9174           }
9175           else if(regs[i].regmap[hr]>=0) {
9176             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9177               // dealloc old register
9178               int n;
9179               for(n=0;n<HOST_REGS;n++)
9180               {
9181                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9182               }
9183               // and alloc new one
9184               f_regmap[hr]=regs[i].regmap[hr];
9185             }
9186           }
9187           if(branch_regs[i].regmap[hr]>64) {
9188             if(!((branch_regs[i].dirty>>hr)&1))
9189               f_regmap[hr]=branch_regs[i].regmap[hr];
9190             else f_regmap[hr]=-1;
9191           }
9192           else if(branch_regs[i].regmap[hr]>=0) {
9193             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9194               // dealloc old register
9195               int n;
9196               for(n=0;n<HOST_REGS;n++)
9197               {
9198                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9199               }
9200               // and alloc new one
9201               f_regmap[hr]=branch_regs[i].regmap[hr];
9202             }
9203           }
9204           if(ooo[i]) {
9205             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
9206               f_regmap[hr]=branch_regs[i].regmap[hr];
9207           }else{
9208             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
9209               f_regmap[hr]=branch_regs[i].regmap[hr];
9210           }
9211           // Avoid dirty->clean transition
9212           #ifdef DESTRUCTIVE_WRITEBACK
9213           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9214           #endif
9215           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
9216           // case above; however, it's always a good idea.  We can't hoist the
9217           // load if the register was already allocated, so there's no point
9218           // wasting time analyzing most of these cases.  It only "succeeds"
9219           // when the mapping was different and the load can be replaced with
9220           // a mov, which is of negligible benefit.  So such cases are
9221           // skipped below.
9222           if(f_regmap[hr]>0) {
9223             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
9224               int r=f_regmap[hr];
9225               for(j=t;j<=i;j++)
9226               {
9227                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9228                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9229                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9230                 if(r>63) {
9231                   // NB This can exclude the case where the upper-half
9232                   // register is lower numbered than the lower-half
9233                   // register.  Not sure if it's worth fixing...
9234                   if(get_reg(regs[j].regmap,r&63)<0) break;
9235                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
9236                   if(regs[j].is32&(1LL<<(r&63))) break;
9237                 }
9238                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9239                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9240                   int k;
9241                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9242                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9243                     if(r>63) {
9244                       if(get_reg(regs[i].regmap,r&63)<0) break;
9245                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9246                     }
9247                     k=i;
9248                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9249                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9250                         //printf("no free regs for store %x\n",start+(k-1)*4);
9251                         break;
9252                       }
9253                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9254                         //printf("no-match due to different register\n");
9255                         break;
9256                       }
9257                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9258                         //printf("no-match due to branch\n");
9259                         break;
9260                       }
9261                       // call/ret fast path assumes no registers allocated
9262                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
9263                         break;
9264                       }
9265                       if(r>63) {
9266                         // NB This can exclude the case where the upper-half
9267                         // register is lower numbered than the lower-half
9268                         // register.  Not sure if it's worth fixing...
9269                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9270                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9271                       }
9272                       k--;
9273                     }
9274                     if(i<slen-1) {
9275                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9276                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9277                         //printf("bad match after branch\n");
9278                         break;
9279                       }
9280                     }
9281                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9282                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9283                       while(k<i) {
9284                         regs[k].regmap_entry[hr]=f_regmap[hr];
9285                         regs[k].regmap[hr]=f_regmap[hr];
9286                         regmap_pre[k+1][hr]=f_regmap[hr];
9287                         regs[k].wasdirty&=~(1<<hr);
9288                         regs[k].dirty&=~(1<<hr);
9289                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9290                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9291                         regs[k].wasconst&=~(1<<hr);
9292                         regs[k].isconst&=~(1<<hr);
9293                         k++;
9294                       }
9295                     }
9296                     else {
9297                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9298                       break;
9299                     }
9300                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9301                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9302                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9303                       regs[i].regmap_entry[hr]=f_regmap[hr];
9304                       regs[i].regmap[hr]=f_regmap[hr];
9305                       regs[i].wasdirty&=~(1<<hr);
9306                       regs[i].dirty&=~(1<<hr);
9307                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9308                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9309                       regs[i].wasconst&=~(1<<hr);
9310                       regs[i].isconst&=~(1<<hr);
9311                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9312                       branch_regs[i].wasdirty&=~(1<<hr);
9313                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9314                       branch_regs[i].regmap[hr]=f_regmap[hr];
9315                       branch_regs[i].dirty&=~(1<<hr);
9316                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9317                       branch_regs[i].wasconst&=~(1<<hr);
9318                       branch_regs[i].isconst&=~(1<<hr);
9319                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9320                         regmap_pre[i+2][hr]=f_regmap[hr];
9321                         regs[i+2].wasdirty&=~(1<<hr);
9322                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9323                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9324                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9325                       }
9326                     }
9327                   }
9328                   for(k=t;k<j;k++) {
9329                     // Alloc register clean at beginning of loop,
9330                     // but may dirty it in pass 6
9331                     regs[k].regmap_entry[hr]=f_regmap[hr];
9332                     regs[k].regmap[hr]=f_regmap[hr];
9333                     regs[k].dirty&=~(1<<hr);
9334                     regs[k].wasconst&=~(1<<hr);
9335                     regs[k].isconst&=~(1<<hr);
9336                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
9337                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
9338                       branch_regs[k].regmap[hr]=f_regmap[hr];
9339                       branch_regs[k].dirty&=~(1<<hr);
9340                       branch_regs[k].wasconst&=~(1<<hr);
9341                       branch_regs[k].isconst&=~(1<<hr);
9342                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
9343                         regmap_pre[k+2][hr]=f_regmap[hr];
9344                         regs[k+2].wasdirty&=~(1<<hr);
9345                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
9346                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
9347                       }
9348                     }
9349                     else
9350                     {
9351                       regmap_pre[k+1][hr]=f_regmap[hr];
9352                       regs[k+1].wasdirty&=~(1<<hr);
9353                     }
9354                   }
9355                   if(regs[j].regmap[hr]==f_regmap[hr])
9356                     regs[j].regmap_entry[hr]=f_regmap[hr];
9357                   break;
9358                 }
9359                 if(j==i) break;
9360                 if(regs[j].regmap[hr]>=0)
9361                   break;
9362                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9363                   //printf("no-match due to different register\n");
9364                   break;
9365                 }
9366                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9367                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9368                   break;
9369                 }
9370                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9371                 {
9372                   // Stop on unconditional branch
9373                   break;
9374                 }
9375                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
9376                 {
9377                   if(ooo[j]) {
9378                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
9379                       break;
9380                   }else{
9381                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
9382                       break;
9383                   }
9384                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
9385                     //printf("no-match due to different register (branch)\n");
9386                     break;
9387                   }
9388                 }
9389                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9390                   //printf("No free regs for store %x\n",start+j*4);
9391                   break;
9392                 }
9393                 if(f_regmap[hr]>=64) {
9394                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9395                     break;
9396                   }
9397                   else
9398                   {
9399                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9400                       break;
9401                     }
9402                   }
9403                 }
9404               }
9405             }
9406           }
9407         }
9408       }
9409     }else{
9410       // Non branch or undetermined branch target
9411       for(hr=0;hr<HOST_REGS;hr++)
9412       {
9413         if(hr!=EXCLUDE_REG) {
9414           if(regs[i].regmap[hr]>64) {
9415             if(!((regs[i].dirty>>hr)&1))
9416               f_regmap[hr]=regs[i].regmap[hr];
9417           }
9418           else if(regs[i].regmap[hr]>=0) {
9419             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9420               // dealloc old register
9421               int n;
9422               for(n=0;n<HOST_REGS;n++)
9423               {
9424                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9425               }
9426               // and alloc new one
9427               f_regmap[hr]=regs[i].regmap[hr];
9428             }
9429           }
9430         }
9431       }
9432       // Try to restore cycle count at branch targets
9433       if(bt[i]) {
9434         for(j=i;j<slen-1;j++) {
9435           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9436           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9437             //printf("no free regs for store %x\n",start+j*4);
9438             break;
9439           }
9440         }
9441         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9442           int k=i;
9443           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9444           while(k<j) {
9445             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9446             regs[k].regmap[HOST_CCREG]=CCREG;
9447             regmap_pre[k+1][HOST_CCREG]=CCREG;
9448             regs[k+1].wasdirty|=1<<HOST_CCREG;
9449             regs[k].dirty|=1<<HOST_CCREG;
9450             regs[k].wasconst&=~(1<<HOST_CCREG);
9451             regs[k].isconst&=~(1<<HOST_CCREG);
9452             k++;
9453           }
9454           regs[j].regmap_entry[HOST_CCREG]=CCREG;
9455         }
9456         // Work backwards from the branch target
9457         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9458         {
9459           //printf("Extend backwards\n");
9460           int k;
9461           k=i;
9462           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9463             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9464               //printf("no free regs for store %x\n",start+(k-1)*4);
9465               break;
9466             }
9467             k--;
9468           }
9469           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9470             //printf("Extend CC, %x ->\n",start+k*4);
9471             while(k<=i) {
9472               regs[k].regmap_entry[HOST_CCREG]=CCREG;
9473               regs[k].regmap[HOST_CCREG]=CCREG;
9474               regmap_pre[k+1][HOST_CCREG]=CCREG;
9475               regs[k+1].wasdirty|=1<<HOST_CCREG;
9476               regs[k].dirty|=1<<HOST_CCREG;
9477               regs[k].wasconst&=~(1<<HOST_CCREG);
9478               regs[k].isconst&=~(1<<HOST_CCREG);
9479               k++;
9480             }
9481           }
9482           else {
9483             //printf("Fail Extend CC, %x ->\n",start+k*4);
9484           }
9485         }
9486       }
9487       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9488          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9489          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9490          itype[i]!=FCONV&&itype[i]!=FCOMP)
9491       {
9492         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9493       }
9494     }
9495   }
9496
9497   // Cache memory offset or tlb map pointer if a register is available
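  // Note on the conditional compilation below: when HOST_IMM_ADDR32 is
  // defined the whole block is compiled out (the host can presumably encode
  // the address as an immediate instead), and when RAM_OFFSET is not defined
  // the block is still compiled but disabled at runtime by the if(0).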
9498   #ifndef HOST_IMM_ADDR32
9499   #ifndef RAM_OFFSET
9500   if(0)
9501   #endif
9502   {
9503     int earliest_available[HOST_REGS];
9504     int loop_start[HOST_REGS];
9505     int score[HOST_REGS];
9506     int end[HOST_REGS];
9507     int reg=ROREG;
9508
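    // Rough meaning of the bookkeeping arrays:
    //   earliest_available[hr] - first instruction index from which host reg
    //                            hr has been continuously free
    //   loop_start[hr]         - earliest backward-branch target reachable
    //                            while hr stays free (loop head candidate)
    //   score[hr]              - number of memory accesses that could use the
    //                            value cached in hr
    //   end[hr]                - last instruction of the candidate range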
9509     // Init
9510     for(hr=0;hr<HOST_REGS;hr++) {
9511       score[hr]=0;earliest_available[hr]=0;
9512       loop_start[hr]=MAXBLOCK;
9513     }
9514     for(i=0;i<slen-1;i++)
9515     {
9516       // Can't do anything if no registers are available
9517       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
9518         for(hr=0;hr<HOST_REGS;hr++) {
9519           score[hr]=0;earliest_available[hr]=i+1;
9520           loop_start[hr]=MAXBLOCK;
9521         }
9522       }
9523       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9524         if(!ooo[i]) {
9525           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
9526             for(hr=0;hr<HOST_REGS;hr++) {
9527               score[hr]=0;earliest_available[hr]=i+1;
9528               loop_start[hr]=MAXBLOCK;
9529             }
9530           }
9531         }else{
9532           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
9533             for(hr=0;hr<HOST_REGS;hr++) {
9534               score[hr]=0;earliest_available[hr]=i+1;
9535               loop_start[hr]=MAXBLOCK;
9536             }
9537           }
9538         }
9539       }
9540       // Mark unavailable registers
9541       for(hr=0;hr<HOST_REGS;hr++) {
9542         if(regs[i].regmap[hr]>=0) {
9543           score[hr]=0;earliest_available[hr]=i+1;
9544           loop_start[hr]=MAXBLOCK;
9545         }
9546         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9547           if(branch_regs[i].regmap[hr]>=0) {
9548             score[hr]=0;earliest_available[hr]=i+2;
9549             loop_start[hr]=MAXBLOCK;
9550           }
9551         }
9552       }
9553       // No register allocations after unconditional jumps
9554       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
9555       {
9556         for(hr=0;hr<HOST_REGS;hr++) {
9557           score[hr]=0;earliest_available[hr]=i+2;
9558           loop_start[hr]=MAXBLOCK;
9559         }
9560         i++; // Skip delay slot too
9561         //printf("skip delay slot: %x\n",start+i*4);
9562       }
9563       else
9564       // Possible match
9565       if(itype[i]==LOAD||itype[i]==LOADLR||
9566          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
9567         for(hr=0;hr<HOST_REGS;hr++) {
9568           if(hr!=EXCLUDE_REG) {
9569             end[hr]=i-1;
9570             for(j=i;j<slen-1;j++) {
9571               if(regs[j].regmap[hr]>=0) break;
9572               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9573                 if(branch_regs[j].regmap[hr]>=0) break;
9574                 if(ooo[j]) {
9575                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
9576                 }else{
9577                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
9578                 }
9579               }
9580               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
9581               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9582                 int t=(ba[j]-start)>>2;
9583                 if(t<j&&t>=earliest_available[hr]) {
9584                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
9585                     // Score a point for hoisting loop invariant
9586                     if(t<loop_start[hr]) loop_start[hr]=t;
9587                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
9588                     score[hr]++;
9589                     end[hr]=j;
9590                   }
9591                 }
9592                 else if(t<j) {
9593                   if(regs[t].regmap[hr]==reg) {
9594                     // Score a point if the branch target matches this register
9595                     score[hr]++;
9596                     end[hr]=j;
9597                   }
9598                 }
9599                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
9600                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
9601                   score[hr]++;
9602                   end[hr]=j;
9603                 }
9604               }
9605               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9606               {
9607                 // Stop on unconditional branch
9608                 break;
9609               }
9610               else
9611               if(itype[j]==LOAD||itype[j]==LOADLR||
9612                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
9613                 score[hr]++;
9614                 end[hr]=j;
9615               }
9616             }
9617           }
9618         }
9619         // Find highest score and allocate that register
9620         int maxscore=0;
9621         for(hr=0;hr<HOST_REGS;hr++) {
9622           if(hr!=EXCLUDE_REG) {
9623             if(score[hr]>score[maxscore]) {
9624               maxscore=hr;
9625               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
9626             }
9627           }
9628         }
9629         if(score[maxscore]>1)
9630         {
9631           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
9632           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
9633             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
9634             assert(regs[j].regmap[maxscore]<0);
9635             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
9636             regs[j].regmap[maxscore]=reg;
9637             regs[j].dirty&=~(1<<maxscore);
9638             regs[j].wasconst&=~(1<<maxscore);
9639             regs[j].isconst&=~(1<<maxscore);
9640             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9641               branch_regs[j].regmap[maxscore]=reg;
9642               branch_regs[j].wasdirty&=~(1<<maxscore);
9643               branch_regs[j].dirty&=~(1<<maxscore);
9644               branch_regs[j].wasconst&=~(1<<maxscore);
9645               branch_regs[j].isconst&=~(1<<maxscore);
9646               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
9647                 regmap_pre[j+2][maxscore]=reg;
9648                 regs[j+2].wasdirty&=~(1<<maxscore);
9649               }
9650               // loop optimization (loop_preload)
9651               int t=(ba[j]-start)>>2;
9652               if(t==loop_start[maxscore]) {
9653                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
9654                   regs[t].regmap_entry[maxscore]=reg;
9655               }
9656             }
9657             else
9658             {
9659               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
9660                 regmap_pre[j+1][maxscore]=reg;
9661                 regs[j+1].wasdirty&=~(1<<maxscore);
9662               }
9663             }
9664           }
9665           i=j-1;
9666           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
9667           for(hr=0;hr<HOST_REGS;hr++) {
9668             score[hr]=0;earliest_available[hr]=i+1;
9669             loop_start[hr]=MAXBLOCK;
9670           }
9671         }
9672       }
9673     }
9674   }
9675   #endif
9676
9677   // This allocates registers (if possible) one instruction prior
9678   // to use, which can avoid a load-use penalty on certain CPUs.
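  // Hypothetical example: if instruction i+1 is "addu t0,a0,a1" and a0 is
  // assigned host register r4 at i+1 while r4 is unused at instruction i,
  // the mapping is extended back to i, so a0 is loaded one instruction early
  // and is already sitting in r4 when the addu is assembled.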
9679   for(i=0;i<slen-1;i++)
9680   {
9681     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9682     {
9683       if(!bt[i+1])
9684       {
9685         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9686            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9687         {
9688           if(rs1[i+1]) {
9689             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9690             {
9691               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9692               {
9693                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9694                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9695                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9696                 regs[i].isconst&=~(1<<hr);
9697                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9698                 constmap[i][hr]=constmap[i+1][hr];
9699                 regs[i+1].wasdirty&=~(1<<hr);
9700                 regs[i].dirty&=~(1<<hr);
9701               }
9702             }
9703           }
9704           if(rs2[i+1]) {
9705             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9706             {
9707               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9708               {
9709                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9710                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9711                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9712                 regs[i].isconst&=~(1<<hr);
9713                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9714                 constmap[i][hr]=constmap[i+1][hr];
9715                 regs[i+1].wasdirty&=~(1<<hr);
9716                 regs[i].dirty&=~(1<<hr);
9717               }
9718             }
9719           }
9720           // Preload target address for load instruction (non-constant)
9721           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9722             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9723             {
9724               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9725               {
9726                 regs[i].regmap[hr]=rs1[i+1];
9727                 regmap_pre[i+1][hr]=rs1[i+1];
9728                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9729                 regs[i].isconst&=~(1<<hr);
9730                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9731                 constmap[i][hr]=constmap[i+1][hr];
9732                 regs[i+1].wasdirty&=~(1<<hr);
9733                 regs[i].dirty&=~(1<<hr);
9734               }
9735             }
9736           }
9737           // Load source into target register
9738           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9739             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9740             {
9741               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9742               {
9743                 regs[i].regmap[hr]=rs1[i+1];
9744                 regmap_pre[i+1][hr]=rs1[i+1];
9745                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9746                 regs[i].isconst&=~(1<<hr);
9747                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9748                 constmap[i][hr]=constmap[i+1][hr];
9749                 regs[i+1].wasdirty&=~(1<<hr);
9750                 regs[i].dirty&=~(1<<hr);
9751               }
9752             }
9753           }
9754           // Address for store instruction (non-constant)
9755           if(itype[i+1]==STORE||itype[i+1]==STORELR
9756              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
9757             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9758               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
9759               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9760               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
9761               assert(hr>=0);
9762               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9763               {
9764                 regs[i].regmap[hr]=rs1[i+1];
9765                 regmap_pre[i+1][hr]=rs1[i+1];
9766                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9767                 regs[i].isconst&=~(1<<hr);
9768                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9769                 constmap[i][hr]=constmap[i+1][hr];
9770                 regs[i+1].wasdirty&=~(1<<hr);
9771                 regs[i].dirty&=~(1<<hr);
9772               }
9773             }
9774           }
9775           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
9776             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9777               int nr;
9778               hr=get_reg(regs[i+1].regmap,FTEMP);
9779               assert(hr>=0);
9780               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9781               {
9782                 regs[i].regmap[hr]=rs1[i+1];
9783                 regmap_pre[i+1][hr]=rs1[i+1];
9784                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9785                 regs[i].isconst&=~(1<<hr);
9786                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9787                 constmap[i][hr]=constmap[i+1][hr];
9788                 regs[i+1].wasdirty&=~(1<<hr);
9789                 regs[i].dirty&=~(1<<hr);
9790               }
9791               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
9792               {
9793                 // move it to another register
9794                 regs[i+1].regmap[hr]=-1;
9795                 regmap_pre[i+2][hr]=-1;
9796                 regs[i+1].regmap[nr]=FTEMP;
9797                 regmap_pre[i+2][nr]=FTEMP;
9798                 regs[i].regmap[nr]=rs1[i+1];
9799                 regmap_pre[i+1][nr]=rs1[i+1];
9800                 regs[i+1].regmap_entry[nr]=rs1[i+1];
9801                 regs[i].isconst&=~(1<<nr);
9802                 regs[i+1].isconst&=~(1<<nr);
9803                 regs[i].dirty&=~(1<<nr);
9804                 regs[i+1].wasdirty&=~(1<<nr);
9805                 regs[i+1].dirty&=~(1<<nr);
9806                 regs[i+2].wasdirty&=~(1<<nr);
9807               }
9808             }
9809           }
9810           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
9811             if(itype[i+1]==LOAD)
9812               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
9813             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
9814               hr=get_reg(regs[i+1].regmap,FTEMP);
9815             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
9816               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
9817               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9818             }
9819             if(hr>=0&&regs[i].regmap[hr]<0) {
9820               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
9821               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
9822                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
9823                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
9824                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
9825                 regs[i].isconst&=~(1<<hr);
9826                 regs[i+1].wasdirty&=~(1<<hr);
9827                 regs[i].dirty&=~(1<<hr);
9828               }
9829             }
9830           }
9831         }
9832       }
9833     }
9834   }
9835
9836   /* Pass 6 - Optimize clean/dirty state */
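  // (Roughly: a host register is "dirty" when it holds a value newer than the
  //  guest register file; clean_registers() propagates clean/dirty state
  //  across the block so writebacks are emitted only where actually needed.)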
9837   clean_registers(0,slen-1,1);
9838
9839   /* Pass 7 - Identify 32-bit registers */
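  // (On the PSX all GPRs are 32 bits wide, so what remains of this pass is
  //  just marking the instruction after each conditional branch as a branch
  //  target, since execution may resume there after an interrupt.)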
9840   for (i=slen-1;i>=0;i--)
9841   {
9842     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9843     {
9844       // Conditional branch
9845       if((source[i]>>16)!=0x1000&&i<slen-2) {
9846         // Mark this address as a branch target since it may be called
9847         // upon return from interrupt
9848         bt[i+2]=1;
9849       }
9850     }
9851   }
9852
9853   if(itype[slen-1]==SPAN) {
9854     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
9855   }
9856
9857 #ifdef DISASM
9858   /* Debug/disassembly */
9859   for(i=0;i<slen;i++)
9860   {
9861     printf("U:");
9862     int r;
9863     for(r=1;r<=CCREG;r++) {
9864       if((unneeded_reg[i]>>r)&1) {
9865         if(r==HIREG) printf(" HI");
9866         else if(r==LOREG) printf(" LO");
9867         else printf(" r%d",r);
9868       }
9869     }
9870     printf("\n");
9871     #if defined(__i386__) || defined(__x86_64__)
9872     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
9873     #endif
9874     #ifdef __arm__
9875     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
9876     #endif
9877     printf("needs: ");
9878     if(needed_reg[i]&1) printf("eax ");
9879     if((needed_reg[i]>>1)&1) printf("ecx ");
9880     if((needed_reg[i]>>2)&1) printf("edx ");
9881     if((needed_reg[i]>>3)&1) printf("ebx ");
9882     if((needed_reg[i]>>5)&1) printf("ebp ");
9883     if((needed_reg[i]>>6)&1) printf("esi ");
9884     if((needed_reg[i]>>7)&1) printf("edi ");
9885     printf("\n");
9886     #if defined(__i386__) || defined(__x86_64__)
9887     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
9888     printf("dirty: ");
9889     if(regs[i].wasdirty&1) printf("eax ");
9890     if((regs[i].wasdirty>>1)&1) printf("ecx ");
9891     if((regs[i].wasdirty>>2)&1) printf("edx ");
9892     if((regs[i].wasdirty>>3)&1) printf("ebx ");
9893     if((regs[i].wasdirty>>5)&1) printf("ebp ");
9894     if((regs[i].wasdirty>>6)&1) printf("esi ");
9895     if((regs[i].wasdirty>>7)&1) printf("edi ");
9896     #endif
9897     #ifdef __arm__
9898     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
9899     printf("dirty: ");
9900     if(regs[i].wasdirty&1) printf("r0 ");
9901     if((regs[i].wasdirty>>1)&1) printf("r1 ");
9902     if((regs[i].wasdirty>>2)&1) printf("r2 ");
9903     if((regs[i].wasdirty>>3)&1) printf("r3 ");
9904     if((regs[i].wasdirty>>4)&1) printf("r4 ");
9905     if((regs[i].wasdirty>>5)&1) printf("r5 ");
9906     if((regs[i].wasdirty>>6)&1) printf("r6 ");
9907     if((regs[i].wasdirty>>7)&1) printf("r7 ");
9908     if((regs[i].wasdirty>>8)&1) printf("r8 ");
9909     if((regs[i].wasdirty>>9)&1) printf("r9 ");
9910     if((regs[i].wasdirty>>10)&1) printf("r10 ");
9911     if((regs[i].wasdirty>>12)&1) printf("r12 ");
9912     #endif
9913     printf("\n");
9914     disassemble_inst(i);
9915     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
9916     #if defined(__i386__) || defined(__x86_64__)
9917     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
9918     if(regs[i].dirty&1) printf("eax ");
9919     if((regs[i].dirty>>1)&1) printf("ecx ");
9920     if((regs[i].dirty>>2)&1) printf("edx ");
9921     if((regs[i].dirty>>3)&1) printf("ebx ");
9922     if((regs[i].dirty>>5)&1) printf("ebp ");
9923     if((regs[i].dirty>>6)&1) printf("esi ");
9924     if((regs[i].dirty>>7)&1) printf("edi ");
9925     #endif
9926     #ifdef __arm__
9927     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
9928     if(regs[i].dirty&1) printf("r0 ");
9929     if((regs[i].dirty>>1)&1) printf("r1 ");
9930     if((regs[i].dirty>>2)&1) printf("r2 ");
9931     if((regs[i].dirty>>3)&1) printf("r3 ");
9932     if((regs[i].dirty>>4)&1) printf("r4 ");
9933     if((regs[i].dirty>>5)&1) printf("r5 ");
9934     if((regs[i].dirty>>6)&1) printf("r6 ");
9935     if((regs[i].dirty>>7)&1) printf("r7 ");
9936     if((regs[i].dirty>>8)&1) printf("r8 ");
9937     if((regs[i].dirty>>9)&1) printf("r9 ");
9938     if((regs[i].dirty>>10)&1) printf("r10 ");
9939     if((regs[i].dirty>>12)&1) printf("r12 ");
9940     #endif
9941     printf("\n");
9942     if(regs[i].isconst) {
9943       printf("constants: ");
9944       #if defined(__i386__) || defined(__x86_64__)
9945       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
9946       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
9947       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
9948       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
9949       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
9950       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
9951       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
9952       #endif
9953       #ifdef __arm__
9954       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
9955       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
9956       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
9957       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
9958       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
9959       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
9960       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
9961       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
9962       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
9963       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
9964       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
9965       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
9966       #endif
9967       printf("\n");
9968     }
9969     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9970       #if defined(__i386__) || defined(__x86_64__)
9971       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
9972       if(branch_regs[i].dirty&1) printf("eax ");
9973       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
9974       if((branch_regs[i].dirty>>2)&1) printf("edx ");
9975       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
9976       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
9977       if((branch_regs[i].dirty>>6)&1) printf("esi ");
9978       if((branch_regs[i].dirty>>7)&1) printf("edi ");
9979       #endif
9980       #ifdef __arm__
9981       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
9982       if(branch_regs[i].dirty&1) printf("r0 ");
9983       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
9984       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
9985       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
9986       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
9987       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
9988       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
9989       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
9990       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
9991       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
9992       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
9993       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
9994       #endif
9995     }
9996   }
9997 #endif // DISASM
9998
9999   /* Pass 8 - Assembly */
10000   linkcount=0;stubcount=0;
10001   ds=0;is_delayslot=0;
10002   cop1_usable=0;
10003   uint64_t is32_pre=0;
10004   u_int dirty_pre=0;
10005   void *beginning=start_block();
10006   if((u_int)addr&1) {
10007     ds=1;
10008     pagespan_ds();
10009   }
10010   u_int instr_addr0_override=0;
10011
10012   if (start == 0x80030000) {
10013     // nasty hack for fastbios thing
10014     // override block entry to this code
10015     instr_addr0_override=(u_int)out;
10016     emit_movimm(start,0);
10017     // abuse io address var as a flag that we
10018     // have already returned here once
10019     emit_readword((int)&address,1);
10020     emit_writeword(0,(int)&pcaddr);
10021     emit_writeword(0,(int)&address);
10022     emit_cmp(0,1);
10023     emit_jne((int)new_dyna_leave);
10024   }
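  // Main assembly loop: for each instruction we (roughly) write back or
  // discard stale host registers, record the branch-target entry point in
  // instr_addr[], load the guest registers the instruction needs, do address
  // generation and constant loading, then emit native code via the per-type
  // *_assemble() helper, flushing the literal pool as needed.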
10025   for(i=0;i<slen;i++)
10026   {
10027     //if(ds) printf("ds: ");
10028     disassemble_inst(i);
10029     if(ds) {
10030       ds=0; // Skip delay slot
10031       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10032       instr_addr[i]=0;
10033     } else {
10034       speculate_register_values(i);
10035       #ifndef DESTRUCTIVE_WRITEBACK
10036       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10037       {
10038         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10039               unneeded_reg[i],unneeded_reg_upper[i]);
10040       }
10041       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
10042         is32_pre=branch_regs[i].is32;
10043         dirty_pre=branch_regs[i].dirty;
10044       }else{
10045         is32_pre=regs[i].is32;
10046         dirty_pre=regs[i].dirty;
10047       }
10048       #endif
10049       // write back
10050       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10051       {
10052         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10053                       unneeded_reg[i],unneeded_reg_upper[i]);
10054         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10055       }
10056       // branch target entry point
10057       instr_addr[i]=(u_int)out;
10058       assem_debug("<->\n");
10059       drc_dbg_emit_do_cmp(i);
10060
10061       // load regs
10062       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10063         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10064       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10065       address_generation(i,&regs[i],regs[i].regmap_entry);
10066       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10067       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10068       {
10069         // Load the delay slot registers if necessary
10070         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
10071           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10072         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
10073           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10074         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10075           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10076       }
10077       else if(i+1<slen)
10078       {
10079         // Preload registers for following instruction
10080         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10081           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10082             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10083         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10084           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10085             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10086       }
10087       // TODO: if(is_ooo(i)) address_generation(i+1);
10088       if(itype[i]==CJUMP||itype[i]==FJUMP)
10089         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10090       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10091         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10092       if(bt[i]) cop1_usable=0;
10093       // assemble
10094       switch(itype[i]) {
10095         case ALU:
10096           alu_assemble(i,&regs[i]);break;
10097         case IMM16:
10098           imm16_assemble(i,&regs[i]);break;
10099         case SHIFT:
10100           shift_assemble(i,&regs[i]);break;
10101         case SHIFTIMM:
10102           shiftimm_assemble(i,&regs[i]);break;
10103         case LOAD:
10104           load_assemble(i,&regs[i]);break;
10105         case LOADLR:
10106           loadlr_assemble(i,&regs[i]);break;
10107         case STORE:
10108           store_assemble(i,&regs[i]);break;
10109         case STORELR:
10110           storelr_assemble(i,&regs[i]);break;
10111         case COP0:
10112           cop0_assemble(i,&regs[i]);break;
10113         case COP1:
10114           cop1_assemble(i,&regs[i]);break;
10115         case C1LS:
10116           c1ls_assemble(i,&regs[i]);break;
10117         case COP2:
10118           cop2_assemble(i,&regs[i]);break;
10119         case C2LS:
10120           c2ls_assemble(i,&regs[i]);break;
10121         case C2OP:
10122           c2op_assemble(i,&regs[i]);break;
10123         case FCONV:
10124           fconv_assemble(i,&regs[i]);break;
10125         case FLOAT:
10126           float_assemble(i,&regs[i]);break;
10127         case FCOMP:
10128           fcomp_assemble(i,&regs[i]);break;
10129         case MULTDIV:
10130           multdiv_assemble(i,&regs[i]);break;
10131         case MOV:
10132           mov_assemble(i,&regs[i]);break;
10133         case SYSCALL:
10134           syscall_assemble(i,&regs[i]);break;
10135         case HLECALL:
10136           hlecall_assemble(i,&regs[i]);break;
10137         case INTCALL:
10138           intcall_assemble(i,&regs[i]);break;
10139         case UJUMP:
10140           ujump_assemble(i,&regs[i]);ds=1;break;
10141         case RJUMP:
10142           rjump_assemble(i,&regs[i]);ds=1;break;
10143         case CJUMP:
10144           cjump_assemble(i,&regs[i]);ds=1;break;
10145         case SJUMP:
10146           sjump_assemble(i,&regs[i]);ds=1;break;
10147         case FJUMP:
10148           fjump_assemble(i,&regs[i]);ds=1;break;
10149         case SPAN:
10150           pagespan_assemble(i,&regs[i]);break;
10151       }
10152       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10153         literal_pool(1024);
10154       else
10155         literal_pool_jumpover(256);
10156     }
10157   }
10158   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10159   // If the block did not end with an unconditional branch,
10160   // add a jump to the next instruction.
10161   if(i>1) {
10162     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10163       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10164       assert(i==slen);
10165       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10166         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10167         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10168           emit_loadreg(CCREG,HOST_CCREG);
10169         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10170       }
10171       else if(!likely[i-2])
10172       {
10173         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10174         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10175       }
10176       else
10177       {
10178         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10179         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10180       }
10181       add_to_linker((int)out,start+i*4,0);
10182       emit_jmp(0);
10183     }
10184   }
10185   else
10186   {
10187     assert(i>0);
10188     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10189     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10190     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10191       emit_loadreg(CCREG,HOST_CCREG);
10192     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10193     add_to_linker((int)out,start+i*4,0);
10194     emit_jmp(0);
10195   }
10196
10197   // TODO: delay slot stubs?
10198   // Stubs
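  // Stubs are out-of-line slow paths emitted after the main code: memory
  // access fallbacks, cycle-count checks (CC_STUB), code invalidation checks
  // (INVCODE_STUB), and so on.  Each stubs[i] entry was queued earlier,
  // while the corresponding instruction was assembled in pass 8.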
10199   for(i=0;i<stubcount;i++)
10200   {
10201     switch(stubs[i][0])
10202     {
10203       case LOADB_STUB:
10204       case LOADH_STUB:
10205       case LOADW_STUB:
10206       case LOADD_STUB:
10207       case LOADBU_STUB:
10208       case LOADHU_STUB:
10209         do_readstub(i);break;
10210       case STOREB_STUB:
10211       case STOREH_STUB:
10212       case STOREW_STUB:
10213       case STORED_STUB:
10214         do_writestub(i);break;
10215       case CC_STUB:
10216         do_ccstub(i);break;
10217       case INVCODE_STUB:
10218         do_invstub(i);break;
10219       case FP_STUB:
10220         do_cop1stub(i);break;
10221       case STORELR_STUB:
10222         do_unalignedwritestub(i);break;
10223     }
10224   }
10225
10226   if (instr_addr0_override)
10227     instr_addr[0] = instr_addr0_override;
10228
10229   /* Pass 9 - Linker */
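  // As used below, the link_addr[i] entries appear to be: [0] = location of
  // the branch in the emitted code, [1] = guest target address, [2] = nonzero
  // for branches internal to this block (patched straight to instr_addr[]),
  // zero for external targets that get an extjump stub via check_addr().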
10230   for(i=0;i<linkcount;i++)
10231   {
10232     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10233     literal_pool(64);
10234     if(!link_addr[i][2])
10235     {
10236       void *stub=out;
10237       void *addr=check_addr(link_addr[i][1]);
10238       emit_extjump(link_addr[i][0],link_addr[i][1]);
10239       if(addr) {
10240         set_jump_target(link_addr[i][0],(int)addr);
10241         add_link(link_addr[i][1],stub);
10242       }
10243       else set_jump_target(link_addr[i][0],(int)stub);
10244     }
10245     else
10246     {
10247       // Internal branch
10248       int target=(link_addr[i][1]-start)>>2;
10249       assert(target>=0&&target<slen);
10250       assert(instr_addr[target]);
10251       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10252       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10253       //#else
10254       set_jump_target(link_addr[i][0],instr_addr[target]);
10255       //#endif
10256     }
10257   }
10258   // External Branch Targets (jump_in)
10259   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10260   for(i=0;i<slen;i++)
10261   {
10262     if(bt[i]||i==0)
10263     {
10264       if(instr_addr[i]) // TODO - delay slots (=null)
10265       {
10266         u_int vaddr=start+i*4;
10267         u_int page=get_page(vaddr);
10268         u_int vpage=get_vpage(vaddr);
10269         literal_pool(256);
10270         {
10271           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10272           assem_debug("jump_in: %x\n",start+i*4);
10273           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10274           int entry_point=do_dirty_stub(i);
10275           ll_add_flags(jump_in+page,vaddr,state_rflags,(void *)entry_point);
10276           // If there was an existing entry in the hash table,
10277           // replace it with the new address.
10278           // Don't add new entries.  We'll insert the
10279           // ones that actually get used in check_addr().
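          // Each hash bin holds two {vaddr,entry} pairs:
          // ht_bin[0]/[1] and ht_bin[2]/[3].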
10280           u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10281           if(ht_bin[0]==vaddr) {
10282             ht_bin[1]=entry_point;
10283           }
10284           if(ht_bin[2]==vaddr) {
10285             ht_bin[3]=entry_point;
10286           }
10287         }
10288       }
10289     }
10290   }
10291   // Write out the literal pool if necessary
10292   literal_pool(0);
10293   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10294   // Align code
10295   if(((u_int)out)&7) emit_addnop(13);
10296   #endif
10297   assert((u_int)out-(u_int)beginning<MAX_OUTPUT_BLOCK_SIZE);
10298   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
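  // Keep a copy of the source MIPS code in the shadow buffer so the dirty
  // stubs can later compare it against the current memory contents and
  // detect whether the original code has been overwritten.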
10299   memcpy(copy,source,slen*4);
10300   copy+=slen*4;
10301
10302   end_block(beginning);
10303
10304   // If we're within 256K of the end of the buffer,
10305   // start over from the beginning. (Is 256K enough?)
10306   if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10307
10308   // Trap writes to any of the pages we compiled
10309   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10310     invalid_code[i]=0;
10311   }
10312   inv_code_start=inv_code_end=~0;
10313
10314   // for PCSX we need to mark all mirrors too
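  // (main RAM is mirrored at 0x00000000, 0x80000000 and 0xa0000000, i.e.
  //  KUSEG, KSEG0 and KSEG1; the &0x1ff masks to the 2MB RAM range, so the
  //  same physical page is cleared under all three page indices)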
10315   if(get_page(start)<(RAM_SIZE>>12))
10316     for(i=start>>12;i<=(start+slen*4)>>12;i++)
10317       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
10318       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
10319       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
10320
10321   /* Pass 10 - Free memory by expiring oldest blocks */
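  // expirep is a 16-bit cursor over the translation cache: bits 13-15 select
  // one of 8 equal regions of the output buffer, bits 11-12 select which
  // structure to clear (jump_in/jump_dirty, jump_out pointers, hash table,
  // jump_out lists), and bits 0-10 select the page/bin group handled in that
  // step.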
10322
10323   int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10324   while(expirep!=end)
10325   {
10326     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10327     int base=(int)BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10328     inv_debug("EXP: Phase %d\n",expirep);
10329     switch((expirep>>11)&3)
10330     {
10331       case 0:
10332         // Clear jump_in and jump_dirty
10333         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10334         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10335         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10336         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10337         break;
10338       case 1:
10339         // Clear pointers
10340         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10341         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10342         break;
10343       case 2:
10344         // Clear hash table
10345         for(i=0;i<32;i++) {
10346           u_int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10347           if((ht_bin[3]>>shift)==(base>>shift) ||
10348              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10349             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10350             ht_bin[2]=ht_bin[3]=-1;
10351           }
10352           if((ht_bin[1]>>shift)==(base>>shift) ||
10353              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10354             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10355             ht_bin[0]=ht_bin[2];
10356             ht_bin[1]=ht_bin[3];
10357             ht_bin[2]=ht_bin[3]=-1;
10358           }
10359         }
10360         break;
10361       case 3:
10362         // Clear jump_out
10363         #ifdef __arm__
10364         if((expirep&2047)==0)
10365           do_clear_cache();
10366         #endif
10367         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10368         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10369         break;
10370     }
10371     expirep=(expirep+1)&65535;
10372   }
10373   return 0;
10374 }
10375
10376 // vim:shiftwidth=2:expandtab