Properly protect the HLE instructions against corrupted memory. (#189)
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> // for uint64_t
23 #include <assert.h>
24 #include <errno.h>
25 #include <sys/mman.h>
26 #ifdef __MACH__
27 #include <libkern/OSCacheControl.h>
28 #endif
29 #ifdef _3DS
30 #include <3ds_utils.h>
31 #endif
32 #ifdef VITA
33 #include <psp2/kernel/sysmem.h>
34 static int sceBlock;
35 #endif
36
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h" // PSX HLE BIOS calls
39 #include "emu_if.h" //emulator interface
40
41 //#define DISASM
42 //#define assem_debug printf
43 //#define inv_debug printf
44 #define assem_debug(...)
45 #define inv_debug(...)
46
47 #ifdef __i386__
48 #include "assem_x86.h"
49 #endif
50 #ifdef __x86_64__
51 #include "assem_x64.h"
52 #endif
53 #ifdef __arm__
54 #include "assem_arm.h"
55 #endif
56
57 #define MAXBLOCK 4096
58 #define MAX_OUTPUT_BLOCK_SIZE 262144
59
60 struct regstat
61 {
62   signed char regmap_entry[HOST_REGS];
63   signed char regmap[HOST_REGS];
64   uint64_t was32;
65   uint64_t is32;
66   uint64_t wasdirty;
67   uint64_t dirty;
68   uint64_t u;
69   uint64_t uu;
70   u_int wasconst;
71   u_int isconst;
72   u_int loadedconst;             // host regs that have constants loaded
73   u_int waswritten;              // MIPS regs that were used as store base before
74 };
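
// Illustrative sketch (assumed usage, mirroring dirty_reg()/set_const()
// further down; the example_* helpers are illustrative only):
// dirty/wasdirty/isconst/wasconst are bitmasks indexed by host register,
// while u/uu/is32/was32 are indexed by MIPS register number.
#if 0
static int example_host_reg_is_dirty(const struct regstat *st, int hr)
{
  return (st->dirty >> hr) & 1;   // host reg hr holds an unwritten value
}
static int example_mips_reg_is_32bit(const struct regstat *st, int reg)
{
  return (st->is32 >> reg) & 1;   // MIPS reg is a sign-extended 32-bit value
}
#endif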
75
76 // note: asm depends on this layout
77 struct ll_entry
78 {
79   u_int vaddr;
80   u_int reg_sv_flags;
81   void *addr;
82   struct ll_entry *next;
83 };
84
85   // used by asm:
86   u_char *out;
87   u_int hash_table[65536][4]  __attribute__((aligned(16)));
88   struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
89   struct ll_entry *jump_dirty[4096];
90
91   static struct ll_entry *jump_out[4096];
92   static u_int start;
93   static u_int *source;
94   static char insn[MAXBLOCK][10];
95   static u_char itype[MAXBLOCK];
96   static u_char opcode[MAXBLOCK];
97   static u_char opcode2[MAXBLOCK];
98   static u_char bt[MAXBLOCK];
99   static u_char rs1[MAXBLOCK];
100   static u_char rs2[MAXBLOCK];
101   static u_char rt1[MAXBLOCK];
102   static u_char rt2[MAXBLOCK];
103   static u_char us1[MAXBLOCK];
104   static u_char us2[MAXBLOCK];
105   static u_char dep1[MAXBLOCK];
106   static u_char dep2[MAXBLOCK];
107   static u_char lt1[MAXBLOCK];
108   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
109   static uint64_t gte_rt[MAXBLOCK];
110   static uint64_t gte_unneeded[MAXBLOCK];
111   static u_int smrv[32]; // speculated MIPS register values
112   static u_int smrv_strong; // mask of regs that are likely to have correct values
113   static u_int smrv_weak; // same, but somewhat less likely
114   static u_int smrv_strong_next; // same, but after current insn executes
115   static u_int smrv_weak_next;
116   static int imm[MAXBLOCK];
117   static u_int ba[MAXBLOCK];
118   static char likely[MAXBLOCK];
119   static char is_ds[MAXBLOCK];
120   static char ooo[MAXBLOCK];
121   static uint64_t unneeded_reg[MAXBLOCK];
122   static uint64_t unneeded_reg_upper[MAXBLOCK];
123   static uint64_t branch_unneeded_reg[MAXBLOCK];
124   static uint64_t branch_unneeded_reg_upper[MAXBLOCK];
125   static signed char regmap_pre[MAXBLOCK][HOST_REGS];
126   static uint64_t current_constmap[HOST_REGS];
127   static uint64_t constmap[MAXBLOCK][HOST_REGS];
128   static struct regstat regs[MAXBLOCK];
129   static struct regstat branch_regs[MAXBLOCK];
130   static signed char minimum_free_regs[MAXBLOCK];
131   static u_int needed_reg[MAXBLOCK];
132   static u_int wont_dirty[MAXBLOCK];
133   static u_int will_dirty[MAXBLOCK];
134   static int ccadj[MAXBLOCK];
135   static int slen;
136   static u_int instr_addr[MAXBLOCK];
137   static u_int link_addr[MAXBLOCK][3];
138   static int linkcount;
139   static u_int stubs[MAXBLOCK*3][8];
140   static int stubcount;
141   static u_int literals[1024][2];
142   static int literalcount;
143   static int is_delayslot;
144   static int cop1_usable;
145   static char shadow[1048576]  __attribute__((aligned(16)));
146   static void *copy;
147   static int expirep;
148   static u_int stop_after_jal;
149 #ifndef RAM_FIXED
150   static u_int ram_offset;
151 #else
152   static const u_int ram_offset=0;
153 #endif
154
155   int new_dynarec_hacks;
156   int new_dynarec_did_compile;
157   extern u_char restore_candidate[512];
158   extern int cycle_count;
159
160   /* registers that may be allocated */
161   /* 1-31 gpr */
162 #define HIREG 32 // hi
163 #define LOREG 33 // lo
164 #define FSREG 34 // FPU status (FCSR)
165 #define CSREG 35 // Coprocessor status
166 #define CCREG 36 // Cycle count
167 #define INVCP 37 // Pointer to invalid_code
168 //#define MMREG 38 // Pointer to memory_map
169 #define ROREG 39 // ram offset (if rdram!=0x80000000)
170 #define TEMPREG 40
171 #define FTEMP 40 // FPU temporary register
172 #define PTEMP 41 // Prefetch temporary register
173 //#define TLREG 42 // TLB mapping offset
174 #define RHASH 43 // Return address hash
175 #define RHTBL 44 // Return address hash table address
176 #define RTEMP 45 // JR/JALR address register
177 #define MAXREG 45
178 #define AGEN1 46 // Address generation temporary register
179 //#define AGEN2 47 // Address generation temporary register
180 //#define MGEN1 48 // Maptable address generation temporary register
181 //#define MGEN2 49 // Maptable address generation temporary register
182 #define BTREG 50 // Branch target temporary register
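
// Illustrative sketch (assumed usage; example_regmap_lookup is illustrative
// only): regmap[] entries use the numbering above -- 1..31 for MIPS GPRs,
// 32+ for the pseudo-registers, -1 for a free host register.  get_reg()
// below scans a regmap for a given number.
#if 0
static void example_regmap_lookup(signed char regmap[HOST_REGS])
{
  signed char hr = get_reg(regmap, CCREG);  // host reg holding the cycle count
  if (hr >= 0) {
    /* the cycle counter is currently cached in host register hr */
  }
}
#endif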
183
184   /* instruction types */
185 #define NOP 0     // No operation
186 #define LOAD 1    // Load
187 #define STORE 2   // Store
188 #define LOADLR 3  // Unaligned load
189 #define STORELR 4 // Unaligned store
190 #define MOV 5     // Move
191 #define ALU 6     // Arithmetic/logic
192 #define MULTDIV 7 // Multiply/divide
193 #define SHIFT 8   // Shift by register
194 #define SHIFTIMM 9// Shift by immediate
195 #define IMM16 10  // 16-bit immediate
196 #define RJUMP 11  // Unconditional jump to register
197 #define UJUMP 12  // Unconditional jump
198 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
199 #define SJUMP 14  // Conditional branch (regimm format)
200 #define COP0 15   // Coprocessor 0
201 #define COP1 16   // Coprocessor 1
202 #define C1LS 17   // Coprocessor 1 load/store
203 #define FJUMP 18  // Conditional branch (floating point)
204 #define FLOAT 19  // Floating point unit
205 #define FCONV 20  // Convert integer to float
206 #define FCOMP 21  // Floating point compare (sets FSREG)
207 #define SYSCALL 22// SYSCALL
208 #define OTHER 23  // Other
209 #define SPAN 24   // Branch/delay slot spans 2 pages
210 #define NI 25     // Not implemented
211 #define HLECALL 26// PCSX fake opcodes for HLE
212 #define COP2 27   // Coprocessor 2 move
213 #define C2LS 28   // Coprocessor 2 load/store
214 #define C2OP 29   // Coprocessor 2 operation
215 #define INTCALL 30// Call interpreter to handle rare corner cases
216
217   /* stubs */
218 #define CC_STUB 1
219 #define FP_STUB 2
220 #define LOADB_STUB 3
221 #define LOADH_STUB 4
222 #define LOADW_STUB 5
223 #define LOADD_STUB 6
224 #define LOADBU_STUB 7
225 #define LOADHU_STUB 8
226 #define STOREB_STUB 9
227 #define STOREH_STUB 10
228 #define STOREW_STUB 11
229 #define STORED_STUB 12
230 #define STORELR_STUB 13
231 #define INVCODE_STUB 14
232
233   /* branch codes */
234 #define TAKEN 1
235 #define NOTTAKEN 2
236 #define NULLDS 3
237
238 // asm linkage
239 int new_recompile_block(int addr);
240 void *get_addr_ht(u_int vaddr);
241 void invalidate_block(u_int block);
242 void invalidate_addr(u_int addr);
243 void remove_hash(int vaddr);
244 void dyna_linker();
245 void dyna_linker_ds();
246 void verify_code();
247 void verify_code_vm();
248 void verify_code_ds();
249 void cc_interrupt();
250 void fp_exception();
251 void fp_exception_ds();
252 void jump_syscall_hle();
253 void jump_hlecall();
254 void jump_intcall();
255 void new_dyna_leave();
256
257 // Needed by assembler
258 static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
259 static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
260 static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
261 static void load_all_regs(signed char i_regmap[]);
262 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
263 static void load_regs_entry(int t);
264 static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
265
266 static int verify_dirty(u_int *ptr);
267 static int get_final_value(int hr, int i, int *value);
268 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e);
269 static void add_to_linker(int addr,int target,int ext);
270
271 static int tracedebug=0;
272
273 static void mprotect_w_x(void *start, void *end, int is_x)
274 {
275 #ifdef NO_WRITE_EXEC
276   #if defined(VITA)
277   // *Open* enables write on all memory that was
278   // allocated by sceKernelAllocMemBlockForVM()?
279   if (is_x)
280     sceKernelCloseVMDomain();
281   else
282     sceKernelOpenVMDomain();
283   #else
284   u_long mstart = (u_long)start & ~4095ul;
285   u_long mend = (u_long)end;
286   if (mprotect((void *)mstart, mend - mstart,
287                PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
288     SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
289   #endif
290 #endif
291 }
292
293 static void start_tcache_write(void *start, void *end)
294 {
295   mprotect_w_x(start, end, 0);
296 }
297
298 static void end_tcache_write(void *start, void *end)
299 {
300 #ifdef __arm__
301   size_t len = (char *)end - (char *)start;
302   #if   defined(__BLACKBERRY_QNX__)
303   msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
304   #elif defined(__MACH__)
305   sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
306   #elif defined(VITA)
307   sceKernelSyncVMDomain(sceBlock, start, len);
308   #elif defined(_3DS)
309   ctr_flush_invalidate_cache();
310   #else
311   __clear_cache(start, end);
312   #endif
313   (void)len;
314 #endif
315
316   mprotect_w_x(start, end, 1);
317 }
318
319 static void *start_block(void)
320 {
321   u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
322   if (end > (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2))
323     end = (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2);
324   start_tcache_write(out, end);
325   return out;
326 }
327
328 static void end_block(void *start)
329 {
330   end_tcache_write(start, out);
331 }
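
// Usage sketch (assumed, matching how the recompiler drives these helpers;
// example_emit is illustrative only): emission into the translation cache is
// bracketed by start_block()/end_block() so that, on NO_WRITE_EXEC targets,
// the region around 'out' is writable while code is generated and executable
// again (with instruction caches flushed) before it runs.
#if 0
static void example_emit(void)
{
  void *beginning = start_block();  // open a writable window at 'out'
  /* ... emit instructions, advancing 'out' ... */
  end_block(beginning);             // sync caches, restore execute permission
}
#endif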
332
333 //#define DEBUG_CYCLE_COUNT 1
334
335 #define NO_CYCLE_PENALTY_THR 12
336
337 int cycle_multiplier; // 100 for 1.0
338
339 static int CLOCK_ADJUST(int x)
340 {
341   int s=(x>>31)|1;
342   return (x * cycle_multiplier + s * 50) / 100;
343 }
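
// Worked examples (for illustration): with cycle_multiplier=100 the result
// equals x.  With cycle_multiplier=150, CLOCK_ADJUST(3) = (3*150+50)/100 = 5,
// i.e. 4.5 rounded to the nearest cycle; the sign term s keeps negative
// inputs rounding symmetrically: CLOCK_ADJUST(-3) = (-450-50)/100 = -5.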
344
345 static u_int get_page(u_int vaddr)
346 {
347   u_int page=vaddr&~0xe0000000;
348   if (page < 0x1000000)
349     page &= ~0x0e00000; // RAM mirrors
350   page>>=12;
351   if(page>2048) page=2048+(page&2047);
352   return page;
353 }
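
// Worked examples (for illustration, following the mapping above):
//   0x80010000 -> 0x00010000          -> page 0x010  (RAM)
//   0x80210000 -> mirror bits cleared -> page 0x010  (same 2MB RAM image)
//   0xbfc00180 -> 0x1fc00180 >> 12 = 0x1fc00, clamped to 2048+(0x1fc00&2047) = 3072  (BIOS)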
354
355 // no virtual mem in PCSX
356 static u_int get_vpage(u_int vaddr)
357 {
358   return get_page(vaddr);
359 }
360
361 // Get address from virtual address
362 // This is called from the recompiled JR/JALR instructions
363 void *get_addr(u_int vaddr)
364 {
365   u_int page=get_page(vaddr);
366   u_int vpage=get_vpage(vaddr);
367   struct ll_entry *head;
368   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
369   head=jump_in[page];
370   while(head!=NULL) {
371     if(head->vaddr==vaddr) {
372   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
373       u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
374       ht_bin[3]=ht_bin[1];
375       ht_bin[2]=ht_bin[0];
376       ht_bin[1]=(u_int)head->addr;
377       ht_bin[0]=vaddr;
378       return head->addr;
379     }
380     head=head->next;
381   }
382   head=jump_dirty[vpage];
383   while(head!=NULL) {
384     if(head->vaddr==vaddr) {
385       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
386       // Don't restore blocks which are about to expire from the cache
387       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
388       if(verify_dirty(head->addr)) {
389         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
390         invalid_code[vaddr>>12]=0;
391         inv_code_start=inv_code_end=~0;
392         if(vpage<2048) {
393           restore_candidate[vpage>>3]|=1<<(vpage&7);
394         }
395         else restore_candidate[page>>3]|=1<<(page&7);
396         u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
397         if(ht_bin[0]==vaddr) {
398           ht_bin[1]=(u_int)head->addr; // Replace existing entry
399         }
400         else
401         {
402           ht_bin[3]=ht_bin[1];
403           ht_bin[2]=ht_bin[0];
404           ht_bin[1]=(int)head->addr;
405           ht_bin[0]=vaddr;
406         }
407         return head->addr;
408       }
409     }
410     head=head->next;
411   }
412   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
413   int r=new_recompile_block(vaddr);
414   if(r==0) return get_addr(vaddr);
415   // Execute in unmapped page, generate pagefault exception
416   Status|=2;
417   Cause=(vaddr<<31)|0x8;
418   EPC=(vaddr&1)?vaddr-5:vaddr;
419   BadVAddr=(vaddr&~1);
420   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
421   EntryHi=BadVAddr&0xFFFFE000;
422   return get_addr_ht(0x80000000);
423 }
424 // Look up address in hash table first
425 void *get_addr_ht(u_int vaddr)
426 {
427   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
428   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
429   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
430   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
431   return get_addr(vaddr);
432 }
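
// Hash table sketch (as used above; example_ht_lookup is illustrative only):
// each 16-byte bin holds two (vaddr, host address) pairs, with slots [0]/[1]
// holding the most recently installed one and [2]/[3] the second chance; the
// index mixes the upper and lower halves of the virtual address.
#if 0
static void *example_ht_lookup(u_int vaddr)
{
  u_int *bin = hash_table[((vaddr >> 16) ^ vaddr) & 0xFFFF];
  if (bin[0] == vaddr) return (void *)bin[1];  // hot entry
  if (bin[2] == vaddr) return (void *)bin[3];  // second chance
  return NULL;                                 // fall back to the page lists
}
#endif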
433
434 void clear_all_regs(signed char regmap[])
435 {
436   int hr;
437   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
438 }
439
440 signed char get_reg(signed char regmap[],int r)
441 {
442   int hr;
443   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
444   return -1;
445 }
446
447 // Find a register that is available for two consecutive cycles
448 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
449 {
450   int hr;
451   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
452   return -1;
453 }
454
455 int count_free_regs(signed char regmap[])
456 {
457   int count=0;
458   int hr;
459   for(hr=0;hr<HOST_REGS;hr++)
460   {
461     if(hr!=EXCLUDE_REG) {
462       if(regmap[hr]<0) count++;
463     }
464   }
465   return count;
466 }
467
468 void dirty_reg(struct regstat *cur,signed char reg)
469 {
470   int hr;
471   if(!reg) return;
472   for (hr=0;hr<HOST_REGS;hr++) {
473     if((cur->regmap[hr]&63)==reg) {
474       cur->dirty|=1<<hr;
475     }
476   }
477 }
478
479 // If we dirty the lower half of a 64 bit register which is now being
480 // sign-extended, we need to dump the upper half.
481 // Note: Do this only after completion of the instruction, because
482 // some instructions may need to read the full 64-bit value even if
483 // overwriting it (eg SLTI, DSRA32).
484 static void flush_dirty_uppers(struct regstat *cur)
485 {
486   int hr,reg;
487   for (hr=0;hr<HOST_REGS;hr++) {
488     if((cur->dirty>>hr)&1) {
489       reg=cur->regmap[hr];
490       if(reg>=64)
491         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
492     }
493   }
494 }
495
496 void set_const(struct regstat *cur,signed char reg,uint64_t value)
497 {
498   int hr;
499   if(!reg) return;
500   for (hr=0;hr<HOST_REGS;hr++) {
501     if(cur->regmap[hr]==reg) {
502       cur->isconst|=1<<hr;
503       current_constmap[hr]=value;
504     }
505     else if((cur->regmap[hr]^64)==reg) {
506       cur->isconst|=1<<hr;
507       current_constmap[hr]=value>>32;
508     }
509   }
510 }
511
512 void clear_const(struct regstat *cur,signed char reg)
513 {
514   int hr;
515   if(!reg) return;
516   for (hr=0;hr<HOST_REGS;hr++) {
517     if((cur->regmap[hr]&63)==reg) {
518       cur->isconst&=~(1<<hr);
519     }
520   }
521 }
522
523 int is_const(struct regstat *cur,signed char reg)
524 {
525   int hr;
526   if(reg<0) return 0;
527   if(!reg) return 1;
528   for (hr=0;hr<HOST_REGS;hr++) {
529     if((cur->regmap[hr]&63)==reg) {
530       return (cur->isconst>>hr)&1;
531     }
532   }
533   return 0;
534 }
535 uint64_t get_const(struct regstat *cur,signed char reg)
536 {
537   int hr;
538   if(!reg) return 0;
539   for (hr=0;hr<HOST_REGS;hr++) {
540     if(cur->regmap[hr]==reg) {
541       return current_constmap[hr];
542     }
543   }
544   SysPrintf("Unknown constant in r%d\n",reg);
545   exit(1);
546 }
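
// Usage sketch (assumed, mirroring e.g. shiftimm_alloc() further down;
// example_const_fold is illustrative only): the allocator tracks known
// register values so immediates can be folded at recompile time instead of
// being recomputed by the generated code.
#if 0
static void example_const_fold(struct regstat *cur, int rs, int rt, int sa)
{
  if (is_const(cur, rs)) {
    int v = get_const(cur, rs);
    set_const(cur, rt, v << sa);   // the result of SLL rt,rs,sa is known too
  } else {
    clear_const(cur, rt);          // unknown input -> result no longer const
  }
}
#endif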
547
548 // Least soon needed registers
549 // Look at the next ten instructions and see which registers
550 // will be used.  Try not to reallocate these.
551 void lsn(u_char hsn[], int i, int *preferred_reg)
552 {
553   int j;
554   int b=-1;
555   for(j=0;j<9;j++)
556   {
557     if(i+j>=slen) {
558       j=slen-i-1;
559       break;
560     }
561     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
562     {
563       // Don't go past an unconditional jump
564       j++;
565       break;
566     }
567   }
568   for(;j>=0;j--)
569   {
570     if(rs1[i+j]) hsn[rs1[i+j]]=j;
571     if(rs2[i+j]) hsn[rs2[i+j]]=j;
572     if(rt1[i+j]) hsn[rt1[i+j]]=j;
573     if(rt2[i+j]) hsn[rt2[i+j]]=j;
574     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
575       // Stores can allocate zero
576       hsn[rs1[i+j]]=j;
577       hsn[rs2[i+j]]=j;
578     }
579     // On some architectures stores need invc_ptr
580     #if defined(HOST_IMM8)
581     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
582       hsn[INVCP]=j;
583     }
584     #endif
585     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
586     {
587       hsn[CCREG]=j;
588       b=j;
589     }
590   }
591   if(b>=0)
592   {
593     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
594     {
595       // Follow first branch
596       int t=(ba[i+b]-start)>>2;
597       j=7-b;if(t+j>=slen) j=slen-t-1;
598       for(;j>=0;j--)
599       {
600         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
601         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
602         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
603         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
604       }
605     }
606     // TODO: preferred register based on backward branch
607   }
608   // Delay slot should preferably not overwrite branch conditions or cycle count
609   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
610     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
611     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
612     hsn[CCREG]=1;
613     // ...or hash tables
614     hsn[RHASH]=1;
615     hsn[RHTBL]=1;
616   }
617   // Coprocessor load/store needs FTEMP, even if not declared
618   if(itype[i]==C1LS||itype[i]==C2LS) {
619     hsn[FTEMP]=0;
620   }
621   // Load L/R also uses FTEMP as a temporary register
622   if(itype[i]==LOADLR) {
623     hsn[FTEMP]=0;
624   }
625   // Also SWL/SWR/SDL/SDR
626   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
627     hsn[FTEMP]=0;
628   }
629   // Don't remove the miniht registers
630   if(itype[i]==UJUMP||itype[i]==RJUMP)
631   {
632     hsn[RHASH]=0;
633     hsn[RHTBL]=0;
634   }
635 }
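
// Usage sketch (assumed, matching the allocator's callers; example_lsn_use
// is illustrative only): hsn[] is primed with a large distance and lsn()
// lowers the entry of every register used in the next few instructions, so
// spilling prefers registers that will not be needed soon.
#if 0
static void example_lsn_use(int i)
{
  u_char hsn[MAXREG+1];
  int preferred_reg = -1;
  memset(hsn, 10, sizeof(hsn));
  lsn(hsn, i, &preferred_reg);
  /* registers r with a small hsn[r] should be kept allocated here */
}
#endif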
636
637 // We only want to allocate registers if we're going to use them again soon
638 int needed_again(int r, int i)
639 {
640   int j;
641   int b=-1;
642   int rn=10;
643
644   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
645   {
646     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
647       return 0; // Don't need any registers if exiting the block
648   }
649   for(j=0;j<9;j++)
650   {
651     if(i+j>=slen) {
652       j=slen-i-1;
653       break;
654     }
655     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
656     {
657       // Don't go past an unconditional jump
658       j++;
659       break;
660     }
661     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
662     {
663       break;
664     }
665   }
666   for(;j>=1;j--)
667   {
668     if(rs1[i+j]==r) rn=j;
669     if(rs2[i+j]==r) rn=j;
670     if((unneeded_reg[i+j]>>r)&1) rn=10;
671     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
672     {
673       b=j;
674     }
675   }
676   /*
677   if(b>=0)
678   {
679     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
680     {
681       // Follow first branch
682       int o=rn;
683       int t=(ba[i+b]-start)>>2;
684       j=7-b;if(t+j>=slen) j=slen-t-1;
685       for(;j>=0;j--)
686       {
687         if(!((unneeded_reg[t+j]>>r)&1)) {
688           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
689           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
690         }
691         else rn=o;
692       }
693     }
694   }*/
695   if(rn<10) return 1;
696   (void)b;
697   return 0;
698 }
699
700 // Try to match register allocations at the end of a loop with those
701 // at the beginning
702 int loop_reg(int i, int r, int hr)
703 {
704   int j,k;
705   for(j=0;j<9;j++)
706   {
707     if(i+j>=slen) {
708       j=slen-i-1;
709       break;
710     }
711     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
712     {
713       // Don't go past an unconditional jump
714       j++;
715       break;
716     }
717   }
718   k=0;
719   if(i>0){
720     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
721       k--;
722   }
723   for(;k<j;k++)
724   {
725     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
726     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
727     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
728     {
729       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
730       {
731         int t=(ba[i+k]-start)>>2;
732         int reg=get_reg(regs[t].regmap_entry,r);
733         if(reg>=0) return reg;
734         //reg=get_reg(regs[t+1].regmap_entry,r);
735         //if(reg>=0) return reg;
736       }
737     }
738   }
739   return hr;
740 }
741
742
743 // Allocate every register, preserving source/target regs
744 void alloc_all(struct regstat *cur,int i)
745 {
746   int hr;
747
748   for(hr=0;hr<HOST_REGS;hr++) {
749     if(hr!=EXCLUDE_REG) {
750       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
751          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
752       {
753         cur->regmap[hr]=-1;
754         cur->dirty&=~(1<<hr);
755       }
756       // Don't need zeros
757       if((cur->regmap[hr]&63)==0)
758       {
759         cur->regmap[hr]=-1;
760         cur->dirty&=~(1<<hr);
761       }
762     }
763   }
764 }
765
766 #ifdef __i386__
767 #include "assem_x86.c"
768 #endif
769 #ifdef __x86_64__
770 #include "assem_x64.c"
771 #endif
772 #ifdef __arm__
773 #include "assem_arm.c"
774 #endif
775
776 // Add virtual address mapping to linked list
777 void ll_add(struct ll_entry **head,int vaddr,void *addr)
778 {
779   struct ll_entry *new_entry;
780   new_entry=malloc(sizeof(struct ll_entry));
781   assert(new_entry!=NULL);
782   new_entry->vaddr=vaddr;
783   new_entry->reg_sv_flags=0;
784   new_entry->addr=addr;
785   new_entry->next=*head;
786   *head=new_entry;
787 }
788
789 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
790 {
791   ll_add(head,vaddr,addr);
792   (*head)->reg_sv_flags=reg_sv_flags;
793 }
794
795 // Check if an address is already compiled
796 // but don't return addresses which are about to expire from the cache
797 void *check_addr(u_int vaddr)
798 {
799   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
800   if(ht_bin[0]==vaddr) {
801     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
802       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
803   }
804   if(ht_bin[2]==vaddr) {
805     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
806       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
807   }
808   u_int page=get_page(vaddr);
809   struct ll_entry *head;
810   head=jump_in[page];
811   while(head!=NULL) {
812     if(head->vaddr==vaddr) {
813       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
814         // Update existing entry with current address
815         if(ht_bin[0]==vaddr) {
816           ht_bin[1]=(int)head->addr;
817           return head->addr;
818         }
819         if(ht_bin[2]==vaddr) {
820           ht_bin[3]=(int)head->addr;
821           return head->addr;
822         }
823         // Insert into hash table with low priority.
824         // Don't evict existing entries, as they are probably
825         // addresses that are being accessed frequently.
826         if(ht_bin[0]==-1) {
827           ht_bin[1]=(int)head->addr;
828           ht_bin[0]=vaddr;
829         }else if(ht_bin[2]==-1) {
830           ht_bin[3]=(int)head->addr;
831           ht_bin[2]=vaddr;
832         }
833         return head->addr;
834       }
835     }
836     head=head->next;
837   }
838   return 0;
839 }
840
841 void remove_hash(int vaddr)
842 {
843   //printf("remove hash: %x\n",vaddr);
844   u_int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
845   if(ht_bin[2]==vaddr) {
846     ht_bin[2]=ht_bin[3]=-1;
847   }
848   if(ht_bin[0]==vaddr) {
849     ht_bin[0]=ht_bin[2];
850     ht_bin[1]=ht_bin[3];
851     ht_bin[2]=ht_bin[3]=-1;
852   }
853 }
854
855 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
856 {
857   struct ll_entry *next;
858   while(*head) {
859     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
860        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
861     {
862       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
863       remove_hash((*head)->vaddr);
864       next=(*head)->next;
865       free(*head);
866       *head=next;
867     }
868     else
869     {
870       head=&((*head)->next);
871     }
872   }
873 }
874
875 // Remove all entries from linked list
876 void ll_clear(struct ll_entry **head)
877 {
878   struct ll_entry *cur;
879   struct ll_entry *next;
880   if((cur=*head)) {
881     *head=0;
882     while(cur) {
883       next=cur->next;
884       free(cur);
885       cur=next;
886     }
887   }
888 }
889
890 // Dereference the pointers; if one targets the given range, unlink the jump so it goes through the linker again
891 static void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
892 {
893   while(head) {
894     int ptr=get_pointer(head->addr);
895     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
896     if(((ptr>>shift)==(addr>>shift)) ||
897        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
898     {
899       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
900       void *host_addr=find_extjump_insn(head->addr);
901       #ifdef __arm__
902         mark_clear_cache(host_addr);
903       #endif
904       set_jump_target((int)host_addr,(int)head->addr);
905     }
906     head=head->next;
907   }
908 }
909
910 // This is called when we write to a compiled block (see do_invstub)
911 void invalidate_page(u_int page)
912 {
913   struct ll_entry *head;
914   struct ll_entry *next;
915   head=jump_in[page];
916   jump_in[page]=0;
917   while(head!=NULL) {
918     inv_debug("INVALIDATE: %x\n",head->vaddr);
919     remove_hash(head->vaddr);
920     next=head->next;
921     free(head);
922     head=next;
923   }
924   head=jump_out[page];
925   jump_out[page]=0;
926   while(head!=NULL) {
927     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
928     void *host_addr=find_extjump_insn(head->addr);
929     #ifdef __arm__
930       mark_clear_cache(host_addr);
931     #endif
932     set_jump_target((int)host_addr,(int)head->addr);
933     next=head->next;
934     free(head);
935     head=next;
936   }
937 }
938
939 static void invalidate_block_range(u_int block, u_int first, u_int last)
940 {
941   u_int page=get_page(block<<12);
942   //printf("first=%d last=%d\n",first,last);
943   invalidate_page(page);
944   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
945   assert(last<page+5);
946   // Invalidate the adjacent pages if a block crosses a 4K boundary
947   while(first<page) {
948     invalidate_page(first);
949     first++;
950   }
951   for(first=page+1;first<last;first++) {
952     invalidate_page(first);
953   }
954   #ifdef __arm__
955     do_clear_cache();
956   #endif
957
958   // Don't trap writes
959   invalid_code[block]=1;
960
961   #ifdef USE_MINI_HT
962   memset(mini_ht,-1,sizeof(mini_ht));
963   #endif
964 }
965
966 void invalidate_block(u_int block)
967 {
968   u_int page=get_page(block<<12);
969   u_int vpage=get_vpage(block<<12);
970   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
971   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
972   u_int first,last;
973   first=last=page;
974   struct ll_entry *head;
975   head=jump_dirty[vpage];
976   //printf("page=%d vpage=%d\n",page,vpage);
977   while(head!=NULL) {
978     u_int start,end;
979     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
980       get_bounds((int)head->addr,&start,&end);
981       //printf("start: %x end: %x\n",start,end);
982       if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
983         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
984           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
985           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
986         }
987       }
988     }
989     head=head->next;
990   }
991   invalidate_block_range(block,first,last);
992 }
993
994 void invalidate_addr(u_int addr)
995 {
996   //static int rhits;
997   // this check is done by the caller
998   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
999   u_int page=get_vpage(addr);
1000   if(page<2048) { // RAM
1001     struct ll_entry *head;
1002     u_int addr_min=~0, addr_max=0;
1003     u_int mask=RAM_SIZE-1;
1004     u_int addr_main=0x80000000|(addr&mask);
1005     int pg1;
1006     inv_code_start=addr_main&~0xfff;
1007     inv_code_end=addr_main|0xfff;
1008     pg1=page;
1009     if (pg1>0) {
1010       // must check previous page too because of spans..
1011       pg1--;
1012       inv_code_start-=0x1000;
1013     }
1014     for(;pg1<=page;pg1++) {
1015       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1016         u_int start,end;
1017         get_bounds((int)head->addr,&start,&end);
1018         if(ram_offset) {
1019           start-=ram_offset;
1020           end-=ram_offset;
1021         }
1022         if(start<=addr_main&&addr_main<end) {
1023           if(start<addr_min) addr_min=start;
1024           if(end>addr_max) addr_max=end;
1025         }
1026         else if(addr_main<start) {
1027           if(start<inv_code_end)
1028             inv_code_end=start-1;
1029         }
1030         else {
1031           if(end>inv_code_start)
1032             inv_code_start=end;
1033         }
1034       }
1035     }
1036     if (addr_min!=~0) {
1037       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1038       inv_code_start=inv_code_end=~0;
1039       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1040       return;
1041     }
1042     else {
1043       inv_code_start=(addr&~mask)|(inv_code_start&mask);
1044       inv_code_end=(addr&~mask)|(inv_code_end&mask);
1045       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
1046       return;
1047     }
1048   }
1049   invalidate_block(addr>>12);
1050 }
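
// Caller-side sketch (assumed; see the "this check is done by the caller"
// note above; example_on_ram_write is illustrative only): write handlers
// skip the call entirely while the written address stays inside the cached
// code-free window.
#if 0
static void example_on_ram_write(u_int addr)
{
  if (!(inv_code_start <= addr && addr <= inv_code_end))
    invalidate_addr(addr);
}
#endif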
1051
1052 // This is called when loading a save state.
1053 // Anything could have changed, so invalidate everything.
1054 void invalidate_all_pages()
1055 {
1056   u_int page;
1057   for(page=0;page<4096;page++)
1058     invalidate_page(page);
1059   for(page=0;page<1048576;page++)
1060     if(!invalid_code[page]) {
1061       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1062       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1063     }
1064   #ifdef USE_MINI_HT
1065   memset(mini_ht,-1,sizeof(mini_ht));
1066   #endif
1067 }
1068
1069 // Add an entry to jump_out after making a link
1070 void add_link(u_int vaddr,void *src)
1071 {
1072   u_int page=get_page(vaddr);
1073   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1074   int *ptr=(int *)(src+4);
1075   assert((*ptr&0x0fff0000)==0x059f0000);
1076   (void)ptr;
1077   ll_add(jump_out+page,vaddr,src);
1078   //int ptr=get_pointer(src);
1079   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1080 }
1081
1082 // If a code block was found to be unmodified (bit was set in
1083 // restore_candidate) and it remains unmodified (bit is clear
1084 // in invalid_code) then move the entries for that 4K page from
1085 // the dirty list to the clean list.
1086 void clean_blocks(u_int page)
1087 {
1088   struct ll_entry *head;
1089   inv_debug("INV: clean_blocks page=%d\n",page);
1090   head=jump_dirty[page];
1091   while(head!=NULL) {
1092     if(!invalid_code[head->vaddr>>12]) {
1093       // Don't restore blocks which are about to expire from the cache
1094       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1095         u_int start,end;
1096         if(verify_dirty(head->addr)) {
1097           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1098           u_int i;
1099           u_int inv=0;
1100           get_bounds((int)head->addr,&start,&end);
1101           if(start-(u_int)rdram<RAM_SIZE) {
1102             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1103               inv|=invalid_code[i];
1104             }
1105           }
1106           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1107             inv=1;
1108           }
1109           if(!inv) {
1110             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1111             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1112               u_int ppage=page;
1113               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1114               //printf("page=%x, addr=%x\n",page,head->vaddr);
1115               //assert(head->vaddr>>12==(page|0x80000));
1116               ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
1117               u_int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1118               if(ht_bin[0]==head->vaddr) {
1119                 ht_bin[1]=(u_int)clean_addr; // Replace existing entry
1120               }
1121               if(ht_bin[2]==head->vaddr) {
1122                 ht_bin[3]=(u_int)clean_addr; // Replace existing entry
1123               }
1124             }
1125           }
1126         }
1127       }
1128     }
1129     head=head->next;
1130   }
1131 }
1132
1133
1134 void mov_alloc(struct regstat *current,int i)
1135 {
1136   // Note: Don't need to actually alloc the source registers
1137   if((~current->is32>>rs1[i])&1) {
1138     //alloc_reg64(current,i,rs1[i]);
1139     alloc_reg64(current,i,rt1[i]);
1140     current->is32&=~(1LL<<rt1[i]);
1141   } else {
1142     //alloc_reg(current,i,rs1[i]);
1143     alloc_reg(current,i,rt1[i]);
1144     current->is32|=(1LL<<rt1[i]);
1145   }
1146   clear_const(current,rs1[i]);
1147   clear_const(current,rt1[i]);
1148   dirty_reg(current,rt1[i]);
1149 }
1150
1151 void shiftimm_alloc(struct regstat *current,int i)
1152 {
1153   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1154   {
1155     if(rt1[i]) {
1156       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1157       else lt1[i]=rs1[i];
1158       alloc_reg(current,i,rt1[i]);
1159       current->is32|=1LL<<rt1[i];
1160       dirty_reg(current,rt1[i]);
1161       if(is_const(current,rs1[i])) {
1162         int v=get_const(current,rs1[i]);
1163         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1164         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1165         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1166       }
1167       else clear_const(current,rt1[i]);
1168     }
1169   }
1170   else
1171   {
1172     clear_const(current,rs1[i]);
1173     clear_const(current,rt1[i]);
1174   }
1175
1176   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1177   {
1178     if(rt1[i]) {
1179       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1180       alloc_reg64(current,i,rt1[i]);
1181       current->is32&=~(1LL<<rt1[i]);
1182       dirty_reg(current,rt1[i]);
1183     }
1184   }
1185   if(opcode2[i]==0x3c) // DSLL32
1186   {
1187     if(rt1[i]) {
1188       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1189       alloc_reg64(current,i,rt1[i]);
1190       current->is32&=~(1LL<<rt1[i]);
1191       dirty_reg(current,rt1[i]);
1192     }
1193   }
1194   if(opcode2[i]==0x3e) // DSRL32
1195   {
1196     if(rt1[i]) {
1197       alloc_reg64(current,i,rs1[i]);
1198       if(imm[i]==32) {
1199         alloc_reg64(current,i,rt1[i]);
1200         current->is32&=~(1LL<<rt1[i]);
1201       } else {
1202         alloc_reg(current,i,rt1[i]);
1203         current->is32|=1LL<<rt1[i];
1204       }
1205       dirty_reg(current,rt1[i]);
1206     }
1207   }
1208   if(opcode2[i]==0x3f) // DSRA32
1209   {
1210     if(rt1[i]) {
1211       alloc_reg64(current,i,rs1[i]);
1212       alloc_reg(current,i,rt1[i]);
1213       current->is32|=1LL<<rt1[i];
1214       dirty_reg(current,rt1[i]);
1215     }
1216   }
1217 }
1218
1219 void shift_alloc(struct regstat *current,int i)
1220 {
1221   if(rt1[i]) {
1222     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1223     {
1224       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1225       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1226       alloc_reg(current,i,rt1[i]);
1227       if(rt1[i]==rs2[i]) {
1228         alloc_reg_temp(current,i,-1);
1229         minimum_free_regs[i]=1;
1230       }
1231       current->is32|=1LL<<rt1[i];
1232     } else { // DSLLV/DSRLV/DSRAV
1233       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1234       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1235       alloc_reg64(current,i,rt1[i]);
1236       current->is32&=~(1LL<<rt1[i]);
1237       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1238       {
1239         alloc_reg_temp(current,i,-1);
1240         minimum_free_regs[i]=1;
1241       }
1242     }
1243     clear_const(current,rs1[i]);
1244     clear_const(current,rs2[i]);
1245     clear_const(current,rt1[i]);
1246     dirty_reg(current,rt1[i]);
1247   }
1248 }
1249
1250 void alu_alloc(struct regstat *current,int i)
1251 {
1252   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1253     if(rt1[i]) {
1254       if(rs1[i]&&rs2[i]) {
1255         alloc_reg(current,i,rs1[i]);
1256         alloc_reg(current,i,rs2[i]);
1257       }
1258       else {
1259         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1260         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1261       }
1262       alloc_reg(current,i,rt1[i]);
1263     }
1264     current->is32|=1LL<<rt1[i];
1265   }
1266   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1267     if(rt1[i]) {
1268       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1269       {
1270         alloc_reg64(current,i,rs1[i]);
1271         alloc_reg64(current,i,rs2[i]);
1272         alloc_reg(current,i,rt1[i]);
1273       } else {
1274         alloc_reg(current,i,rs1[i]);
1275         alloc_reg(current,i,rs2[i]);
1276         alloc_reg(current,i,rt1[i]);
1277       }
1278     }
1279     current->is32|=1LL<<rt1[i];
1280   }
1281   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1282     if(rt1[i]) {
1283       if(rs1[i]&&rs2[i]) {
1284         alloc_reg(current,i,rs1[i]);
1285         alloc_reg(current,i,rs2[i]);
1286       }
1287       else
1288       {
1289         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1290         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1291       }
1292       alloc_reg(current,i,rt1[i]);
1293       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1294       {
1295         if(!((current->uu>>rt1[i])&1)) {
1296           alloc_reg64(current,i,rt1[i]);
1297         }
1298         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1299           if(rs1[i]&&rs2[i]) {
1300             alloc_reg64(current,i,rs1[i]);
1301             alloc_reg64(current,i,rs2[i]);
1302           }
1303           else
1304           {
1305             // Is it really worth it to keep 64-bit values in registers?
1306             #ifdef NATIVE_64BIT
1307             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1308             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1309             #endif
1310           }
1311         }
1312         current->is32&=~(1LL<<rt1[i]);
1313       } else {
1314         current->is32|=1LL<<rt1[i];
1315       }
1316     }
1317   }
1318   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1319     if(rt1[i]) {
1320       if(rs1[i]&&rs2[i]) {
1321         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1322           alloc_reg64(current,i,rs1[i]);
1323           alloc_reg64(current,i,rs2[i]);
1324           alloc_reg64(current,i,rt1[i]);
1325         } else {
1326           alloc_reg(current,i,rs1[i]);
1327           alloc_reg(current,i,rs2[i]);
1328           alloc_reg(current,i,rt1[i]);
1329         }
1330       }
1331       else {
1332         alloc_reg(current,i,rt1[i]);
1333         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1334           // DADD used as move, or zeroing
1335           // If we have a 64-bit source, then make the target 64 bits too
1336           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1337             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1338             alloc_reg64(current,i,rt1[i]);
1339           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1340             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1341             alloc_reg64(current,i,rt1[i]);
1342           }
1343           if(opcode2[i]>=0x2e&&rs2[i]) {
1344             // DSUB used as negation - 64-bit result
1345             // If we have a 32-bit register, extend it to 64 bits
1346             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1347             alloc_reg64(current,i,rt1[i]);
1348           }
1349         }
1350       }
1351       if(rs1[i]&&rs2[i]) {
1352         current->is32&=~(1LL<<rt1[i]);
1353       } else if(rs1[i]) {
1354         current->is32&=~(1LL<<rt1[i]);
1355         if((current->is32>>rs1[i])&1)
1356           current->is32|=1LL<<rt1[i];
1357       } else if(rs2[i]) {
1358         current->is32&=~(1LL<<rt1[i]);
1359         if((current->is32>>rs2[i])&1)
1360           current->is32|=1LL<<rt1[i];
1361       } else {
1362         current->is32|=1LL<<rt1[i];
1363       }
1364     }
1365   }
1366   clear_const(current,rs1[i]);
1367   clear_const(current,rs2[i]);
1368   clear_const(current,rt1[i]);
1369   dirty_reg(current,rt1[i]);
1370 }
1371
1372 void imm16_alloc(struct regstat *current,int i)
1373 {
1374   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1375   else lt1[i]=rs1[i];
1376   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1377   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1378     current->is32&=~(1LL<<rt1[i]);
1379     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1380       // TODO: Could preserve the 32-bit flag if the immediate is zero
1381       alloc_reg64(current,i,rt1[i]);
1382       alloc_reg64(current,i,rs1[i]);
1383     }
1384     clear_const(current,rs1[i]);
1385     clear_const(current,rt1[i]);
1386   }
1387   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1388     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1389     current->is32|=1LL<<rt1[i];
1390     clear_const(current,rs1[i]);
1391     clear_const(current,rt1[i]);
1392   }
1393   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1394     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1395       if(rs1[i]!=rt1[i]) {
1396         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1397         alloc_reg64(current,i,rt1[i]);
1398         current->is32&=~(1LL<<rt1[i]);
1399       }
1400     }
1401     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1402     if(is_const(current,rs1[i])) {
1403       int v=get_const(current,rs1[i]);
1404       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1405       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1406       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1407     }
1408     else clear_const(current,rt1[i]);
1409   }
1410   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1411     if(is_const(current,rs1[i])) {
1412       int v=get_const(current,rs1[i]);
1413       set_const(current,rt1[i],v+imm[i]);
1414     }
1415     else clear_const(current,rt1[i]);
1416     current->is32|=1LL<<rt1[i];
1417   }
1418   else {
1419     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1420     current->is32|=1LL<<rt1[i];
1421   }
1422   dirty_reg(current,rt1[i]);
1423 }
1424
1425 void load_alloc(struct regstat *current,int i)
1426 {
1427   clear_const(current,rt1[i]);
1428   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1429   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1430   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1431   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1432     alloc_reg(current,i,rt1[i]);
1433     assert(get_reg(current->regmap,rt1[i])>=0);
1434     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1435     {
1436       current->is32&=~(1LL<<rt1[i]);
1437       alloc_reg64(current,i,rt1[i]);
1438     }
1439     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1440     {
1441       current->is32&=~(1LL<<rt1[i]);
1442       alloc_reg64(current,i,rt1[i]);
1443       alloc_all(current,i);
1444       alloc_reg64(current,i,FTEMP);
1445       minimum_free_regs[i]=HOST_REGS;
1446     }
1447     else current->is32|=1LL<<rt1[i];
1448     dirty_reg(current,rt1[i]);
1449     // LWL/LWR need a temporary register for the old value
1450     if(opcode[i]==0x22||opcode[i]==0x26)
1451     {
1452       alloc_reg(current,i,FTEMP);
1453       alloc_reg_temp(current,i,-1);
1454       minimum_free_regs[i]=1;
1455     }
1456   }
1457   else
1458   {
1459     // Load to r0 or unneeded register (dummy load)
1460     // but we still need a register to calculate the address
1461     if(opcode[i]==0x22||opcode[i]==0x26)
1462     {
1463       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1464     }
1465     alloc_reg_temp(current,i,-1);
1466     minimum_free_regs[i]=1;
1467     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1468     {
1469       alloc_all(current,i);
1470       alloc_reg64(current,i,FTEMP);
1471       minimum_free_regs[i]=HOST_REGS;
1472     }
1473   }
1474 }
1475
1476 void store_alloc(struct regstat *current,int i)
1477 {
1478   clear_const(current,rs2[i]);
1479   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1480   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1481   alloc_reg(current,i,rs2[i]);
1482   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1483     alloc_reg64(current,i,rs2[i]);
1484     if(rs2[i]) alloc_reg(current,i,FTEMP);
1485   }
1486   #if defined(HOST_IMM8)
1487   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1488   else alloc_reg(current,i,INVCP);
1489   #endif
1490   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1491     alloc_reg(current,i,FTEMP);
1492   }
1493   // We need a temporary register for address generation
1494   alloc_reg_temp(current,i,-1);
1495   minimum_free_regs[i]=1;
1496 }
1497
1498 void c1ls_alloc(struct regstat *current,int i)
1499 {
1500   //clear_const(current,rs1[i]); // FIXME
1501   clear_const(current,rt1[i]);
1502   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1503   alloc_reg(current,i,CSREG); // Status
1504   alloc_reg(current,i,FTEMP);
1505   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1506     alloc_reg64(current,i,FTEMP);
1507   }
1508   #if defined(HOST_IMM8)
1509   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1510   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1511     alloc_reg(current,i,INVCP);
1512   #endif
1513   // We need a temporary register for address generation
1514   alloc_reg_temp(current,i,-1);
1515 }
1516
1517 void c2ls_alloc(struct regstat *current,int i)
1518 {
1519   clear_const(current,rt1[i]);
1520   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1521   alloc_reg(current,i,FTEMP);
1522   #if defined(HOST_IMM8)
1523   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1524   if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1525     alloc_reg(current,i,INVCP);
1526   #endif
1527   // We need a temporary register for address generation
1528   alloc_reg_temp(current,i,-1);
1529   minimum_free_regs[i]=1;
1530 }
1531
1532 #ifndef multdiv_alloc
1533 void multdiv_alloc(struct regstat *current,int i)
1534 {
1535   //  case 0x18: MULT
1536   //  case 0x19: MULTU
1537   //  case 0x1A: DIV
1538   //  case 0x1B: DIVU
1539   //  case 0x1C: DMULT
1540   //  case 0x1D: DMULTU
1541   //  case 0x1E: DDIV
1542   //  case 0x1F: DDIVU
1543   clear_const(current,rs1[i]);
1544   clear_const(current,rs2[i]);
1545   if(rs1[i]&&rs2[i])
1546   {
1547     if((opcode2[i]&4)==0) // 32-bit
1548     {
1549       current->u&=~(1LL<<HIREG);
1550       current->u&=~(1LL<<LOREG);
1551       alloc_reg(current,i,HIREG);
1552       alloc_reg(current,i,LOREG);
1553       alloc_reg(current,i,rs1[i]);
1554       alloc_reg(current,i,rs2[i]);
1555       current->is32|=1LL<<HIREG;
1556       current->is32|=1LL<<LOREG;
1557       dirty_reg(current,HIREG);
1558       dirty_reg(current,LOREG);
1559     }
1560     else // 64-bit
1561     {
1562       current->u&=~(1LL<<HIREG);
1563       current->u&=~(1LL<<LOREG);
1564       current->uu&=~(1LL<<HIREG);
1565       current->uu&=~(1LL<<LOREG);
1566       alloc_reg64(current,i,HIREG);
1567       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1568       alloc_reg64(current,i,rs1[i]);
1569       alloc_reg64(current,i,rs2[i]);
1570       alloc_all(current,i);
1571       current->is32&=~(1LL<<HIREG);
1572       current->is32&=~(1LL<<LOREG);
1573       dirty_reg(current,HIREG);
1574       dirty_reg(current,LOREG);
1575       minimum_free_regs[i]=HOST_REGS;
1576     }
1577   }
1578   else
1579   {
1580     // Multiply by zero is zero.
1581     // MIPS does not have a divide by zero exception.
1582     // The result is undefined; we return zero.
1583     alloc_reg(current,i,HIREG);
1584     alloc_reg(current,i,LOREG);
1585     current->is32|=1LL<<HIREG;
1586     current->is32|=1LL<<LOREG;
1587     dirty_reg(current,HIREG);
1588     dirty_reg(current,LOREG);
1589   }
1590 }
1591 #endif
1592
1593 void cop0_alloc(struct regstat *current,int i)
1594 {
1595   if(opcode2[i]==0) // MFC0
1596   {
1597     if(rt1[i]) {
1598       clear_const(current,rt1[i]);
1599       alloc_all(current,i);
1600       alloc_reg(current,i,rt1[i]);
1601       current->is32|=1LL<<rt1[i];
1602       dirty_reg(current,rt1[i]);
1603     }
1604   }
1605   else if(opcode2[i]==4) // MTC0
1606   {
1607     if(rs1[i]){
1608       clear_const(current,rs1[i]);
1609       alloc_reg(current,i,rs1[i]);
1610       alloc_all(current,i);
1611     }
1612     else {
1613       alloc_all(current,i); // FIXME: Keep r0
1614       current->u&=~1LL;
1615       alloc_reg(current,i,0);
1616     }
1617   }
1618   else
1619   {
1620     // TLBR/TLBWI/TLBWR/TLBP/ERET
1621     assert(opcode2[i]==0x10);
1622     alloc_all(current,i);
1623   }
1624   minimum_free_regs[i]=HOST_REGS;
1625 }
1626
1627 void cop1_alloc(struct regstat *current,int i)
1628 {
1629   alloc_reg(current,i,CSREG); // Load status
1630   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1631   {
1632     if(rt1[i]){
1633       clear_const(current,rt1[i]);
1634       if(opcode2[i]==1) {
1635         alloc_reg64(current,i,rt1[i]); // DMFC1
1636         current->is32&=~(1LL<<rt1[i]);
1637       }else{
1638         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1639         current->is32|=1LL<<rt1[i];
1640       }
1641       dirty_reg(current,rt1[i]);
1642     }
1643     alloc_reg_temp(current,i,-1);
1644   }
1645   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1646   {
1647     if(rs1[i]){
1648       clear_const(current,rs1[i]);
1649       if(opcode2[i]==5)
1650         alloc_reg64(current,i,rs1[i]); // DMTC1
1651       else
1652         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1653       alloc_reg_temp(current,i,-1);
1654     }
1655     else {
1656       current->u&=~1LL;
1657       alloc_reg(current,i,0);
1658       alloc_reg_temp(current,i,-1);
1659     }
1660   }
1661   minimum_free_regs[i]=1;
1662 }
1663 void fconv_alloc(struct regstat *current,int i)
1664 {
1665   alloc_reg(current,i,CSREG); // Load status
1666   alloc_reg_temp(current,i,-1);
1667   minimum_free_regs[i]=1;
1668 }
1669 void float_alloc(struct regstat *current,int i)
1670 {
1671   alloc_reg(current,i,CSREG); // Load status
1672   alloc_reg_temp(current,i,-1);
1673   minimum_free_regs[i]=1;
1674 }
1675 void c2op_alloc(struct regstat *current,int i)
1676 {
1677   alloc_reg_temp(current,i,-1);
1678 }
1679 void fcomp_alloc(struct regstat *current,int i)
1680 {
1681   alloc_reg(current,i,CSREG); // Load status
1682   alloc_reg(current,i,FSREG); // Load flags
1683   dirty_reg(current,FSREG); // Flag will be modified
1684   alloc_reg_temp(current,i,-1);
1685   minimum_free_regs[i]=1;
1686 }
1687
1688 void syscall_alloc(struct regstat *current,int i)
1689 {
1690   alloc_cc(current,i);
1691   dirty_reg(current,CCREG);
1692   alloc_all(current,i);
1693   minimum_free_regs[i]=HOST_REGS;
1694   current->isconst=0;
1695 }
1696
1697 void delayslot_alloc(struct regstat *current,int i)
1698 {
1699   switch(itype[i]) {
1700     case UJUMP:
1701     case CJUMP:
1702     case SJUMP:
1703     case RJUMP:
1704     case FJUMP:
1705     case SYSCALL:
1706     case HLECALL:
1707     case SPAN:
1708       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1709       SysPrintf("Disabled speculative precompilation\n");
1710       stop_after_jal=1;
1711       break;
1712     case IMM16:
1713       imm16_alloc(current,i);
1714       break;
1715     case LOAD:
1716     case LOADLR:
1717       load_alloc(current,i);
1718       break;
1719     case STORE:
1720     case STORELR:
1721       store_alloc(current,i);
1722       break;
1723     case ALU:
1724       alu_alloc(current,i);
1725       break;
1726     case SHIFT:
1727       shift_alloc(current,i);
1728       break;
1729     case MULTDIV:
1730       multdiv_alloc(current,i);
1731       break;
1732     case SHIFTIMM:
1733       shiftimm_alloc(current,i);
1734       break;
1735     case MOV:
1736       mov_alloc(current,i);
1737       break;
1738     case COP0:
1739       cop0_alloc(current,i);
1740       break;
1741     case COP1:
1742     case COP2:
1743       cop1_alloc(current,i);
1744       break;
1745     case C1LS:
1746       c1ls_alloc(current,i);
1747       break;
1748     case C2LS:
1749       c2ls_alloc(current,i);
1750       break;
1751     case FCONV:
1752       fconv_alloc(current,i);
1753       break;
1754     case FLOAT:
1755       float_alloc(current,i);
1756       break;
1757     case FCOMP:
1758       fcomp_alloc(current,i);
1759       break;
1760     case C2OP:
1761       c2op_alloc(current,i);
1762       break;
1763   }
1764 }
1765
1766 // Special case where a branch and delay slot span two pages in virtual memory
1767 static void pagespan_alloc(struct regstat *current,int i)
1768 {
1769   current->isconst=0;
1770   current->wasconst=0;
1771   regs[i].wasconst=0;
1772   minimum_free_regs[i]=HOST_REGS;
1773   alloc_all(current,i);
1774   alloc_cc(current,i);
1775   dirty_reg(current,CCREG);
1776   if(opcode[i]==3) // JAL
1777   {
1778     alloc_reg(current,i,31);
1779     dirty_reg(current,31);
1780   }
1781   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1782   {
1783     alloc_reg(current,i,rs1[i]);
1784     if (rt1[i]!=0) {
1785       alloc_reg(current,i,rt1[i]);
1786       dirty_reg(current,rt1[i]);
1787     }
1788   }
1789   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1790   {
1791     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1792     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1793     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1794     {
1795       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1796       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1797     }
1798   }
1799   else
1800   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1801   {
1802     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1803     if(!((current->is32>>rs1[i])&1))
1804     {
1805       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1806     }
1807   }
1808   else
1809   if(opcode[i]==0x11) // BC1
1810   {
1811     alloc_reg(current,i,FSREG);
1812     alloc_reg(current,i,CSREG);
1813   }
1814   //else ...
1815 }
1816
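// Record an out-of-line stub to be emitted later, after the block body:
// 'type' selects the handler (LOADx_STUB, STOREx_STUB, INVCODE_STUB, ...),
// 'addr' is the location of the branch to patch, 'retaddr' is where the
// stub returns to, and a..e carry type-specific arguments.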
1817 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1818 {
1819   stubs[stubcount][0]=type;
1820   stubs[stubcount][1]=addr;
1821   stubs[stubcount][2]=retaddr;
1822   stubs[stubcount][3]=a;
1823   stubs[stubcount][4]=b;
1824   stubs[stubcount][5]=c;
1825   stubs[stubcount][6]=d;
1826   stubs[stubcount][7]=e;
1827   stubcount++;
1828 }
1829
1830 // Write out a single register
1831 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1832 {
1833   int hr;
1834   for(hr=0;hr<HOST_REGS;hr++) {
1835     if(hr!=EXCLUDE_REG) {
1836       if((regmap[hr]&63)==r) {
1837         if((dirty>>hr)&1) {
1838           if(regmap[hr]<64) {
1839             emit_storereg(r,hr);
1840           }else{
1841             emit_storereg(r|64,hr);
1842           }
1843         }
1844       }
1845     }
1846   }
1847 }
1848
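// Debug/trace helpers used by the (mostly commented-out) trace code below:
// mchecksum() rotate-XORs RDRAM words, rchecksum() XORs the register file,
// and rlist() dumps all GPRs.  The 2097152-word (8MB) scan size appears to
// be a leftover from the N64 version of this dynarec.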
1849 int mchecksum()
1850 {
1851   //if(!tracedebug) return 0;
1852   int i;
1853   int sum=0;
1854   for(i=0;i<2097152;i++) {
1855     unsigned int temp=sum;
1856     sum<<=1;
1857     sum|=(~temp)>>31;
1858     sum^=((u_int *)rdram)[i];
1859   }
1860   return sum;
1861 }
1862 int rchecksum()
1863 {
1864   int i;
1865   int sum=0;
1866   for(i=0;i<64;i++)
1867     sum^=((u_int *)reg)[i];
1868   return sum;
1869 }
1870 void rlist()
1871 {
1872   int i;
1873   printf("TRACE: ");
1874   for(i=0;i<32;i++)
1875     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
1876   printf("\n");
1877 }
1878
1879 void enabletrace()
1880 {
1881   tracedebug=1;
1882 }
1883
1884 void memdebug(int i)
1885 {
1886   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
1887   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
1888   //rlist();
1889   //if(tracedebug) {
1890   //if(Count>=-2084597794) {
1891   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
1892   //if(0) {
1893     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
1894     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
1895     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
1896     rlist();
1897     #ifdef __i386__
1898     printf("TRACE: %x\n",(&i)[-1]);
1899     #endif
1900     #ifdef __arm__
1901     int j;
1902     printf("TRACE: %x \n",(&j)[10]);
1903     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
1904     #endif
1905     //fflush(stdout);
1906   }
1907   //printf("TRACE: %x\n",(&i)[-1]);
1908 }
1909
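// Emit code for register-register ALU ops: ADD/ADDU/SUB/SUBU, the 64-bit
// DADD* forms, SLT/SLTU and AND/OR/XOR/NOR.  The 32- vs 64-bit sequence is
// chosen from the was32 bitmap; operands known to be r0 collapse to moves,
// negations, zeroing or immediate loads.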
1910 void alu_assemble(int i,struct regstat *i_regs)
1911 {
1912   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1913     if(rt1[i]) {
1914       signed char s1,s2,t;
1915       t=get_reg(i_regs->regmap,rt1[i]);
1916       if(t>=0) {
1917         s1=get_reg(i_regs->regmap,rs1[i]);
1918         s2=get_reg(i_regs->regmap,rs2[i]);
1919         if(rs1[i]&&rs2[i]) {
1920           assert(s1>=0);
1921           assert(s2>=0);
1922           if(opcode2[i]&2) emit_sub(s1,s2,t);
1923           else emit_add(s1,s2,t);
1924         }
1925         else if(rs1[i]) {
1926           if(s1>=0) emit_mov(s1,t);
1927           else emit_loadreg(rs1[i],t);
1928         }
1929         else if(rs2[i]) {
1930           if(s2>=0) {
1931             if(opcode2[i]&2) emit_neg(s2,t);
1932             else emit_mov(s2,t);
1933           }
1934           else {
1935             emit_loadreg(rs2[i],t);
1936             if(opcode2[i]&2) emit_neg(t,t);
1937           }
1938         }
1939         else emit_zeroreg(t);
1940       }
1941     }
1942   }
1943   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1944     if(rt1[i]) {
1945       signed char s1l,s2l,s1h,s2h,tl,th;
1946       tl=get_reg(i_regs->regmap,rt1[i]);
1947       th=get_reg(i_regs->regmap,rt1[i]|64);
1948       if(tl>=0) {
1949         s1l=get_reg(i_regs->regmap,rs1[i]);
1950         s2l=get_reg(i_regs->regmap,rs2[i]);
1951         s1h=get_reg(i_regs->regmap,rs1[i]|64);
1952         s2h=get_reg(i_regs->regmap,rs2[i]|64);
1953         if(rs1[i]&&rs2[i]) {
1954           assert(s1l>=0);
1955           assert(s2l>=0);
1956           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
1957           else emit_adds(s1l,s2l,tl);
1958           if(th>=0) {
1959             #ifdef INVERTED_CARRY
1960             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
1961             #else
1962             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
1963             #endif
1964             else emit_add(s1h,s2h,th);
1965           }
1966         }
1967         else if(rs1[i]) {
1968           if(s1l>=0) emit_mov(s1l,tl);
1969           else emit_loadreg(rs1[i],tl);
1970           if(th>=0) {
1971             if(s1h>=0) emit_mov(s1h,th);
1972             else emit_loadreg(rs1[i]|64,th);
1973           }
1974         }
1975         else if(rs2[i]) {
1976           if(s2l>=0) {
1977             if(opcode2[i]&2) emit_negs(s2l,tl);
1978             else emit_mov(s2l,tl);
1979           }
1980           else {
1981             emit_loadreg(rs2[i],tl);
1982             if(opcode2[i]&2) emit_negs(tl,tl);
1983           }
1984           if(th>=0) {
1985             #ifdef INVERTED_CARRY
1986             if(s2h>=0) emit_mov(s2h,th);
1987             else emit_loadreg(rs2[i]|64,th);
1988             if(opcode2[i]&2) {
1989               emit_adcimm(-1,th); // x86 has inverted carry flag
1990               emit_not(th,th);
1991             }
1992             #else
1993             if(opcode2[i]&2) {
1994               if(s2h>=0) emit_rscimm(s2h,0,th);
1995               else {
1996                 emit_loadreg(rs2[i]|64,th);
1997                 emit_rscimm(th,0,th);
1998               }
1999             }else{
2000               if(s2h>=0) emit_mov(s2h,th);
2001               else emit_loadreg(rs2[i]|64,th);
2002             }
2003             #endif
2004           }
2005         }
2006         else {
2007           emit_zeroreg(tl);
2008           if(th>=0) emit_zeroreg(th);
2009         }
2010       }
2011     }
2012   }
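  // SLT/SLTU: write 0/1 into the target register.  A comparison against r0
  // reduces to a sign-bit extract (emit_shrimm(...,31,...)) or a
  // zero/non-zero test; otherwise a full 32- or 64-bit compare is emitted
  // depending on the was32 bits of the operands.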
2013   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2014     if(rt1[i]) {
2015       signed char s1l,s1h,s2l,s2h,t;
2016       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2017       {
2018         t=get_reg(i_regs->regmap,rt1[i]);
2019         //assert(t>=0);
2020         if(t>=0) {
2021           s1l=get_reg(i_regs->regmap,rs1[i]);
2022           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2023           s2l=get_reg(i_regs->regmap,rs2[i]);
2024           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2025           if(rs2[i]==0) // rx<r0
2026           {
2027             assert(s1h>=0);
2028             if(opcode2[i]==0x2a) // SLT
2029               emit_shrimm(s1h,31,t);
2030             else // SLTU (unsigned can not be less than zero)
2031               emit_zeroreg(t);
2032           }
2033           else if(rs1[i]==0) // r0<rx
2034           {
2035             assert(s2h>=0);
2036             if(opcode2[i]==0x2a) // SLT
2037               emit_set_gz64_32(s2h,s2l,t);
2038             else // SLTU (set if not zero)
2039               emit_set_nz64_32(s2h,s2l,t);
2040           }
2041           else {
2042             assert(s1l>=0);assert(s1h>=0);
2043             assert(s2l>=0);assert(s2h>=0);
2044             if(opcode2[i]==0x2a) // SLT
2045               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2046             else // SLTU
2047               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2048           }
2049         }
2050       } else {
2051         t=get_reg(i_regs->regmap,rt1[i]);
2052         //assert(t>=0);
2053         if(t>=0) {
2054           s1l=get_reg(i_regs->regmap,rs1[i]);
2055           s2l=get_reg(i_regs->regmap,rs2[i]);
2056           if(rs2[i]==0) // rx<r0
2057           {
2058             assert(s1l>=0);
2059             if(opcode2[i]==0x2a) // SLT
2060               emit_shrimm(s1l,31,t);
2061             else // SLTU (unsigned can not be less than zero)
2062               emit_zeroreg(t);
2063           }
2064           else if(rs1[i]==0) // r0<rx
2065           {
2066             assert(s2l>=0);
2067             if(opcode2[i]==0x2a) // SLT
2068               emit_set_gz32(s2l,t);
2069             else // SLTU (set if not zero)
2070               emit_set_nz32(s2l,t);
2071           }
2072           else{
2073             assert(s1l>=0);assert(s2l>=0);
2074             if(opcode2[i]==0x2a) // SLT
2075               emit_set_if_less32(s1l,s2l,t);
2076             else // SLTU
2077               emit_set_if_carry32(s1l,s2l,t);
2078           }
2079         }
2080       }
2081     }
2082   }
2083   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2084     if(rt1[i]) {
2085       signed char s1l,s1h,s2l,s2h,th,tl;
2086       tl=get_reg(i_regs->regmap,rt1[i]);
2087       th=get_reg(i_regs->regmap,rt1[i]|64);
2088       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2089       {
2090         assert(tl>=0);
2091         if(tl>=0) {
2092           s1l=get_reg(i_regs->regmap,rs1[i]);
2093           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2094           s2l=get_reg(i_regs->regmap,rs2[i]);
2095           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2096           if(rs1[i]&&rs2[i]) {
2097             assert(s1l>=0);assert(s1h>=0);
2098             assert(s2l>=0);assert(s2h>=0);
2099             if(opcode2[i]==0x24) { // AND
2100               emit_and(s1l,s2l,tl);
2101               emit_and(s1h,s2h,th);
2102             } else
2103             if(opcode2[i]==0x25) { // OR
2104               emit_or(s1l,s2l,tl);
2105               emit_or(s1h,s2h,th);
2106             } else
2107             if(opcode2[i]==0x26) { // XOR
2108               emit_xor(s1l,s2l,tl);
2109               emit_xor(s1h,s2h,th);
2110             } else
2111             if(opcode2[i]==0x27) { // NOR
2112               emit_or(s1l,s2l,tl);
2113               emit_or(s1h,s2h,th);
2114               emit_not(tl,tl);
2115               emit_not(th,th);
2116             }
2117           }
2118           else
2119           {
2120             if(opcode2[i]==0x24) { // AND
2121               emit_zeroreg(tl);
2122               emit_zeroreg(th);
2123             } else
2124             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2125               if(rs1[i]){
2126                 if(s1l>=0) emit_mov(s1l,tl);
2127                 else emit_loadreg(rs1[i],tl);
2128                 if(s1h>=0) emit_mov(s1h,th);
2129                 else emit_loadreg(rs1[i]|64,th);
2130               }
2131               else
2132               if(rs2[i]){
2133                 if(s2l>=0) emit_mov(s2l,tl);
2134                 else emit_loadreg(rs2[i],tl);
2135                 if(s2h>=0) emit_mov(s2h,th);
2136                 else emit_loadreg(rs2[i]|64,th);
2137               }
2138               else{
2139                 emit_zeroreg(tl);
2140                 emit_zeroreg(th);
2141               }
2142             } else
2143             if(opcode2[i]==0x27) { // NOR
2144               if(rs1[i]){
2145                 if(s1l>=0) emit_not(s1l,tl);
2146                 else{
2147                   emit_loadreg(rs1[i],tl);
2148                   emit_not(tl,tl);
2149                 }
2150                 if(s1h>=0) emit_not(s1h,th);
2151                 else{
2152                   emit_loadreg(rs1[i]|64,th);
2153                   emit_not(th,th);
2154                 }
2155               }
2156               else
2157               if(rs2[i]){
2158                 if(s2l>=0) emit_not(s2l,tl);
2159                 else{
2160                   emit_loadreg(rs2[i],tl);
2161                   emit_not(tl,tl);
2162                 }
2163                 if(s2h>=0) emit_not(s2h,th);
2164                 else{
2165                   emit_loadreg(rs2[i]|64,th);
2166                   emit_not(th,th);
2167                 }
2168               }
2169               else {
2170                 emit_movimm(-1,tl);
2171                 emit_movimm(-1,th);
2172               }
2173             }
2174           }
2175         }
2176       }
2177       else
2178       {
2179         // 32 bit
2180         if(tl>=0) {
2181           s1l=get_reg(i_regs->regmap,rs1[i]);
2182           s2l=get_reg(i_regs->regmap,rs2[i]);
2183           if(rs1[i]&&rs2[i]) {
2184             assert(s1l>=0);
2185             assert(s2l>=0);
2186             if(opcode2[i]==0x24) { // AND
2187               emit_and(s1l,s2l,tl);
2188             } else
2189             if(opcode2[i]==0x25) { // OR
2190               emit_or(s1l,s2l,tl);
2191             } else
2192             if(opcode2[i]==0x26) { // XOR
2193               emit_xor(s1l,s2l,tl);
2194             } else
2195             if(opcode2[i]==0x27) { // NOR
2196               emit_or(s1l,s2l,tl);
2197               emit_not(tl,tl);
2198             }
2199           }
2200           else
2201           {
2202             if(opcode2[i]==0x24) { // AND
2203               emit_zeroreg(tl);
2204             } else
2205             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2206               if(rs1[i]){
2207                 if(s1l>=0) emit_mov(s1l,tl);
2208                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2209               }
2210               else
2211               if(rs2[i]){
2212                 if(s2l>=0) emit_mov(s2l,tl);
2213                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2214               }
2215               else emit_zeroreg(tl);
2216             } else
2217             if(opcode2[i]==0x27) { // NOR
2218               if(rs1[i]){
2219                 if(s1l>=0) emit_not(s1l,tl);
2220                 else {
2221                   emit_loadreg(rs1[i],tl);
2222                   emit_not(tl,tl);
2223                 }
2224               }
2225               else
2226               if(rs2[i]){
2227                 if(s2l>=0) emit_not(s2l,tl);
2228                 else {
2229                   emit_loadreg(rs2[i],tl);
2230                   emit_not(tl,tl);
2231                 }
2232               }
2233               else emit_movimm(-1,tl);
2234             }
2235           }
2236         }
2237       }
2238     }
2239   }
2240 }
2241
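// Emit code for immediate-format ops (LUI, ADDI/ADDIU, DADDI/DADDIU,
// SLTI/SLTIU, ANDI/ORI/XORI).  Constant propagation is applied where
// possible: when the source register's value is known (wasconst/constmap),
// the result is folded at compile time into a single emit_movimm(), e.g.
// emit_movimm(constmap[i][sl]|imm[i],tl) for ORI.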
2242 void imm16_assemble(int i,struct regstat *i_regs)
2243 {
2244   if (opcode[i]==0x0f) { // LUI
2245     if(rt1[i]) {
2246       signed char t;
2247       t=get_reg(i_regs->regmap,rt1[i]);
2248       //assert(t>=0);
2249       if(t>=0) {
2250         if(!((i_regs->isconst>>t)&1))
2251           emit_movimm(imm[i]<<16,t);
2252       }
2253     }
2254   }
2255   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2256     if(rt1[i]) {
2257       signed char s,t;
2258       t=get_reg(i_regs->regmap,rt1[i]);
2259       s=get_reg(i_regs->regmap,rs1[i]);
2260       if(rs1[i]) {
2261         //assert(t>=0);
2262         //assert(s>=0);
2263         if(t>=0) {
2264           if(!((i_regs->isconst>>t)&1)) {
2265             if(s<0) {
2266               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2267               emit_addimm(t,imm[i],t);
2268             }else{
2269               if(!((i_regs->wasconst>>s)&1))
2270                 emit_addimm(s,imm[i],t);
2271               else
2272                 emit_movimm(constmap[i][s]+imm[i],t);
2273             }
2274           }
2275         }
2276       } else {
2277         if(t>=0) {
2278           if(!((i_regs->isconst>>t)&1))
2279             emit_movimm(imm[i],t);
2280         }
2281       }
2282     }
2283   }
2284   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2285     if(rt1[i]) {
2286       signed char sh,sl,th,tl;
2287       th=get_reg(i_regs->regmap,rt1[i]|64);
2288       tl=get_reg(i_regs->regmap,rt1[i]);
2289       sh=get_reg(i_regs->regmap,rs1[i]|64);
2290       sl=get_reg(i_regs->regmap,rs1[i]);
2291       if(tl>=0) {
2292         if(rs1[i]) {
2293           assert(sh>=0);
2294           assert(sl>=0);
2295           if(th>=0) {
2296             emit_addimm64_32(sh,sl,imm[i],th,tl);
2297           }
2298           else {
2299             emit_addimm(sl,imm[i],tl);
2300           }
2301         } else {
2302           emit_movimm(imm[i],tl);
2303           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2304         }
2305       }
2306     }
2307   }
2308   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2309     if(rt1[i]) {
2310       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2311       signed char sh,sl,t;
2312       t=get_reg(i_regs->regmap,rt1[i]);
2313       sh=get_reg(i_regs->regmap,rs1[i]|64);
2314       sl=get_reg(i_regs->regmap,rs1[i]);
2315       //assert(t>=0);
2316       if(t>=0) {
2317         if(rs1[i]>0) {
2318           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2319           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2320             if(opcode[i]==0x0a) { // SLTI
2321               if(sl<0) {
2322                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2323                 emit_slti32(t,imm[i],t);
2324               }else{
2325                 emit_slti32(sl,imm[i],t);
2326               }
2327             }
2328             else { // SLTIU
2329               if(sl<0) {
2330                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2331                 emit_sltiu32(t,imm[i],t);
2332               }else{
2333                 emit_sltiu32(sl,imm[i],t);
2334               }
2335             }
2336           }else{ // 64-bit
2337             assert(sl>=0);
2338             if(opcode[i]==0x0a) // SLTI
2339               emit_slti64_32(sh,sl,imm[i],t);
2340             else // SLTIU
2341               emit_sltiu64_32(sh,sl,imm[i],t);
2342           }
2343         }else{
2344           // SLTI(U) with r0 is just stupid,
2345           // nonetheless examples can be found
2346           if(opcode[i]==0x0a) { // SLTI
2347             if(0<imm[i]) emit_movimm(1,t);
2348             else emit_zeroreg(t);
2349           } else // SLTIU
2350           {
2351             if(imm[i]) emit_movimm(1,t);
2352             else emit_zeroreg(t);
2353           }
2354         }
2355       }
2356     }
2357   }
2358   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2359     if(rt1[i]) {
2360       signed char sh,sl,th,tl;
2361       th=get_reg(i_regs->regmap,rt1[i]|64);
2362       tl=get_reg(i_regs->regmap,rt1[i]);
2363       sh=get_reg(i_regs->regmap,rs1[i]|64);
2364       sl=get_reg(i_regs->regmap,rs1[i]);
2365       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2366         if(opcode[i]==0x0c) //ANDI
2367         {
2368           if(rs1[i]) {
2369             if(sl<0) {
2370               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2371               emit_andimm(tl,imm[i],tl);
2372             }else{
2373               if(!((i_regs->wasconst>>sl)&1))
2374                 emit_andimm(sl,imm[i],tl);
2375               else
2376                 emit_movimm(constmap[i][sl]&imm[i],tl);
2377             }
2378           }
2379           else
2380             emit_zeroreg(tl);
2381           if(th>=0) emit_zeroreg(th);
2382         }
2383         else
2384         {
2385           if(rs1[i]) {
2386             if(sl<0) {
2387               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2388             }
2389             if(th>=0) {
2390               if(sh<0) {
2391                 emit_loadreg(rs1[i]|64,th);
2392               }else{
2393                 emit_mov(sh,th);
2394               }
2395             }
2396             if(opcode[i]==0x0d) { // ORI
2397               if(sl<0) {
2398                 emit_orimm(tl,imm[i],tl);
2399               }else{
2400                 if(!((i_regs->wasconst>>sl)&1))
2401                   emit_orimm(sl,imm[i],tl);
2402                 else
2403                   emit_movimm(constmap[i][sl]|imm[i],tl);
2404               }
2405             }
2406             if(opcode[i]==0x0e) { // XORI
2407               if(sl<0) {
2408                 emit_xorimm(tl,imm[i],tl);
2409               }else{
2410                 if(!((i_regs->wasconst>>sl)&1))
2411                   emit_xorimm(sl,imm[i],tl);
2412                 else
2413                   emit_movimm(constmap[i][sl]^imm[i],tl);
2414               }
2415             }
2416           }
2417           else {
2418             emit_movimm(imm[i],tl);
2419             if(th>=0) emit_zeroreg(th);
2420           }
2421         }
2422       }
2423     }
2424   }
2425 }
2426
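// Emit code for shift-by-immediate ops (SLL/SRL/SRA and the doubleword
// DSLL*/DSRL*/DSRA* forms).  A shift amount of zero degenerates into a
// plain register move.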
2427 void shiftimm_assemble(int i,struct regstat *i_regs)
2428 {
2429   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2430   {
2431     if(rt1[i]) {
2432       signed char s,t;
2433       t=get_reg(i_regs->regmap,rt1[i]);
2434       s=get_reg(i_regs->regmap,rs1[i]);
2435       //assert(t>=0);
2436       if(t>=0&&!((i_regs->isconst>>t)&1)){
2437         if(rs1[i]==0)
2438         {
2439           emit_zeroreg(t);
2440         }
2441         else
2442         {
2443           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2444           if(imm[i]) {
2445             if(opcode2[i]==0) // SLL
2446             {
2447               emit_shlimm(s<0?t:s,imm[i],t);
2448             }
2449             if(opcode2[i]==2) // SRL
2450             {
2451               emit_shrimm(s<0?t:s,imm[i],t);
2452             }
2453             if(opcode2[i]==3) // SRA
2454             {
2455               emit_sarimm(s<0?t:s,imm[i],t);
2456             }
2457           }else{
2458             // Shift by zero
2459             if(s>=0 && s!=t) emit_mov(s,t);
2460           }
2461         }
2462       }
2463       //emit_storereg(rt1[i],t); //DEBUG
2464     }
2465   }
2466   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2467   {
2468     if(rt1[i]) {
2469       signed char sh,sl,th,tl;
2470       th=get_reg(i_regs->regmap,rt1[i]|64);
2471       tl=get_reg(i_regs->regmap,rt1[i]);
2472       sh=get_reg(i_regs->regmap,rs1[i]|64);
2473       sl=get_reg(i_regs->regmap,rs1[i]);
2474       if(tl>=0) {
2475         if(rs1[i]==0)
2476         {
2477           emit_zeroreg(tl);
2478           if(th>=0) emit_zeroreg(th);
2479         }
2480         else
2481         {
2482           assert(sl>=0);
2483           assert(sh>=0);
2484           if(imm[i]) {
2485             if(opcode2[i]==0x38) // DSLL
2486             {
2487               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2488               emit_shlimm(sl,imm[i],tl);
2489             }
2490             if(opcode2[i]==0x3a) // DSRL
2491             {
2492               emit_shrdimm(sl,sh,imm[i],tl);
2493               if(th>=0) emit_shrimm(sh,imm[i],th);
2494             }
2495             if(opcode2[i]==0x3b) // DSRA
2496             {
2497               emit_shrdimm(sl,sh,imm[i],tl);
2498               if(th>=0) emit_sarimm(sh,imm[i],th);
2499             }
2500           }else{
2501             // Shift by zero
2502             if(sl!=tl) emit_mov(sl,tl);
2503             if(th>=0&&sh!=th) emit_mov(sh,th);
2504           }
2505         }
2506       }
2507     }
2508   }
2509   if(opcode2[i]==0x3c) // DSLL32
2510   {
2511     if(rt1[i]) {
2512       signed char sl,tl,th;
2513       tl=get_reg(i_regs->regmap,rt1[i]);
2514       th=get_reg(i_regs->regmap,rt1[i]|64);
2515       sl=get_reg(i_regs->regmap,rs1[i]);
2516       if(th>=0||tl>=0){
2517         assert(tl>=0);
2518         assert(th>=0);
2519         assert(sl>=0);
2520         emit_mov(sl,th);
2521         emit_zeroreg(tl);
2522         if(imm[i]>32)
2523         {
2524           emit_shlimm(th,imm[i]&31,th);
2525         }
2526       }
2527     }
2528   }
2529   if(opcode2[i]==0x3e) // DSRL32
2530   {
2531     if(rt1[i]) {
2532       signed char sh,tl,th;
2533       tl=get_reg(i_regs->regmap,rt1[i]);
2534       th=get_reg(i_regs->regmap,rt1[i]|64);
2535       sh=get_reg(i_regs->regmap,rs1[i]|64);
2536       if(tl>=0){
2537         assert(sh>=0);
2538         emit_mov(sh,tl);
2539         if(th>=0) emit_zeroreg(th);
2540         if(imm[i]>32)
2541         {
2542           emit_shrimm(tl,imm[i]&31,tl);
2543         }
2544       }
2545     }
2546   }
2547   if(opcode2[i]==0x3f) // DSRA32
2548   {
2549     if(rt1[i]) {
2550       signed char sh,tl;
2551       tl=get_reg(i_regs->regmap,rt1[i]);
2552       sh=get_reg(i_regs->regmap,rs1[i]|64);
2553       if(tl>=0){
2554         assert(sh>=0);
2555         emit_mov(sh,tl);
2556         if(imm[i]>32)
2557         {
2558           emit_sarimm(tl,imm[i]&31,tl);
2559         }
2560       }
2561     }
2562   }
2563 }
2564
2565 #ifndef shift_assemble
2566 void shift_assemble(int i,struct regstat *i_regs)
2567 {
2568   printf("Need shift_assemble for this architecture.\n");
2569   exit(1);
2570 }
2571 #endif
2572
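// Emit code for aligned loads (LB/LH/LW/LBU/LHU/LWU/LD).  If the address is
// a known constant inside RAM the access is inlined directly; otherwise
// emit_fastpath_cmp_jump() emits the RAM range check and a LOADx_STUB is
// queued for the slow path (I/O and other non-RAM targets).  Loads to r0 are
// still performed, since the target may be an I/O port, but the result is
// discarded.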
2573 void load_assemble(int i,struct regstat *i_regs)
2574 {
2575   int s,th,tl,addr,map=-1;
2576   int offset;
2577   int jaddr=0;
2578   int memtarget=0,c=0;
2579   int fastload_reg_override=0;
2580   u_int hr,reglist=0;
2581   th=get_reg(i_regs->regmap,rt1[i]|64);
2582   tl=get_reg(i_regs->regmap,rt1[i]);
2583   s=get_reg(i_regs->regmap,rs1[i]);
2584   offset=imm[i];
2585   for(hr=0;hr<HOST_REGS;hr++) {
2586     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2587   }
2588   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2589   if(s>=0) {
2590     c=(i_regs->wasconst>>s)&1;
2591     if (c) {
2592       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2593     }
2594   }
2595   //printf("load_assemble: c=%d\n",c);
2596   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2597   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2598   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2599     ||rt1[i]==0) {
2600       // could be a FIFO / I/O port, so the read must still be performed
2601       // even if the result is discarded (dummy read when rt is r0)
2602       assem_debug("(forced read)\n");
2603       tl=get_reg(i_regs->regmap,-1);
2604       assert(tl>=0);
2605   }
2606   if(offset||s<0||c) addr=tl;
2607   else addr=s;
2608   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2609  if(tl>=0) {
2610   //printf("load_assemble: c=%d\n",c);
2611   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2612   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2613   reglist&=~(1<<tl);
2614   if(th>=0) reglist&=~(1<<th);
2615   if(!c) {
2616     #ifdef RAM_OFFSET
2617     map=get_reg(i_regs->regmap,ROREG);
2618     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2619     #endif
2620     #ifdef R29_HACK
2621     // Strmnnrmn's speed hack
2622     if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2623     #endif
2624     {
2625       jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2626     }
2627   }
2628   else if(ram_offset&&memtarget) {
2629     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2630     fastload_reg_override=HOST_TEMPREG;
2631   }
2632   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2633   if (opcode[i]==0x20) { // LB
2634     if(!c||memtarget) {
2635       if(!dummy) {
2636         #ifdef HOST_IMM_ADDR32
2637         if(c)
2638           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2639         else
2640         #endif
2641         {
2642           //emit_xorimm(addr,3,tl);
2643           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2644           int x=0,a=tl;
2645 #ifdef BIG_ENDIAN_MIPS
2646           if(!c) emit_xorimm(addr,3,tl);
2647           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2648 #else
2649           if(!c) a=addr;
2650 #endif
2651           if(fastload_reg_override) a=fastload_reg_override;
2652
2653           emit_movsbl_indexed_tlb(x,a,map,tl);
2654         }
2655       }
2656       if(jaddr)
2657         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2658     }
2659     else
2660       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2661   }
2662   if (opcode[i]==0x21) { // LH
2663     if(!c||memtarget) {
2664       if(!dummy) {
2665         #ifdef HOST_IMM_ADDR32
2666         if(c)
2667           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2668         else
2669         #endif
2670         {
2671           int x=0,a=tl;
2672 #ifdef BIG_ENDIAN_MIPS
2673           if(!c) emit_xorimm(addr,2,tl);
2674           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2675 #else
2676           if(!c) a=addr;
2677 #endif
2678           if(fastload_reg_override) a=fastload_reg_override;
2679           //#ifdef
2680           //emit_movswl_indexed_tlb(x,tl,map,tl);
2681           //else
2682           if(map>=0) {
2683             emit_movswl_indexed(x,a,tl);
2684           }else{
2685             #if 1 //def RAM_OFFSET
2686             emit_movswl_indexed(x,a,tl);
2687             #else
2688             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2689             #endif
2690           }
2691         }
2692       }
2693       if(jaddr)
2694         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2695     }
2696     else
2697       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2698   }
2699   if (opcode[i]==0x23) { // LW
2700     if(!c||memtarget) {
2701       if(!dummy) {
2702         int a=addr;
2703         if(fastload_reg_override) a=fastload_reg_override;
2704         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2705         #ifdef HOST_IMM_ADDR32
2706         if(c)
2707           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2708         else
2709         #endif
2710         emit_readword_indexed_tlb(0,a,map,tl);
2711       }
2712       if(jaddr)
2713         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2714     }
2715     else
2716       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2717   }
2718   if (opcode[i]==0x24) { // LBU
2719     if(!c||memtarget) {
2720       if(!dummy) {
2721         #ifdef HOST_IMM_ADDR32
2722         if(c)
2723           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2724         else
2725         #endif
2726         {
2727           //emit_xorimm(addr,3,tl);
2728           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2729           int x=0,a=tl;
2730 #ifdef BIG_ENDIAN_MIPS
2731           if(!c) emit_xorimm(addr,3,tl);
2732           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2733 #else
2734           if(!c) a=addr;
2735 #endif
2736           if(fastload_reg_override) a=fastload_reg_override;
2737
2738           emit_movzbl_indexed_tlb(x,a,map,tl);
2739         }
2740       }
2741       if(jaddr)
2742         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2743     }
2744     else
2745       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2746   }
2747   if (opcode[i]==0x25) { // LHU
2748     if(!c||memtarget) {
2749       if(!dummy) {
2750         #ifdef HOST_IMM_ADDR32
2751         if(c)
2752           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2753         else
2754         #endif
2755         {
2756           int x=0,a=tl;
2757 #ifdef BIG_ENDIAN_MIPS
2758           if(!c) emit_xorimm(addr,2,tl);
2759           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2760 #else
2761           if(!c) a=addr;
2762 #endif
2763           if(fastload_reg_override) a=fastload_reg_override;
2764           //#ifdef
2765           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2766           //#else
2767           if(map>=0) {
2768             emit_movzwl_indexed(x,a,tl);
2769           }else{
2770             #if 1 //def RAM_OFFSET
2771             emit_movzwl_indexed(x,a,tl);
2772             #else
2773             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2774             #endif
2775           }
2776         }
2777       }
2778       if(jaddr)
2779         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2780     }
2781     else
2782       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2783   }
2784   if (opcode[i]==0x27) { // LWU
2785     assert(th>=0);
2786     if(!c||memtarget) {
2787       if(!dummy) {
2788         int a=addr;
2789         if(fastload_reg_override) a=fastload_reg_override;
2790         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2791         #ifdef HOST_IMM_ADDR32
2792         if(c)
2793           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2794         else
2795         #endif
2796         emit_readword_indexed_tlb(0,a,map,tl);
2797       }
2798       if(jaddr)
2799         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2800     }
2801     else {
2802       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2803     }
2804     emit_zeroreg(th);
2805   }
2806   if (opcode[i]==0x37) { // LD
2807     if(!c||memtarget) {
2808       if(!dummy) {
2809         int a=addr;
2810         if(fastload_reg_override) a=fastload_reg_override;
2811         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2812         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2813         #ifdef HOST_IMM_ADDR32
2814         if(c)
2815           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2816         else
2817         #endif
2818         emit_readdword_indexed_tlb(0,a,map,th,tl);
2819       }
2820       if(jaddr)
2821         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2822     }
2823     else
2824       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2825   }
2826  }
2827   //emit_storereg(rt1[i],tl); // DEBUG
2828   //if(opcode[i]==0x23)
2829   //if(opcode[i]==0x24)
2830   //if(opcode[i]==0x23||opcode[i]==0x24)
2831   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2832   {
2833     //emit_pusha();
2834     save_regs(0x100f);
2835         emit_readword((int)&last_count,ECX);
2836         #ifdef __i386__
2837         if(get_reg(i_regs->regmap,CCREG)<0)
2838           emit_loadreg(CCREG,HOST_CCREG);
2839         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2840         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2841         emit_writeword(HOST_CCREG,(int)&Count);
2842         #endif
2843         #ifdef __arm__
2844         if(get_reg(i_regs->regmap,CCREG)<0)
2845           emit_loadreg(CCREG,0);
2846         else
2847           emit_mov(HOST_CCREG,0);
2848         emit_add(0,ECX,0);
2849         emit_addimm(0,2*ccadj[i],0);
2850         emit_writeword(0,(int)&Count);
2851         #endif
2852     emit_call((int)memdebug);
2853     //emit_popa();
2854     restore_regs(0x100f);
2855   }*/
2856 }
2857
2858 #ifndef loadlr_assemble
2859 void loadlr_assemble(int i,struct regstat *i_regs)
2860 {
2861   printf("Need loadlr_assemble for this architecture.\n");
2862   exit(1);
2863 }
2864 #endif
2865
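// Emit code for aligned stores (SB/SH/SW/SD).  The fast path writes straight
// to RAM; addresses that fail the RAM range check fall through to a
// STOREx_STUB.  After the write, the self-modifying-code check below may
// invalidate any compiled block living in the written page.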
2866 void store_assemble(int i,struct regstat *i_regs)
2867 {
2868   int s,th,tl,map=-1;
2869   int addr,temp;
2870   int offset;
2871   int jaddr=0,type;
2872   int memtarget=0,c=0;
2873   int agr=AGEN1+(i&1);
2874   int faststore_reg_override=0;
2875   u_int hr,reglist=0;
2876   th=get_reg(i_regs->regmap,rs2[i]|64);
2877   tl=get_reg(i_regs->regmap,rs2[i]);
2878   s=get_reg(i_regs->regmap,rs1[i]);
2879   temp=get_reg(i_regs->regmap,agr);
2880   if(temp<0) temp=get_reg(i_regs->regmap,-1);
2881   offset=imm[i];
2882   if(s>=0) {
2883     c=(i_regs->wasconst>>s)&1;
2884     if(c) {
2885       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2886     }
2887   }
2888   assert(tl>=0);
2889   assert(temp>=0);
2890   for(hr=0;hr<HOST_REGS;hr++) {
2891     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2892   }
2893   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2894   if(offset||s<0||c) addr=temp;
2895   else addr=s;
2896   if(!c) {
2897     jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
2898   }
2899   else if(ram_offset&&memtarget) {
2900     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2901     faststore_reg_override=HOST_TEMPREG;
2902   }
2903
2904   if (opcode[i]==0x28) { // SB
2905     if(!c||memtarget) {
2906       int x=0,a=temp;
2907 #ifdef BIG_ENDIAN_MIPS
2908       if(!c) emit_xorimm(addr,3,temp);
2909       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2910 #else
2911       if(!c) a=addr;
2912 #endif
2913       if(faststore_reg_override) a=faststore_reg_override;
2914       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
2915       emit_writebyte_indexed_tlb(tl,x,a,map,a);
2916     }
2917     type=STOREB_STUB;
2918   }
2919   if (opcode[i]==0x29) { // SH
2920     if(!c||memtarget) {
2921       int x=0,a=temp;
2922 #ifdef BIG_ENDIAN_MIPS
2923       if(!c) emit_xorimm(addr,2,temp);
2924       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2925 #else
2926       if(!c) a=addr;
2927 #endif
2928       if(faststore_reg_override) a=faststore_reg_override;
2929       //#ifdef
2930       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
2931       //#else
2932       if(map>=0) {
2933         emit_writehword_indexed(tl,x,a);
2934       }else
2935         //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
2936         emit_writehword_indexed(tl,x,a);
2937     }
2938     type=STOREH_STUB;
2939   }
2940   if (opcode[i]==0x2B) { // SW
2941     if(!c||memtarget) {
2942       int a=addr;
2943       if(faststore_reg_override) a=faststore_reg_override;
2944       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
2945       emit_writeword_indexed_tlb(tl,0,a,map,temp);
2946     }
2947     type=STOREW_STUB;
2948   }
2949   if (opcode[i]==0x3F) { // SD
2950     if(!c||memtarget) {
2951       int a=addr;
2952       if(faststore_reg_override) a=faststore_reg_override;
2953       if(rs2[i]) {
2954         assert(th>=0);
2955         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
2956         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
2957         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
2958       }else{
2959         // Store zero
2960         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
2961         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
2962         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
2963       }
2964     }
2965     type=STORED_STUB;
2966   }
2967   if(jaddr) {
2968     // PCSX store handlers don't check invcode again
2969     reglist|=1<<addr;
2970     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2971     jaddr=0;
2972   }
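  // Self-modifying-code check: compare the invalid_code[] entry for the
  // written page and, if compiled code may live there, call the invalidation
  // handler (directly or through an INVCODE_STUB).  Skipped when the base
  // register was already checked (waswritten) or the NDHACK_NO_SMC_CHECK
  // hack is active.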
2973   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
2974     if(!c||memtarget) {
2975       #ifdef DESTRUCTIVE_SHIFT
2976       // The x86 shift operation is 'destructive'; it overwrites the
2977       // source register, so we need to make a copy first and use that.
2978       addr=temp;
2979       #endif
2980       #if defined(HOST_IMM8)
2981       int ir=get_reg(i_regs->regmap,INVCP);
2982       assert(ir>=0);
2983       emit_cmpmem_indexedsr12_reg(ir,addr,1);
2984       #else
2985       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
2986       #endif
2987       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2988       emit_callne(invalidate_addr_reg[addr]);
2989       #else
2990       int jaddr2=(int)out;
2991       emit_jne(0);
2992       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
2993       #endif
2994     }
2995   }
2996   u_int addr_val=c?constmap[i][s]+offset:0; // only computed from constmap when the address really is constant
2997   if(jaddr) {
2998     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2999   } else if(c&&!memtarget) {
3000     inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3001   }
3002   // Basic detection of stores that modify the current block;
3003   // no need to look back, as earlier code should already be in the MIPS cache.
3004   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3005     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3006     assert(i_regs->regmap==regs[i].regmap); // not delay slot
3007     if(i_regs->regmap==regs[i].regmap) {
3008       load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
3009       wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
3010       emit_movimm(start+i*4+4,0);
3011       emit_writeword(0,(int)&pcaddr);
3012       emit_jmp((int)do_interrupt);
3013     }
3014   }
3015   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3016   //if(opcode[i]==0x2B || opcode[i]==0x28)
3017   //if(opcode[i]==0x2B || opcode[i]==0x29)
3018   //if(opcode[i]==0x2B)
3019   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3020   {
3021     #ifdef __i386__
3022     emit_pusha();
3023     #endif
3024     #ifdef __arm__
3025     save_regs(0x100f);
3026     #endif
3027         emit_readword((int)&last_count,ECX);
3028         #ifdef __i386__
3029         if(get_reg(i_regs->regmap,CCREG)<0)
3030           emit_loadreg(CCREG,HOST_CCREG);
3031         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3032         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3033         emit_writeword(HOST_CCREG,(int)&Count);
3034         #endif
3035         #ifdef __arm__
3036         if(get_reg(i_regs->regmap,CCREG)<0)
3037           emit_loadreg(CCREG,0);
3038         else
3039           emit_mov(HOST_CCREG,0);
3040         emit_add(0,ECX,0);
3041         emit_addimm(0,2*ccadj[i],0);
3042         emit_writeword(0,(int)&Count);
3043         #endif
3044     emit_call((int)memdebug);
3045     #ifdef __i386__
3046     emit_popa();
3047     #endif
3048     #ifdef __arm__
3049     restore_regs(0x100f);
3050     #endif
3051   }*/
3052 }
3053
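// Emit code for the unaligned stores SWL/SWR and their doubleword variants
// SDL/SDR.  The low address bits are tested at run time and one of four
// cases is taken, each writing only the byte lanes covered by the
// instruction.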
3054 void storelr_assemble(int i,struct regstat *i_regs)
3055 {
3056   int s,th,tl;
3057   int temp;
3058   int temp2=-1;
3059   int offset;
3060   int jaddr=0;
3061   int case1,case2,case3;
3062   int done0,done1,done2;
3063   int memtarget=0,c=0;
3064   int agr=AGEN1+(i&1);
3065   u_int hr,reglist=0;
3066   th=get_reg(i_regs->regmap,rs2[i]|64);
3067   tl=get_reg(i_regs->regmap,rs2[i]);
3068   s=get_reg(i_regs->regmap,rs1[i]);
3069   temp=get_reg(i_regs->regmap,agr);
3070   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3071   offset=imm[i];
3072   if(s>=0) {
3073     c=(i_regs->isconst>>s)&1;
3074     if(c) {
3075       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3076     }
3077   }
3078   assert(tl>=0);
3079   for(hr=0;hr<HOST_REGS;hr++) {
3080     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3081   }
3082   assert(temp>=0);
3083   if(!c) {
3084     emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3085     if(!offset&&s!=temp) emit_mov(s,temp);
3086     jaddr=(int)out;
3087     emit_jno(0);
3088   }
3089   else
3090   {
3091     if(!memtarget||!rs1[i]) {
3092       jaddr=(int)out;
3093       emit_jmp(0);
3094     }
3095   }
3096   #ifdef RAM_OFFSET
3097   int map=get_reg(i_regs->regmap,ROREG);
3098   if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3099   #else
3100   if((u_int)rdram!=0x80000000)
3101     emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3102   #endif
3103
3104   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3105     temp2=get_reg(i_regs->regmap,FTEMP);
3106     if(!rs2[i]) temp2=th=tl;
3107   }
3108
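  // The case bodies below appear to be written for big-endian MIPS (the N64
  // heritage of this code); on a little-endian build the low address bits
  // are flipped first (xor 3) so the same sequences can be shared.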
3109 #ifndef BIG_ENDIAN_MIPS
3110     emit_xorimm(temp,3,temp);
3111 #endif
3112   emit_testimm(temp,2);
3113   case2=(int)out;
3114   emit_jne(0);
3115   emit_testimm(temp,1);
3116   case1=(int)out;
3117   emit_jne(0);
3118   // 0
3119   if (opcode[i]==0x2A) { // SWL
3120     emit_writeword_indexed(tl,0,temp);
3121   }
3122   if (opcode[i]==0x2E) { // SWR
3123     emit_writebyte_indexed(tl,3,temp);
3124   }
3125   if (opcode[i]==0x2C) { // SDL
3126     emit_writeword_indexed(th,0,temp);
3127     if(rs2[i]) emit_mov(tl,temp2);
3128   }
3129   if (opcode[i]==0x2D) { // SDR
3130     emit_writebyte_indexed(tl,3,temp);
3131     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3132   }
3133   done0=(int)out;
3134   emit_jmp(0);
3135   // 1
3136   set_jump_target(case1,(int)out);
3137   if (opcode[i]==0x2A) { // SWL
3138     // Write 3 msb into three least significant bytes
3139     if(rs2[i]) emit_rorimm(tl,8,tl);
3140     emit_writehword_indexed(tl,-1,temp);
3141     if(rs2[i]) emit_rorimm(tl,16,tl);
3142     emit_writebyte_indexed(tl,1,temp);
3143     if(rs2[i]) emit_rorimm(tl,8,tl);
3144   }
3145   if (opcode[i]==0x2E) { // SWR
3146     // Write two lsb into two most significant bytes
3147     emit_writehword_indexed(tl,1,temp);
3148   }
3149   if (opcode[i]==0x2C) { // SDL
3150     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3151     // Write 3 msb into three least significant bytes
3152     if(rs2[i]) emit_rorimm(th,8,th);
3153     emit_writehword_indexed(th,-1,temp);
3154     if(rs2[i]) emit_rorimm(th,16,th);
3155     emit_writebyte_indexed(th,1,temp);
3156     if(rs2[i]) emit_rorimm(th,8,th);
3157   }
3158   if (opcode[i]==0x2D) { // SDR
3159     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3160     // Write two lsb into two most significant bytes
3161     emit_writehword_indexed(tl,1,temp);
3162   }
3163   done1=(int)out;
3164   emit_jmp(0);
3165   // 2
3166   set_jump_target(case2,(int)out);
3167   emit_testimm(temp,1);
3168   case3=(int)out;
3169   emit_jne(0);
3170   if (opcode[i]==0x2A) { // SWL
3171     // Write two msb into two least significant bytes
3172     if(rs2[i]) emit_rorimm(tl,16,tl);
3173     emit_writehword_indexed(tl,-2,temp);
3174     if(rs2[i]) emit_rorimm(tl,16,tl);
3175   }
3176   if (opcode[i]==0x2E) { // SWR
3177     // Write 3 lsb into three most significant bytes
3178     emit_writebyte_indexed(tl,-1,temp);
3179     if(rs2[i]) emit_rorimm(tl,8,tl);
3180     emit_writehword_indexed(tl,0,temp);
3181     if(rs2[i]) emit_rorimm(tl,24,tl);
3182   }
3183   if (opcode[i]==0x2C) { // SDL
3184     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3185     // Write two msb into two least significant bytes
3186     if(rs2[i]) emit_rorimm(th,16,th);
3187     emit_writehword_indexed(th,-2,temp);
3188     if(rs2[i]) emit_rorimm(th,16,th);
3189   }
3190   if (opcode[i]==0x2D) { // SDR
3191     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3192     // Write 3 lsb into three most significant bytes
3193     emit_writebyte_indexed(tl,-1,temp);
3194     if(rs2[i]) emit_rorimm(tl,8,tl);
3195     emit_writehword_indexed(tl,0,temp);
3196     if(rs2[i]) emit_rorimm(tl,24,tl);
3197   }
3198   done2=(int)out;
3199   emit_jmp(0);
3200   // 3
3201   set_jump_target(case3,(int)out);
3202   if (opcode[i]==0x2A) { // SWL
3203     // Write msb into least significant byte
3204     if(rs2[i]) emit_rorimm(tl,24,tl);
3205     emit_writebyte_indexed(tl,-3,temp);
3206     if(rs2[i]) emit_rorimm(tl,8,tl);
3207   }
3208   if (opcode[i]==0x2E) { // SWR
3209     // Write entire word
3210     emit_writeword_indexed(tl,-3,temp);
3211   }
3212   if (opcode[i]==0x2C) { // SDL
3213     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3214     // Write msb into least significant byte
3215     if(rs2[i]) emit_rorimm(th,24,th);
3216     emit_writebyte_indexed(th,-3,temp);
3217     if(rs2[i]) emit_rorimm(th,8,th);
3218   }
3219   if (opcode[i]==0x2D) { // SDR
3220     if(rs2[i]) emit_mov(th,temp2);
3221     // Write entire word
3222     emit_writeword_indexed(tl,-3,temp);
3223   }
3224   set_jump_target(done0,(int)out);
3225   set_jump_target(done1,(int)out);
3226   set_jump_target(done2,(int)out);
3227   if (opcode[i]==0x2C) { // SDL
3228     emit_testimm(temp,4);
3229     done0=(int)out;
3230     emit_jne(0);
3231     emit_andimm(temp,~3,temp);
3232     emit_writeword_indexed(temp2,4,temp);
3233     set_jump_target(done0,(int)out);
3234   }
3235   if (opcode[i]==0x2D) { // SDR
3236     emit_testimm(temp,4);
3237     done0=(int)out;
3238     emit_jeq(0);
3239     emit_andimm(temp,~3,temp);
3240     emit_writeword_indexed(temp2,-4,temp);
3241     set_jump_target(done0,(int)out);
3242   }
3243   if(!c||!memtarget)
3244     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3245   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3246     #ifdef RAM_OFFSET
3247     int map=get_reg(i_regs->regmap,ROREG);
3248     if(map<0) map=HOST_TEMPREG;
3249     gen_orig_addr_w(temp,map);
3250     #else
3251     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3252     #endif
3253     #if defined(HOST_IMM8)
3254     int ir=get_reg(i_regs->regmap,INVCP);
3255     assert(ir>=0);
3256     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3257     #else
3258     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3259     #endif
3260     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3261     emit_callne(invalidate_addr_reg[temp]);
3262     #else
3263     int jaddr2=(int)out;
3264     emit_jne(0);
3265     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3266     #endif
3267   }
3268   /*
3269     emit_pusha();
3270     //save_regs(0x100f);
3271         emit_readword((int)&last_count,ECX);
3272         if(get_reg(i_regs->regmap,CCREG)<0)
3273           emit_loadreg(CCREG,HOST_CCREG);
3274         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3275         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3276         emit_writeword(HOST_CCREG,(int)&Count);
3277     emit_call((int)memdebug);
3278     emit_popa();
3279     //restore_regs(0x100f);
3280   */
3281 }
3282
3283 void c1ls_assemble(int i,struct regstat *i_regs)
3284 {
3285   cop1_unusable(i, i_regs);
3286 }
3287
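// Emit code for LWC2/SWC2, which move a word between memory and a GTE (COP2)
// data register.  The GTE register is staged through the FTEMP host register
// using cop2_get_dreg()/cop2_put_dreg(); the memory side follows the same
// fast-path / stub pattern as ordinary loads and stores.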
3288 void c2ls_assemble(int i,struct regstat *i_regs)
3289 {
3290   int s,tl;
3291   int ar;
3292   int offset;
3293   int memtarget=0,c=0;
3294   int jaddr2=0,type;
3295   int agr=AGEN1+(i&1);
3296   int fastio_reg_override=0;
3297   u_int hr,reglist=0;
3298   u_int copr=(source[i]>>16)&0x1f;
3299   s=get_reg(i_regs->regmap,rs1[i]);
3300   tl=get_reg(i_regs->regmap,FTEMP);
3301   offset=imm[i];
3302   assert(rs1[i]>0);
3303   assert(tl>=0);
3304
3305   for(hr=0;hr<HOST_REGS;hr++) {
3306     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3307   }
3308   if(i_regs->regmap[HOST_CCREG]==CCREG)
3309     reglist&=~(1<<HOST_CCREG);
3310
3311   // get the address
3312   if (opcode[i]==0x3a) { // SWC2
3313     ar=get_reg(i_regs->regmap,agr);
3314     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3315     reglist|=1<<ar;
3316   } else { // LWC2
3317     ar=tl;
3318   }
3319   if(s>=0) c=(i_regs->wasconst>>s)&1;
3320   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3321   if (!offset&&!c&&s>=0) ar=s;
3322   assert(ar>=0);
3323
3324   if (opcode[i]==0x3a) { // SWC2
3325     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3326     type=STOREW_STUB;
3327   }
3328   else
3329     type=LOADW_STUB;
3330
3331   if(c&&!memtarget) {
3332     jaddr2=(int)out;
3333     emit_jmp(0); // inline_readstub/inline_writestub?
3334   }
3335   else {
3336     if(!c) {
3337       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3338     }
3339     else if(ram_offset&&memtarget) {
3340       emit_addimm(ar,ram_offset,HOST_TEMPREG);
3341       fastio_reg_override=HOST_TEMPREG;
3342     }
3343     if (opcode[i]==0x32) { // LWC2
3344       int a=ar; // declared before the #ifdef so the 'else' body below stays a single statement
3345       if(fastio_reg_override) a=fastio_reg_override;
3346       #ifdef HOST_IMM_ADDR32
3347       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3348       else
3349       #endif
3350       emit_readword_indexed(0,a,tl);
3351     }
3352     if (opcode[i]==0x3a) { // SWC2
3353       #ifdef DESTRUCTIVE_SHIFT
3354       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3355       #endif
3356       int a=ar;
3357       if(fastio_reg_override) a=fastio_reg_override;
3358       emit_writeword_indexed(tl,0,a);
3359     }
3360   }
3361   if(jaddr2)
3362     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3363   if(opcode[i]==0x3a) // SWC2
3364   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3365 #if defined(HOST_IMM8)
3366     int ir=get_reg(i_regs->regmap,INVCP);
3367     assert(ir>=0);
3368     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3369 #else
3370     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3371 #endif
3372     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3373     emit_callne(invalidate_addr_reg[ar]);
3374     #else
3375     int jaddr3=(int)out;
3376     emit_jne(0);
3377     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3378     #endif
3379   }
3380   if (opcode[i]==0x32) { // LWC2
3381     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3382   }
3383 }
3384
3385 #ifndef multdiv_assemble
3386 void multdiv_assemble(int i,struct regstat *i_regs)
3387 {
3388   printf("Need multdiv_assemble for this architecture.\n");
3389   exit(1);
3390 }
3391 #endif
3392
3393 void mov_assemble(int i,struct regstat *i_regs)
3394 {
3395   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3396   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3397   if(rt1[i]) {
3398     signed char sh,sl,th,tl;
3399     th=get_reg(i_regs->regmap,rt1[i]|64);
3400     tl=get_reg(i_regs->regmap,rt1[i]);
3401     //assert(tl>=0);
3402     if(tl>=0) {
3403       sh=get_reg(i_regs->regmap,rs1[i]|64);
3404       sl=get_reg(i_regs->regmap,rs1[i]);
3405       if(sl>=0) emit_mov(sl,tl);
3406       else emit_loadreg(rs1[i],tl);
3407       if(th>=0) {
3408         if(sh>=0) emit_mov(sh,th);
3409         else emit_loadreg(rs1[i]|64,th);
3410       }
3411     }
3412   }
3413 }
3414
3415 #ifndef fconv_assemble
3416 void fconv_assemble(int i,struct regstat *i_regs)
3417 {
3418   printf("Need fconv_assemble for this architecture.\n");
3419   exit(1);
3420 }
3421 #endif
3422
3423 #if 0
3424 void float_assemble(int i,struct regstat *i_regs)
3425 {
3426   printf("Need float_assemble for this architecture.\n");
3427   exit(1);
3428 }
3429 #endif
3430
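// Assemble SYSCALL: record the PC of the instruction, apply the
// cycle count adjustment and jump to jump_syscall_hle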
3431 void syscall_assemble(int i,struct regstat *i_regs)
3432 {
3433   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3434   assert(ccreg==HOST_CCREG);
3435   assert(!is_delayslot);
3436   (void)ccreg;
3437   emit_movimm(start+i*4,EAX); // Get PC
3438   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3439   emit_jmp((int)jump_syscall_hle); // XXX
3440 }
3441
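// Assemble an HLE BIOS call: pass the return PC and the HLE handler
// address to jump_hlecall.  The 26-bit function index from the opcode
// is bounds-checked against psxHLEt; out-of-range values get psxNULL.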
3442 void hlecall_assemble(int i,struct regstat *i_regs)
3443 {
3444   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3445   assert(ccreg==HOST_CCREG);
3446   assert(!is_delayslot);
3447   (void)ccreg;
3448   emit_movimm(start+i*4+4,0); // Get PC
3449   uint32_t hleCode = source[i] & 0x03ffffff;
3450   if (hleCode >= (sizeof(psxHLEt) / sizeof(psxHLEt[0])))
3451     emit_movimm((int)psxNULL,1);
3452   else
3453     emit_movimm((int)psxHLEt[hleCode],1);
3454   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
3455   emit_jmp((int)jump_hlecall);
3456 }
3457
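// Assemble a call out of the recompiled code: record the PC, update
// the cycle count and jump to jump_intcall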
3458 void intcall_assemble(int i,struct regstat *i_regs)
3459 {
3460   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3461   assert(ccreg==HOST_CCREG);
3462   assert(!is_delayslot);
3463   (void)ccreg;
3464   emit_movimm(start+i*4,0); // Get PC
3465   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3466   emit_jmp((int)jump_intcall);
3467 }
3468
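// Assemble the instruction in a branch delay slot: set is_delayslot,
// dispatch on the instruction type, then clear the flag.
// Branches are not valid delay slot instructions.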
3469 void ds_assemble(int i,struct regstat *i_regs)
3470 {
3471   speculate_register_values(i);
3472   is_delayslot=1;
3473   switch(itype[i]) {
3474     case ALU:
3475       alu_assemble(i,i_regs);break;
3476     case IMM16:
3477       imm16_assemble(i,i_regs);break;
3478     case SHIFT:
3479       shift_assemble(i,i_regs);break;
3480     case SHIFTIMM:
3481       shiftimm_assemble(i,i_regs);break;
3482     case LOAD:
3483       load_assemble(i,i_regs);break;
3484     case LOADLR:
3485       loadlr_assemble(i,i_regs);break;
3486     case STORE:
3487       store_assemble(i,i_regs);break;
3488     case STORELR:
3489       storelr_assemble(i,i_regs);break;
3490     case COP0:
3491       cop0_assemble(i,i_regs);break;
3492     case COP1:
3493       cop1_assemble(i,i_regs);break;
3494     case C1LS:
3495       c1ls_assemble(i,i_regs);break;
3496     case COP2:
3497       cop2_assemble(i,i_regs);break;
3498     case C2LS:
3499       c2ls_assemble(i,i_regs);break;
3500     case C2OP:
3501       c2op_assemble(i,i_regs);break;
3502     case FCONV:
3503       fconv_assemble(i,i_regs);break;
3504     case FLOAT:
3505       float_assemble(i,i_regs);break;
3506     case FCOMP:
3507       fcomp_assemble(i,i_regs);break;
3508     case MULTDIV:
3509       multdiv_assemble(i,i_regs);break;
3510     case MOV:
3511       mov_assemble(i,i_regs);break;
3512     case SYSCALL:
3513     case HLECALL:
3514     case INTCALL:
3515     case SPAN:
3516     case UJUMP:
3517     case RJUMP:
3518     case CJUMP:
3519     case SJUMP:
3520     case FJUMP:
3521       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
3522   }
3523   is_delayslot=0;
3524 }
3525
3526 // Is the branch target a valid internal jump?
3527 int internal_branch(uint64_t i_is32,int addr)
3528 {
3529   if(addr&1) return 0; // Indirect (register) jump
3530   if(addr>=start && addr<start+slen*4-4)
3531   {
3532     //int t=(addr-start)>>2;
3533     // Delay slots are not valid branch targets
3534     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3535     // 64 -> 32 bit transition requires a recompile
3536     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3537     {
3538       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3539       else printf("optimizable: yes\n");
3540     }*/
3541     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3542     return 1;
3543   }
3544   return 0;
3545 }
3546
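// Write back dirty registers that are allocated in pre[] but have no
// slot in entry[], then move values that remain allocated but end up
// in a different host register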
3547 #ifndef wb_invalidate
3548 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3549   uint64_t u,uint64_t uu)
3550 {
3551   int hr;
3552   for(hr=0;hr<HOST_REGS;hr++) {
3553     if(hr!=EXCLUDE_REG) {
3554       if(pre[hr]!=entry[hr]) {
3555         if(pre[hr]>=0) {
3556           if((dirty>>hr)&1) {
3557             if(get_reg(entry,pre[hr])<0) {
3558               if(pre[hr]<64) {
3559                 if(!((u>>pre[hr])&1)) {
3560                   emit_storereg(pre[hr],hr);
3561                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3562                     emit_sarimm(hr,31,hr);
3563                     emit_storereg(pre[hr]|64,hr);
3564                   }
3565                 }
3566               }else{
3567                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3568                   emit_storereg(pre[hr],hr);
3569                 }
3570               }
3571             }
3572           }
3573         }
3574       }
3575     }
3576   }
3577   // Move from one register to another (no writeback)
3578   for(hr=0;hr<HOST_REGS;hr++) {
3579     if(hr!=EXCLUDE_REG) {
3580       if(pre[hr]!=entry[hr]) {
3581         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3582           int nr;
3583           if((nr=get_reg(entry,pre[hr]))>=0) {
3584             emit_mov(hr,nr);
3585           }
3586         }
3587       }
3588     }
3589   }
3590 }
3591 #endif
3592
3593 // Load the specified registers
3594 // This only loads the registers given as arguments because
3595 // we don't want to load things that will be overwritten
3596 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3597 {
3598   int hr;
3599   // Load 32-bit regs
3600   for(hr=0;hr<HOST_REGS;hr++) {
3601     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3602       if(entry[hr]!=regmap[hr]) {
3603         if(regmap[hr]==rs1||regmap[hr]==rs2)
3604         {
3605           if(regmap[hr]==0) {
3606             emit_zeroreg(hr);
3607           }
3608           else
3609           {
3610             emit_loadreg(regmap[hr],hr);
3611           }
3612         }
3613       }
3614     }
3615   }
3616   // Load 64-bit regs
3617   for(hr=0;hr<HOST_REGS;hr++) {
3618     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3619       if(entry[hr]!=regmap[hr]) {
3620         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3621         {
3622           assert(regmap[hr]!=64);
3623           if((is32>>(regmap[hr]&63))&1) {
3624             int lr=get_reg(regmap,regmap[hr]-64);
3625             if(lr>=0)
3626               emit_sarimm(lr,31,hr);
3627             else
3628               emit_loadreg(regmap[hr],hr);
3629           }
3630           else
3631           {
3632             emit_loadreg(regmap[hr],hr);
3633           }
3634         }
3635       }
3636     }
3637   }
3638 }
3639
3640 // Load registers prior to the start of a loop
3641 // so that they are not loaded within the loop
3642 static void loop_preload(signed char pre[],signed char entry[])
3643 {
3644   int hr;
3645   for(hr=0;hr<HOST_REGS;hr++) {
3646     if(hr!=EXCLUDE_REG) {
3647       if(pre[hr]!=entry[hr]) {
3648         if(entry[hr]>=0) {
3649           if(get_reg(pre,entry[hr])<0) {
3650             assem_debug("loop preload:\n");
3651             //printf("loop preload: %d\n",hr);
3652             if(entry[hr]==0) {
3653               emit_zeroreg(hr);
3654             }
3655             else if(entry[hr]<TEMPREG)
3656             {
3657               emit_loadreg(entry[hr],hr);
3658             }
3659             else if(entry[hr]-64<TEMPREG)
3660             {
3661               emit_loadreg(entry[hr],hr);
3662             }
3663           }
3664         }
3665       }
3666     }
3667   }
3668 }
3669
3670 // Generate the address for a load/store instruction.
3671 // The address goes to AGEN for stores, and to FTEMP for LOADLR and cop1/cop2 loads
3672 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3673 {
3674   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3675     int ra=-1;
3676     int agr=AGEN1+(i&1);
3677     if(itype[i]==LOAD) {
3678       ra=get_reg(i_regs->regmap,rt1[i]);
3679       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3680       assert(ra>=0);
3681     }
3682     if(itype[i]==LOADLR) {
3683       ra=get_reg(i_regs->regmap,FTEMP);
3684     }
3685     if(itype[i]==STORE||itype[i]==STORELR) {
3686       ra=get_reg(i_regs->regmap,agr);
3687       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3688     }
3689     if(itype[i]==C1LS||itype[i]==C2LS) {
3690       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3691         ra=get_reg(i_regs->regmap,FTEMP);
3692       else { // SWC1/SDC1/SWC2/SDC2
3693         ra=get_reg(i_regs->regmap,agr);
3694         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3695       }
3696     }
3697     int rs=get_reg(i_regs->regmap,rs1[i]);
3698     if(ra>=0) {
3699       int offset=imm[i];
3700       int c=(i_regs->wasconst>>rs)&1;
3701       if(rs1[i]==0) {
3702         // Using r0 as a base address
3703         if(!entry||entry[ra]!=agr) {
3704           if (opcode[i]==0x22||opcode[i]==0x26) {
3705             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3706           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3707             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3708           }else{
3709             emit_movimm(offset,ra);
3710           }
3711         } // else did it in the previous cycle
3712       }
3713       else if(rs<0) {
3714         if(!entry||entry[ra]!=rs1[i])
3715           emit_loadreg(rs1[i],ra);
3716         //if(!entry||entry[ra]!=rs1[i])
3717         //  printf("poor load scheduling!\n");
3718       }
3719       else if(c) {
3720         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
3721           if(!entry||entry[ra]!=agr) {
3722             if (opcode[i]==0x22||opcode[i]==0x26) {
3723               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3724             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3725               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3726             }else{
3727               #ifdef HOST_IMM_ADDR32
3728               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3729               #endif
3730               emit_movimm(constmap[i][rs]+offset,ra);
3731               regs[i].loadedconst|=1<<ra;
3732             }
3733           } // else did it in the previous cycle
3734         } // else load_consts already did it
3735       }
3736       if(offset&&!c&&rs1[i]) {
3737         if(rs>=0) {
3738           emit_addimm(rs,offset,ra);
3739         }else{
3740           emit_addimm(ra,offset,ra);
3741         }
3742       }
3743     }
3744   }
3745   // Preload constants for next instruction
3746   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
3747     int agr,ra;
3748     // Actual address
3749     agr=AGEN1+((i+1)&1);
3750     ra=get_reg(i_regs->regmap,agr);
3751     if(ra>=0) {
3752       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
3753       int offset=imm[i+1];
3754       int c=(regs[i+1].wasconst>>rs)&1;
3755       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
3756         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3757           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3758         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3759           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3760         }else{
3761           #ifdef HOST_IMM_ADDR32
3762           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3763           #endif
3764           emit_movimm(constmap[i+1][rs]+offset,ra);
3765           regs[i+1].loadedconst|=1<<ra;
3766         }
3767       }
3768       else if(rs1[i+1]==0) {
3769         // Using r0 as a base address
3770         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3771           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3772         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3773           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3774         }else{
3775           emit_movimm(offset,ra);
3776         }
3777       }
3778     }
3779   }
3780 }
3781
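// Find the final value of the constant in host register hr, scanning
// forward while the same constant stays allocated so that one immediate
// load can cover the whole run.  Where possible the offset of a following
// load is folded into the address.  Returns 0 if the value is unneeded.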
3782 static int get_final_value(int hr, int i, int *value)
3783 {
3784   int reg=regs[i].regmap[hr];
3785   while(i<slen-1) {
3786     if(regs[i+1].regmap[hr]!=reg) break;
3787     if(!((regs[i+1].isconst>>hr)&1)) break;
3788     if(bt[i+1]) break;
3789     i++;
3790   }
3791   if(i<slen-1) {
3792     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
3793       *value=constmap[i][hr];
3794       return 1;
3795     }
3796     if(!bt[i+1]) {
3797       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
3798         // Load in delay slot, out-of-order execution
3799         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
3800         {
3801           // Precompute load address
3802           *value=constmap[i][hr]+imm[i+2];
3803           return 1;
3804         }
3805       }
3806       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
3807       {
3808         // Precompute load address
3809         *value=constmap[i][hr]+imm[i+1];
3810         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
3811         return 1;
3812       }
3813     }
3814   }
3815   *value=constmap[i][hr];
3816   //printf("c=%x\n",(int)constmap[i][hr]);
3817   if(i==slen-1) return 1;
3818   if(reg<64) {
3819     return !((unneeded_reg[i+1]>>reg)&1);
3820   }else{
3821     return !((unneeded_reg_upper[i+1]>>reg)&1);
3822   }
3823 }
3824
3825 // Load registers with known constants
3826 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
3827 {
3828   int hr,hr2;
3829   // Propagate loaded-constant flags from the previous instruction
3830   if(i==0||bt[i])
3831     regs[i].loadedconst=0;
3832   else {
3833     for(hr=0;hr<HOST_REGS;hr++) {
3834       if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
3835          &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
3836       {
3837         regs[i].loadedconst|=1<<hr;
3838       }
3839     }
3840   }
3841   // Load 32-bit regs
3842   for(hr=0;hr<HOST_REGS;hr++) {
3843     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3844       //if(entry[hr]!=regmap[hr]) {
3845       if(!((regs[i].loadedconst>>hr)&1)) {
3846         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3847           int value,similar=0;
3848           if(get_final_value(hr,i,&value)) {
3849             // see if some other register has similar value
3850             for(hr2=0;hr2<HOST_REGS;hr2++) {
3851               if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
3852                 if(is_similar_value(value,constmap[i][hr2])) {
3853                   similar=1;
3854                   break;
3855                 }
3856               }
3857             }
3858             if(similar) {
3859               int value2;
3860               if(get_final_value(hr2,i,&value2)) // is this needed?
3861                 emit_movimm_from(value2,hr2,value,hr);
3862               else
3863                 emit_movimm(value,hr);
3864             }
3865             else if(value==0) {
3866               emit_zeroreg(hr);
3867             }
3868             else {
3869               emit_movimm(value,hr);
3870             }
3871           }
3872           regs[i].loadedconst|=1<<hr;
3873         }
3874       }
3875     }
3876   }
3877   // Load 64-bit regs
3878   for(hr=0;hr<HOST_REGS;hr++) {
3879     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3880       //if(entry[hr]!=regmap[hr]) {
3881       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
3882         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3883           if((is32>>(regmap[hr]&63))&1) {
3884             int lr=get_reg(regmap,regmap[hr]-64);
3885             assert(lr>=0);
3886             emit_sarimm(lr,31,hr);
3887           }
3888           else
3889           {
3890             int value;
3891             if(get_final_value(hr,i,&value)) {
3892               if(value==0) {
3893                 emit_zeroreg(hr);
3894               }
3895               else {
3896                 emit_movimm(value,hr);
3897               }
3898             }
3899           }
3900         }
3901       }
3902     }
3903   }
3904 }
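
// Like load_consts, but unconditionally loads the known constant for
// every dirty mapped host register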
3905 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
3906 {
3907   int hr;
3908   // Load 32-bit regs
3909   for(hr=0;hr<HOST_REGS;hr++) {
3910     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3911       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3912         int value=constmap[i][hr];
3913         if(value==0) {
3914           emit_zeroreg(hr);
3915         }
3916         else {
3917           emit_movimm(value,hr);
3918         }
3919       }
3920     }
3921   }
3922   // Load 64-bit regs
3923   for(hr=0;hr<HOST_REGS;hr++) {
3924     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3925       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3926         if((is32>>(regmap[hr]&63))&1) {
3927           int lr=get_reg(regmap,regmap[hr]-64);
3928           assert(lr>=0);
3929           emit_sarimm(lr,31,hr);
3930         }
3931         else
3932         {
3933           int value=constmap[i][hr];
3934           if(value==0) {
3935             emit_zeroreg(hr);
3936           }
3937           else {
3938             emit_movimm(value,hr);
3939           }
3940         }
3941       }
3942     }
3943   }
3944 }
3945
3946 // Write out all dirty registers (except cycle count)
3947 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
3948 {
3949   int hr;
3950   for(hr=0;hr<HOST_REGS;hr++) {
3951     if(hr!=EXCLUDE_REG) {
3952       if(i_regmap[hr]>0) {
3953         if(i_regmap[hr]!=CCREG) {
3954           if((i_dirty>>hr)&1) {
3955             if(i_regmap[hr]<64) {
3956               emit_storereg(i_regmap[hr],hr);
3957             }else{
3958               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3959                 emit_storereg(i_regmap[hr],hr);
3960               }
3961             }
3962           }
3963         }
3964       }
3965     }
3966   }
3967 }
3968 // Write out dirty registers that we need to reload (pair with load_needed_regs)
3969 // This writes the registers not written by store_regs_bt
3970 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
3971 {
3972   int hr;
3973   int t=(addr-start)>>2;
3974   for(hr=0;hr<HOST_REGS;hr++) {
3975     if(hr!=EXCLUDE_REG) {
3976       if(i_regmap[hr]>0) {
3977         if(i_regmap[hr]!=CCREG) {
3978           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
3979             if((i_dirty>>hr)&1) {
3980               if(i_regmap[hr]<64) {
3981                 emit_storereg(i_regmap[hr],hr);
3982               }else{
3983                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3984                   emit_storereg(i_regmap[hr],hr);
3985                 }
3986               }
3987             }
3988           }
3989         }
3990       }
3991     }
3992   }
3993 }
3994
3995 // Load all registers (except cycle count)
3996 void load_all_regs(signed char i_regmap[])
3997 {
3998   int hr;
3999   for(hr=0;hr<HOST_REGS;hr++) {
4000     if(hr!=EXCLUDE_REG) {
4001       if(i_regmap[hr]==0) {
4002         emit_zeroreg(hr);
4003       }
4004       else
4005       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4006       {
4007         emit_loadreg(i_regmap[hr],hr);
4008       }
4009     }
4010   }
4011 }
4012
4013 // Load all current registers also needed by next instruction
4014 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4015 {
4016   int hr;
4017   for(hr=0;hr<HOST_REGS;hr++) {
4018     if(hr!=EXCLUDE_REG) {
4019       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4020         if(i_regmap[hr]==0) {
4021           emit_zeroreg(hr);
4022         }
4023         else
4024         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4025         {
4026           emit_loadreg(i_regmap[hr],hr);
4027         }
4028       }
4029     }
4030   }
4031 }
4032
4033 // Load all regs, storing cycle count if necessary
4034 void load_regs_entry(int t)
4035 {
4036   int hr;
4037   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4038   else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
4039   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4040     emit_storereg(CCREG,HOST_CCREG);
4041   }
4042   // Load 32-bit regs
4043   for(hr=0;hr<HOST_REGS;hr++) {
4044     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4045       if(regs[t].regmap_entry[hr]==0) {
4046         emit_zeroreg(hr);
4047       }
4048       else if(regs[t].regmap_entry[hr]!=CCREG)
4049       {
4050         emit_loadreg(regs[t].regmap_entry[hr],hr);
4051       }
4052     }
4053   }
4054   // Load 64-bit regs
4055   for(hr=0;hr<HOST_REGS;hr++) {
4056     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4057       assert(regs[t].regmap_entry[hr]!=64);
4058       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4059         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4060         if(lr<0) {
4061           emit_loadreg(regs[t].regmap_entry[hr],hr);
4062         }
4063         else
4064         {
4065           emit_sarimm(lr,31,hr);
4066         }
4067       }
4068       else
4069       {
4070         emit_loadreg(regs[t].regmap_entry[hr],hr);
4071       }
4072     }
4073   }
4074 }
4075
4076 // Store dirty registers prior to branch
4077 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4078 {
4079   if(internal_branch(i_is32,addr))
4080   {
4081     int t=(addr-start)>>2;
4082     int hr;
4083     for(hr=0;hr<HOST_REGS;hr++) {
4084       if(hr!=EXCLUDE_REG) {
4085         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4086           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4087             if((i_dirty>>hr)&1) {
4088               if(i_regmap[hr]<64) {
4089                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4090                   emit_storereg(i_regmap[hr],hr);
4091                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4092                     #ifdef DESTRUCTIVE_WRITEBACK
4093                     emit_sarimm(hr,31,hr);
4094                     emit_storereg(i_regmap[hr]|64,hr);
4095                     #else
4096                     emit_sarimm(hr,31,HOST_TEMPREG);
4097                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4098                     #endif
4099                   }
4100                 }
4101               }else{
4102                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4103                   emit_storereg(i_regmap[hr],hr);
4104                 }
4105               }
4106             }
4107           }
4108         }
4109       }
4110     }
4111   }
4112   else
4113   {
4114     // Branch out of this block, write out all dirty regs
4115     wb_dirtys(i_regmap,i_is32,i_dirty);
4116   }
4117 }
4118
4119 // Load all needed registers for branch target
4120 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4121 {
4122   //if(addr>=start && addr<(start+slen*4))
4123   if(internal_branch(i_is32,addr))
4124   {
4125     int t=(addr-start)>>2;
4126     int hr;
4127     // Store the cycle count before loading something else
4128     if(i_regmap[HOST_CCREG]!=CCREG) {
4129       assert(i_regmap[HOST_CCREG]==-1);
4130     }
4131     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4132       emit_storereg(CCREG,HOST_CCREG);
4133     }
4134     // Load 32-bit regs
4135     for(hr=0;hr<HOST_REGS;hr++) {
4136       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4137         #ifdef DESTRUCTIVE_WRITEBACK
4138         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4139         #else
4140         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4141         #endif
4142           if(regs[t].regmap_entry[hr]==0) {
4143             emit_zeroreg(hr);
4144           }
4145           else if(regs[t].regmap_entry[hr]!=CCREG)
4146           {
4147             emit_loadreg(regs[t].regmap_entry[hr],hr);
4148           }
4149         }
4150       }
4151     }
4152     // Load 64-bit regs
4153     for(hr=0;hr<HOST_REGS;hr++) {
4154       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4155         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4156           assert(regs[t].regmap_entry[hr]!=64);
4157           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4158             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4159             if(lr<0) {
4160               emit_loadreg(regs[t].regmap_entry[hr],hr);
4161             }
4162             else
4163             {
4164               emit_sarimm(lr,31,hr);
4165             }
4166           }
4167           else
4168           {
4169             emit_loadreg(regs[t].regmap_entry[hr],hr);
4170           }
4171         }
4172         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4173           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4174           assert(lr>=0);
4175           emit_sarimm(lr,31,hr);
4176         }
4177       }
4178     }
4179   }
4180 }
4181
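// Check whether the current register state matches the mapping expected
// at the branch target, so the branch can be taken without further
// writeback or reloads.  Targets outside this block only match if no
// dirty registers (other than the cycle count) would need writing back.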
4182 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4183 {
4184   if(addr>=start && addr<start+slen*4-4)
4185   {
4186     int t=(addr-start)>>2;
4187     int hr;
4188     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4189     for(hr=0;hr<HOST_REGS;hr++)
4190     {
4191       if(hr!=EXCLUDE_REG)
4192       {
4193         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4194         {
4195           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4196           {
4197             return 0;
4198           }
4199           else
4200           if((i_dirty>>hr)&1)
4201           {
4202             if(i_regmap[hr]<TEMPREG)
4203             {
4204               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4205                 return 0;
4206             }
4207             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4208             {
4209               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4210                 return 0;
4211             }
4212           }
4213         }
4214         else // Same register but is it 32-bit or dirty?
4215         if(i_regmap[hr]>=0)
4216         {
4217           if(!((regs[t].dirty>>hr)&1))
4218           {
4219             if((i_dirty>>hr)&1)
4220             {
4221               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4222               {
4223                 //printf("%x: dirty no match\n",addr);
4224                 return 0;
4225               }
4226             }
4227           }
4228           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4229           {
4230             //printf("%x: is32 no match\n",addr);
4231             return 0;
4232           }
4233         }
4234       }
4235     }
4236     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4237     // Delay slots are not valid branch targets
4238     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4239     // Delay slots require additional processing, so do not match
4240     if(is_ds[t]) return 0;
4241   }
4242   else
4243   {
4244     int hr;
4245     for(hr=0;hr<HOST_REGS;hr++)
4246     {
4247       if(hr!=EXCLUDE_REG)
4248       {
4249         if(i_regmap[hr]>=0)
4250         {
4251           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4252           {
4253             if((i_dirty>>hr)&1)
4254             {
4255               return 0;
4256             }
4257           }
4258         }
4259       }
4260     }
4261   }
4262   return 1;
4263 }
4264
4265 // Used when a branch jumps into the delay slot of another branch
4266 void ds_assemble_entry(int i)
4267 {
4268   int t=(ba[i]-start)>>2;
4269   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4270   assem_debug("Assemble delay slot at %x\n",ba[i]);
4271   assem_debug("<->\n");
4272   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4273     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4274   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4275   address_generation(t,&regs[t],regs[t].regmap_entry);
4276   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4277     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4278   cop1_usable=0;
4279   is_delayslot=0;
4280   switch(itype[t]) {
4281     case ALU:
4282       alu_assemble(t,&regs[t]);break;
4283     case IMM16:
4284       imm16_assemble(t,&regs[t]);break;
4285     case SHIFT:
4286       shift_assemble(t,&regs[t]);break;
4287     case SHIFTIMM:
4288       shiftimm_assemble(t,&regs[t]);break;
4289     case LOAD:
4290       load_assemble(t,&regs[t]);break;
4291     case LOADLR:
4292       loadlr_assemble(t,&regs[t]);break;
4293     case STORE:
4294       store_assemble(t,&regs[t]);break;
4295     case STORELR:
4296       storelr_assemble(t,&regs[t]);break;
4297     case COP0:
4298       cop0_assemble(t,&regs[t]);break;
4299     case COP1:
4300       cop1_assemble(t,&regs[t]);break;
4301     case C1LS:
4302       c1ls_assemble(t,&regs[t]);break;
4303     case COP2:
4304       cop2_assemble(t,&regs[t]);break;
4305     case C2LS:
4306       c2ls_assemble(t,&regs[t]);break;
4307     case C2OP:
4308       c2op_assemble(t,&regs[t]);break;
4309     case FCONV:
4310       fconv_assemble(t,&regs[t]);break;
4311     case FLOAT:
4312       float_assemble(t,&regs[t]);break;
4313     case FCOMP:
4314       fcomp_assemble(t,&regs[t]);break;
4315     case MULTDIV:
4316       multdiv_assemble(t,&regs[t]);break;
4317     case MOV:
4318       mov_assemble(t,&regs[t]);break;
4319     case SYSCALL:
4320     case HLECALL:
4321     case INTCALL:
4322     case SPAN:
4323     case UJUMP:
4324     case RJUMP:
4325     case CJUMP:
4326     case SJUMP:
4327     case FJUMP:
4328       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4329   }
4330   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4331   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4332   if(internal_branch(regs[t].is32,ba[i]+4))
4333     assem_debug("branch: internal\n");
4334   else
4335     assem_debug("branch: external\n");
4336   assert(internal_branch(regs[t].is32,ba[i]+4));
4337   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4338   emit_jmp(0);
4339 }
4340
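// Emit the cycle count check for a branch: compute the adjustment for
// the target, detect idle loops, test the remaining cycles and register
// a CC_STUB to take when the count runs out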
4341 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4342 {
4343   int count;
4344   int jaddr;
4345   int idle=0;
4346   int t=0;
4347   if(itype[i]==RJUMP)
4348   {
4349     *adj=0;
4350   }
4351   //if(ba[i]>=start && ba[i]<(start+slen*4))
4352   if(internal_branch(branch_regs[i].is32,ba[i]))
4353   {
4354     t=(ba[i]-start)>>2;
4355     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4356     else *adj=ccadj[t];
4357   }
4358   else
4359   {
4360     *adj=0;
4361   }
4362   count=ccadj[i];
4363   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4364     // Idle loop
4365     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4366     idle=(int)out;
4367     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4368     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4369     jaddr=(int)out;
4370     emit_jmp(0);
4371   }
4372   else if(*adj==0||invert) {
4373     int cycles=CLOCK_ADJUST(count+2);
4374     // faster loop HACK
4375     if (t&&*adj) {
4376       int rel=t-i;
4377       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4378         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4379     }
4380     emit_addimm_and_set_flags(cycles,HOST_CCREG);
4381     jaddr=(int)out;
4382     emit_jns(0);
4383   }
4384   else
4385   {
4386     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4387     jaddr=(int)out;
4388     emit_jns(0);
4389   }
4390   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4391 }
4392
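// Out-of-line path taken when the cycle count expires at a branch.
// Writes back dirty registers, stores the return PC in pcaddr (for
// conditional branches the taken/not-taken address is recomputed with
// conditional moves), calls cc_interrupt, then reloads registers and
// jumps back to the compiled code.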
4393 void do_ccstub(int n)
4394 {
4395   literal_pool(256);
4396   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4397   set_jump_target(stubs[n][1],(int)out);
4398   int i=stubs[n][4];
4399   if(stubs[n][6]==NULLDS) {
4400     // Delay slot instruction is nullified ("likely" branch)
4401     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4402   }
4403   else if(stubs[n][6]!=TAKEN) {
4404     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4405   }
4406   else {
4407     if(internal_branch(branch_regs[i].is32,ba[i]))
4408       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4409   }
4410   if(stubs[n][5]!=-1)
4411   {
4412     // Save PC as return address
4413     emit_movimm(stubs[n][5],EAX);
4414     emit_writeword(EAX,(int)&pcaddr);
4415   }
4416   else
4417   {
4418     // Return address depends on which way the branch goes
4419     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4420     {
4421       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4422       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4423       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4424       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4425       if(rs1[i]==0)
4426       {
4427         s1l=s2l;s1h=s2h;
4428         s2l=s2h=-1;
4429       }
4430       else if(rs2[i]==0)
4431       {
4432         s2l=s2h=-1;
4433       }
4434       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4435         s1h=s2h=-1;
4436       }
4437       assert(s1l>=0);
4438       #ifdef DESTRUCTIVE_WRITEBACK
4439       if(rs1[i]) {
4440         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4441           emit_loadreg(rs1[i],s1l);
4442       }
4443       else {
4444         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4445           emit_loadreg(rs2[i],s1l);
4446       }
4447       if(s2l>=0)
4448         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4449           emit_loadreg(rs2[i],s2l);
4450       #endif
4451       int hr=0;
4452       int addr=-1,alt=-1,ntaddr=-1;
4453       while(hr<HOST_REGS)
4454       {
4455         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4456            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4457            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4458         {
4459           addr=hr++;break;
4460         }
4461         hr++;
4462       }
4463       while(hr<HOST_REGS)
4464       {
4465         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4466            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4467            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4468         {
4469           alt=hr++;break;
4470         }
4471         hr++;
4472       }
4473       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4474       {
4475         while(hr<HOST_REGS)
4476         {
4477           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4478              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4479              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4480           {
4481             ntaddr=hr;break;
4482           }
4483           hr++;
4484         }
4485         assert(hr<HOST_REGS);
4486       }
4487       if((opcode[i]&0x2f)==4) // BEQ
4488       {
4489         #ifdef HAVE_CMOV_IMM
4490         if(s1h<0) {
4491           if(s2l>=0) emit_cmp(s1l,s2l);
4492           else emit_test(s1l,s1l);
4493           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4494         }
4495         else
4496         #endif
4497         {
4498           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4499           if(s1h>=0) {
4500             if(s2h>=0) emit_cmp(s1h,s2h);
4501             else emit_test(s1h,s1h);
4502             emit_cmovne_reg(alt,addr);
4503           }
4504           if(s2l>=0) emit_cmp(s1l,s2l);
4505           else emit_test(s1l,s1l);
4506           emit_cmovne_reg(alt,addr);
4507         }
4508       }
4509       if((opcode[i]&0x2f)==5) // BNE
4510       {
4511         #ifdef HAVE_CMOV_IMM
4512         if(s1h<0) {
4513           if(s2l>=0) emit_cmp(s1l,s2l);
4514           else emit_test(s1l,s1l);
4515           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4516         }
4517         else
4518         #endif
4519         {
4520           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4521           if(s1h>=0) {
4522             if(s2h>=0) emit_cmp(s1h,s2h);
4523             else emit_test(s1h,s1h);
4524             emit_cmovne_reg(alt,addr);
4525           }
4526           if(s2l>=0) emit_cmp(s1l,s2l);
4527           else emit_test(s1l,s1l);
4528           emit_cmovne_reg(alt,addr);
4529         }
4530       }
4531       if((opcode[i]&0x2f)==6) // BLEZ
4532       {
4533         //emit_movimm(ba[i],alt);
4534         //emit_movimm(start+i*4+8,addr);
4535         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4536         emit_cmpimm(s1l,1);
4537         if(s1h>=0) emit_mov(addr,ntaddr);
4538         emit_cmovl_reg(alt,addr);
4539         if(s1h>=0) {
4540           emit_test(s1h,s1h);
4541           emit_cmovne_reg(ntaddr,addr);
4542           emit_cmovs_reg(alt,addr);
4543         }
4544       }
4545       if((opcode[i]&0x2f)==7) // BGTZ
4546       {
4547         //emit_movimm(ba[i],addr);
4548         //emit_movimm(start+i*4+8,ntaddr);
4549         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4550         emit_cmpimm(s1l,1);
4551         if(s1h>=0) emit_mov(addr,alt);
4552         emit_cmovl_reg(ntaddr,addr);
4553         if(s1h>=0) {
4554           emit_test(s1h,s1h);
4555           emit_cmovne_reg(alt,addr);
4556           emit_cmovs_reg(ntaddr,addr);
4557         }
4558       }
4559       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4560       {
4561         //emit_movimm(ba[i],alt);
4562         //emit_movimm(start+i*4+8,addr);
4563         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4564         if(s1h>=0) emit_test(s1h,s1h);
4565         else emit_test(s1l,s1l);
4566         emit_cmovs_reg(alt,addr);
4567       }
4568       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4569       {
4570         //emit_movimm(ba[i],addr);
4571         //emit_movimm(start+i*4+8,alt);
4572         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4573         if(s1h>=0) emit_test(s1h,s1h);
4574         else emit_test(s1l,s1l);
4575         emit_cmovs_reg(alt,addr);
4576       }
4577       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4578         if(source[i]&0x10000) // BC1T
4579         {
4580           //emit_movimm(ba[i],alt);
4581           //emit_movimm(start+i*4+8,addr);
4582           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4583           emit_testimm(s1l,0x800000);
4584           emit_cmovne_reg(alt,addr);
4585         }
4586         else // BC1F
4587         {
4588           //emit_movimm(ba[i],addr);
4589           //emit_movimm(start+i*4+8,alt);
4590           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4591           emit_testimm(s1l,0x800000);
4592           emit_cmovne_reg(alt,addr);
4593         }
4594       }
4595       emit_writeword(addr,(int)&pcaddr);
4596     }
4597     else
4598     if(itype[i]==RJUMP)
4599     {
4600       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4601       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4602         r=get_reg(branch_regs[i].regmap,RTEMP);
4603       }
4604       emit_writeword(r,(int)&pcaddr);
4605     }
4606     else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
4607   }
4608   // Update cycle count
4609   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4610   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4611   emit_call((int)cc_interrupt);
4612   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4613   if(stubs[n][6]==TAKEN) {
4614     if(internal_branch(branch_regs[i].is32,ba[i]))
4615       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4616     else if(itype[i]==RJUMP) {
4617       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4618         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4619       else
4620         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4621     }
4622   }else if(stubs[n][6]==NOTTAKEN) {
4623     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4624     else load_all_regs(branch_regs[i].regmap);
4625   }else if(stubs[n][6]==NULLDS) {
4626     // Delay slot instruction is nullified ("likely" branch)
4627     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4628     else load_all_regs(regs[i].regmap);
4629   }else{
4630     load_all_regs(branch_regs[i].regmap);
4631   }
4632   emit_jmp(stubs[n][2]); // return address
4633
4634   /* This works but uses a lot of memory...
4635   emit_readword((int)&last_count,ECX);
4636   emit_add(HOST_CCREG,ECX,EAX);
4637   emit_writeword(EAX,(int)&Count);
4638   emit_call((int)gen_interupt);
4639   emit_readword((int)&Count,HOST_CCREG);
4640   emit_readword((int)&next_interupt,EAX);
4641   emit_readword((int)&pending_exception,EBX);
4642   emit_writeword(EAX,(int)&last_count);
4643   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4644   emit_test(EBX,EBX);
4645   int jne_instr=(int)out;
4646   emit_jne(0);
4647   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4648   load_all_regs(branch_regs[i].regmap);
4649   emit_jmp(stubs[n][2]); // return address
4650   set_jump_target(jne_instr,(int)out);
4651   emit_readword((int)&pcaddr,EAX);
4652   // Call get_addr_ht instead of doing the hash table here.
4653   // This code is executed infrequently and takes up a lot of space
4654   // so smaller is better.
4655   emit_storereg(CCREG,HOST_CCREG);
4656   emit_pushreg(EAX);
4657   emit_call((int)get_addr_ht);
4658   emit_loadreg(CCREG,HOST_CCREG);
4659   emit_addimm(ESP,4,ESP);
4660   emit_jmpreg(EAX);*/
4661 }
4662
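// Remember a branch location and its target so the branch can be
// patched (linked) once the target is available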
4663 static void add_to_linker(int addr,int target,int ext)
4664 {
4665   link_addr[linkcount][0]=addr;
4666   link_addr[linkcount][1]=target;
4667   link_addr[linkcount][2]=ext;
4668   linkcount++;
4669 }
4670
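// Write the return address (PC+8) into the link register for JAL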
4671 static void ujump_assemble_write_ra(int i)
4672 {
4673   int rt;
4674   unsigned int return_address;
4675   rt=get_reg(branch_regs[i].regmap,31);
4676   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4677   //assert(rt>=0);
4678   return_address=start+i*4+8;
4679   if(rt>=0) {
4680     #ifdef USE_MINI_HT
4681     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
4682       int temp=-1; // note: must be ds-safe
4683       #ifdef HOST_TEMPREG
4684       temp=HOST_TEMPREG;
4685       #endif
4686       if(temp>=0) do_miniht_insert(return_address,rt,temp);
4687       else emit_movimm(return_address,rt);
4688     }
4689     else
4690     #endif
4691     {
4692       #ifdef REG_PREFETCH
4693       if(temp>=0)
4694       {
4695         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4696       }
4697       #endif
4698       emit_movimm(return_address,rt); // PC into link register
4699       #ifdef IMM_PREFETCH
4700       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4701       #endif
4702     }
4703   }
4704 }
4705
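// Assemble J/JAL.  The return address is written before the delay slot
// if the slot reads $ra, otherwise after it; the branch ends either in
// a direct entry into an internal delay slot or a linkable jump.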
4706 void ujump_assemble(int i,struct regstat *i_regs)
4707 {
4708   int ra_done=0;
4709   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4710   address_generation(i+1,i_regs,regs[i].regmap_entry);
4711   #ifdef REG_PREFETCH
4712   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4713   if(rt1[i]==31&&temp>=0)
4714   {
4715     signed char *i_regmap=i_regs->regmap;
4716     int return_address=start+i*4+8;
4717     if(get_reg(branch_regs[i].regmap,31)>0)
4718     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4719   }
4720   #endif
4721   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4722     ujump_assemble_write_ra(i); // writeback ra for DS
4723     ra_done=1;
4724   }
4725   ds_assemble(i+1,i_regs);
4726   uint64_t bc_unneeded=branch_regs[i].u;
4727   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4728   bc_unneeded|=1|(1LL<<rt1[i]);
4729   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4730   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4731                 bc_unneeded,bc_unneeded_upper);
4732   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4733   if(!ra_done&&rt1[i]==31)
4734     ujump_assemble_write_ra(i);
4735   int cc,adj;
4736   cc=get_reg(branch_regs[i].regmap,CCREG);
4737   assert(cc==HOST_CCREG);
4738   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4739   #ifdef REG_PREFETCH
4740   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4741   #endif
4742   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4743   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4744   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4745   if(internal_branch(branch_regs[i].is32,ba[i]))
4746     assem_debug("branch: internal\n");
4747   else
4748     assem_debug("branch: external\n");
4749   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
4750     ds_assemble_entry(i);
4751   }
4752   else {
4753     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
4754     emit_jmp(0);
4755   }
4756 }
4757
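// Write the return address (PC+8) into rt for JALR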
4758 static void rjump_assemble_write_ra(int i)
4759 {
4760   int rt,return_address;
4761   assert(rt1[i+1]!=rt1[i]);
4762   assert(rt2[i+1]!=rt1[i]);
4763   rt=get_reg(branch_regs[i].regmap,rt1[i]);
4764   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4765   assert(rt>=0);
4766   return_address=start+i*4+8;
4767   #ifdef REG_PREFETCH
4768   if(temp>=0)
4769   {
4770     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4771   }
4772   #endif
4773   emit_movimm(return_address,rt); // PC into link register
4774   #ifdef IMM_PREFETCH
4775   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4776   #endif
4777 }
4778
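// Assemble JR/JALR.  If the delay slot overwrites the target register,
// the address is copied to RTEMP first.  After the delay slot and the
// cycle count update, control goes through jump_vaddr_reg (or the mini
// hash table path for jr $ra, when enabled).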
4779 void rjump_assemble(int i,struct regstat *i_regs)
4780 {
4781   int temp;
4782   int rs,cc;
4783   int ra_done=0;
4784   rs=get_reg(branch_regs[i].regmap,rs1[i]);
4785   assert(rs>=0);
4786   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4787     // Delay slot abuse, make a copy of the branch address register
4788     temp=get_reg(branch_regs[i].regmap,RTEMP);
4789     assert(temp>=0);
4790     assert(regs[i].regmap[temp]==RTEMP);
4791     emit_mov(rs,temp);
4792     rs=temp;
4793   }
4794   address_generation(i+1,i_regs,regs[i].regmap_entry);
4795   #ifdef REG_PREFETCH
4796   if(rt1[i]==31)
4797   {
4798     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
4799       signed char *i_regmap=i_regs->regmap;
4800       int return_address=start+i*4+8;
4801       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4802     }
4803   }
4804   #endif
4805   #ifdef USE_MINI_HT
4806   if(rs1[i]==31) {
4807     int rh=get_reg(regs[i].regmap,RHASH);
4808     if(rh>=0) do_preload_rhash(rh);
4809   }
4810   #endif
4811   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4812     rjump_assemble_write_ra(i);
4813     ra_done=1;
4814   }
4815   ds_assemble(i+1,i_regs);
4816   uint64_t bc_unneeded=branch_regs[i].u;
4817   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4818   bc_unneeded|=1|(1LL<<rt1[i]);
4819   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4820   bc_unneeded&=~(1LL<<rs1[i]);
4821   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4822                 bc_unneeded,bc_unneeded_upper);
4823   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
4824   if(!ra_done&&rt1[i]!=0)
4825     rjump_assemble_write_ra(i);
4826   cc=get_reg(branch_regs[i].regmap,CCREG);
4827   assert(cc==HOST_CCREG);
4828   (void)cc;
4829   #ifdef USE_MINI_HT
4830   int rh=get_reg(branch_regs[i].regmap,RHASH);
4831   int ht=get_reg(branch_regs[i].regmap,RHTBL);
4832   if(rs1[i]==31) {
4833     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
4834     do_preload_rhtbl(ht);
4835     do_rhash(rs,rh);
4836   }
4837   #endif
4838   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4839   #ifdef DESTRUCTIVE_WRITEBACK
4840   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
4841     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
4842       emit_loadreg(rs1[i],rs);
4843     }
4844   }
4845   #endif
4846   #ifdef REG_PREFETCH
4847   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4848   #endif
4849   #ifdef USE_MINI_HT
4850   if(rs1[i]==31) {
4851     do_miniht_load(ht,rh);
4852   }
4853   #endif
4854   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
4855   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
4856   //assert(adj==0);
4857   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4858   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
4859   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
4860     // special case for RFE
4861     emit_jmp(0);
4862   else
4863     emit_jns(0);
4864   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4865   #ifdef USE_MINI_HT
4866   if(rs1[i]==31) {
4867     do_miniht_jump(rs,rh,ht);
4868   }
4869   else
4870   #endif
4871   {
4872     //if(rs!=EAX) emit_mov(rs,EAX);
4873     //emit_jmp((int)jump_vaddr_eax);
4874     emit_jmp(jump_vaddr_reg[rs]);
4875   }
4876   /* Check hash table
4877   temp=!rs;
4878   emit_mov(rs,temp);
4879   emit_shrimm(rs,16,rs);
4880   emit_xor(temp,rs,rs);
4881   emit_movzwl_reg(rs,rs);
4882   emit_shlimm(rs,4,rs);
4883   emit_cmpmem_indexed((int)hash_table,rs,temp);
4884   emit_jne((int)out+14);
4885   emit_readword_indexed((int)hash_table+4,rs,rs);
4886   emit_jmpreg(rs);
4887   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
4888   emit_addimm_no_flags(8,rs);
4889   emit_jeq((int)out-17);
4890   // No hit on hash table, call compiler
4891   emit_pushreg(temp);
4892 //DEBUG >
4893 #ifdef DEBUG_CYCLE_COUNT
4894   emit_readword((int)&last_count,ECX);
4895   emit_add(HOST_CCREG,ECX,HOST_CCREG);
4896   emit_readword((int)&next_interupt,ECX);
4897   emit_writeword(HOST_CCREG,(int)&Count);
4898   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
4899   emit_writeword(ECX,(int)&last_count);
4900 #endif
4901 //DEBUG <
4902   emit_storereg(CCREG,HOST_CCREG);
4903   emit_call((int)get_addr);
4904   emit_loadreg(CCREG,HOST_CCREG);
4905   emit_addimm(ESP,4,ESP);
4906   emit_jmpreg(EAX);*/
4907   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4908   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
4909   #endif
4910 }
4911
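// Assemble BEQ/BNE/BLEZ/BGTZ.  Handles in-order and out-of-order
// (delay slot first) scheduling, 32-bit and 64-bit comparisons, and
// inverts the branch sense when the target's register mapping does
// not match.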
4912 void cjump_assemble(int i,struct regstat *i_regs)
4913 {
4914   signed char *i_regmap=i_regs->regmap;
4915   int cc;
4916   int match;
4917   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4918   assem_debug("match=%d\n",match);
4919   int s1h,s1l,s2h,s2l;
4920   int prev_cop1_usable=cop1_usable;
4921   int unconditional=0,nop=0;
4922   int only32=0;
4923   int invert=0;
4924   int internal=internal_branch(branch_regs[i].is32,ba[i]);
4925   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4926   if(!match) invert=1;
4927   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4928   if(i>(ba[i]-start)>>2) invert=1;
4929   #endif
4930
4931   if(ooo[i]) {
4932     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4933     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4934     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4935     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4936   }
4937   else {
4938     s1l=get_reg(i_regmap,rs1[i]);
4939     s1h=get_reg(i_regmap,rs1[i]|64);
4940     s2l=get_reg(i_regmap,rs2[i]);
4941     s2h=get_reg(i_regmap,rs2[i]|64);
4942   }
4943   if(rs1[i]==0&&rs2[i]==0)
4944   {
4945     if(opcode[i]&1) nop=1;
4946     else unconditional=1;
4947     //assert(opcode[i]!=5);
4948     //assert(opcode[i]!=7);
4949     //assert(opcode[i]!=0x15);
4950     //assert(opcode[i]!=0x17);
4951   }
4952   else if(rs1[i]==0)
4953   {
4954     s1l=s2l;s1h=s2h;
4955     s2l=s2h=-1;
4956     only32=(regs[i].was32>>rs2[i])&1;
4957   }
4958   else if(rs2[i]==0)
4959   {
4960     s2l=s2h=-1;
4961     only32=(regs[i].was32>>rs1[i])&1;
4962   }
4963   else {
4964     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
4965   }
4966
4967   if(ooo[i]) {
4968     // Out of order execution (delay slot first)
4969     //printf("OOOE\n");
4970     address_generation(i+1,i_regs,regs[i].regmap_entry);
4971     ds_assemble(i+1,i_regs);
4972     int adj;
4973     uint64_t bc_unneeded=branch_regs[i].u;
4974     uint64_t bc_unneeded_upper=branch_regs[i].uu;
4975     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
4976     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
4977     bc_unneeded|=1;
4978     bc_unneeded_upper|=1;
4979     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4980                   bc_unneeded,bc_unneeded_upper);
4981     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
4982     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4983     cc=get_reg(branch_regs[i].regmap,CCREG);
4984     assert(cc==HOST_CCREG);
4985     if(unconditional)
4986       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4987     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
4988     //assem_debug("cycle count (adj)\n");
4989     if(unconditional) {
4990       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4991       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
4992         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4993         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4994         if(internal)
4995           assem_debug("branch: internal\n");
4996         else
4997           assem_debug("branch: external\n");
4998         if(internal&&is_ds[(ba[i]-start)>>2]) {
4999           ds_assemble_entry(i);
5000         }
5001         else {
5002           add_to_linker((int)out,ba[i],internal);
5003           emit_jmp(0);
5004         }
5005         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5006         if(((u_int)out)&7) emit_addnop(0);
5007         #endif
5008       }
5009     }
5010     else if(nop) {
5011       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5012       int jaddr=(int)out;
5013       emit_jns(0);
5014       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5015     }
5016     else {
5017       int taken=0,nottaken=0,nottaken1=0;
5018       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5019       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5020       if(!only32)
5021       {
5022         assert(s1h>=0);
5023         if(opcode[i]==4) // BEQ
5024         {
5025           if(s2h>=0) emit_cmp(s1h,s2h);
5026           else emit_test(s1h,s1h);
5027           nottaken1=(int)out;
5028           emit_jne(1);
5029         }
5030         if(opcode[i]==5) // BNE
5031         {
5032           if(s2h>=0) emit_cmp(s1h,s2h);
5033           else emit_test(s1h,s1h);
5034           if(invert) taken=(int)out;
5035           else add_to_linker((int)out,ba[i],internal);
5036           emit_jne(0);
5037         }
5038         if(opcode[i]==6) // BLEZ
5039         {
5040           emit_test(s1h,s1h);
5041           if(invert) taken=(int)out;
5042           else add_to_linker((int)out,ba[i],internal);
5043           emit_js(0);
5044           nottaken1=(int)out;
5045           emit_jne(1);
5046         }
5047         if(opcode[i]==7) // BGTZ
5048         {
5049           emit_test(s1h,s1h);
5050           nottaken1=(int)out;
5051           emit_js(1);
5052           if(invert) taken=(int)out;
5053           else add_to_linker((int)out,ba[i],internal);
5054           emit_jne(0);
5055         }
5056       } // if(!only32)
5057
5058       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5059       assert(s1l>=0);
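           // When invert is set, the sense of the test is reversed so that the
           // taken path falls through into the writeback/link code further down,
           // e.g. for BEQ:  cmp s1,s2; jne skip; <writeback, jump to ba[i]>; skip:
           // Without invert the taken branch is emitted directly:
           //                cmp s1,s2; jeq <linker entry for ba[i]>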
5060       if(opcode[i]==4) // BEQ
5061       {
5062         if(s2l>=0) emit_cmp(s1l,s2l);
5063         else emit_test(s1l,s1l);
5064         if(invert){
5065           nottaken=(int)out;
5066           emit_jne(1);
5067         }else{
5068           add_to_linker((int)out,ba[i],internal);
5069           emit_jeq(0);
5070         }
5071       }
5072       if(opcode[i]==5) // BNE
5073       {
5074         if(s2l>=0) emit_cmp(s1l,s2l);
5075         else emit_test(s1l,s1l);
5076         if(invert){
5077           nottaken=(int)out;
5078           emit_jeq(1);
5079         }else{
5080           add_to_linker((int)out,ba[i],internal);
5081           emit_jne(0);
5082         }
5083       }
5084       if(opcode[i]==6) // BLEZ
5085       {
5086         emit_cmpimm(s1l,1);
5087         if(invert){
5088           nottaken=(int)out;
5089           emit_jge(1);
5090         }else{
5091           add_to_linker((int)out,ba[i],internal);
5092           emit_jl(0);
5093         }
5094       }
5095       if(opcode[i]==7) // BGTZ
5096       {
5097         emit_cmpimm(s1l,1);
5098         if(invert){
5099           nottaken=(int)out;
5100           emit_jl(1);
5101         }else{
5102           add_to_linker((int)out,ba[i],internal);
5103           emit_jge(0);
5104         }
5105       }
5106       if(invert) {
5107         if(taken) set_jump_target(taken,(int)out);
5108         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5109         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5110           if(adj) {
5111             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5112             add_to_linker((int)out,ba[i],internal);
5113           }else{
5114             emit_addnop(13);
5115             add_to_linker((int)out,ba[i],internal*2);
5116           }
5117           emit_jmp(0);
5118         }else
5119         #endif
5120         {
5121           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5122           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5123           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5124           if(internal)
5125             assem_debug("branch: internal\n");
5126           else
5127             assem_debug("branch: external\n");
5128           if(internal&&is_ds[(ba[i]-start)>>2]) {
5129             ds_assemble_entry(i);
5130           }
5131           else {
5132             add_to_linker((int)out,ba[i],internal);
5133             emit_jmp(0);
5134           }
5135         }
5136         set_jump_target(nottaken,(int)out);
5137       }
5138
5139       if(nottaken1) set_jump_target(nottaken1,(int)out);
5140       if(adj) {
5141         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5142       }
5143     } // (!unconditional)
5144   } // if(ooo)
5145   else
5146   {
5147     // In-order execution (branch first)
5148     //if(likely[i]) printf("IOL\n");
5149     //else
5150     //printf("IOE\n");
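         // The condition is evaluated before the delay slot runs: the delay-slot
         // instruction is assembled on the taken path below and again on the
         // fall-through path unless this is a "likely" branch.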
5151     int taken=0,nottaken=0,nottaken1=0;
5152     if(!unconditional&&!nop) {
5153       if(!only32)
5154       {
5155         assert(s1h>=0);
5156         if((opcode[i]&0x2f)==4) // BEQ
5157         {
5158           if(s2h>=0) emit_cmp(s1h,s2h);
5159           else emit_test(s1h,s1h);
5160           nottaken1=(int)out;
5161           emit_jne(2);
5162         }
5163         if((opcode[i]&0x2f)==5) // BNE
5164         {
5165           if(s2h>=0) emit_cmp(s1h,s2h);
5166           else emit_test(s1h,s1h);
5167           taken=(int)out;
5168           emit_jne(1);
5169         }
5170         if((opcode[i]&0x2f)==6) // BLEZ
5171         {
5172           emit_test(s1h,s1h);
5173           taken=(int)out;
5174           emit_js(1);
5175           nottaken1=(int)out;
5176           emit_jne(2);
5177         }
5178         if((opcode[i]&0x2f)==7) // BGTZ
5179         {
5180           emit_test(s1h,s1h);
5181           nottaken1=(int)out;
5182           emit_js(2);
5183           taken=(int)out;
5184           emit_jne(1);
5185         }
5186       } // if(!only32)
5187
5188       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5189       assert(s1l>=0);
5190       if((opcode[i]&0x2f)==4) // BEQ
5191       {
5192         if(s2l>=0) emit_cmp(s1l,s2l);
5193         else emit_test(s1l,s1l);
5194         nottaken=(int)out;
5195         emit_jne(2);
5196       }
5197       if((opcode[i]&0x2f)==5) // BNE
5198       {
5199         if(s2l>=0) emit_cmp(s1l,s2l);
5200         else emit_test(s1l,s1l);
5201         nottaken=(int)out;
5202         emit_jeq(2);
5203       }
5204       if((opcode[i]&0x2f)==6) // BLEZ
5205       {
5206         emit_cmpimm(s1l,1);
5207         nottaken=(int)out;
5208         emit_jge(2);
5209       }
5210       if((opcode[i]&0x2f)==7) // BGTZ
5211       {
5212         emit_cmpimm(s1l,1);
5213         nottaken=(int)out;
5214         emit_jl(2);
5215       }
5216     } // if(!unconditional)
5217     int adj;
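         // ds_unneeded: registers dead after the branch, with the delay-slot
         // sources kept live; if the delay-slot result's upper half is still
         // needed, its dependencies (dep1/dep2) are kept live as well.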
5218     uint64_t ds_unneeded=branch_regs[i].u;
5219     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5220     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5221     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5222     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5223     ds_unneeded|=1;
5224     ds_unneeded_upper|=1;
5225     // branch taken
5226     if(!nop) {
5227       if(taken) set_jump_target(taken,(int)out);
5228       assem_debug("1:\n");
5229       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5230                     ds_unneeded,ds_unneeded_upper);
5231       // load regs
5232       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5233       address_generation(i+1,&branch_regs[i],0);
5234       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5235       ds_assemble(i+1,&branch_regs[i]);
5236       cc=get_reg(branch_regs[i].regmap,CCREG);
5237       if(cc==-1) {
5238         emit_loadreg(CCREG,cc=HOST_CCREG);
5239         // CHECK: Is the following instruction (fall thru) allocated ok?
5240       }
5241       assert(cc==HOST_CCREG);
5242       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5243       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5244       assem_debug("cycle count (adj)\n");
5245       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5246       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5247       if(internal)
5248         assem_debug("branch: internal\n");
5249       else
5250         assem_debug("branch: external\n");
5251       if(internal&&is_ds[(ba[i]-start)>>2]) {
5252         ds_assemble_entry(i);
5253       }
5254       else {
5255         add_to_linker((int)out,ba[i],internal);
5256         emit_jmp(0);
5257       }
5258     }
5259     // branch not taken
5260     cop1_usable=prev_cop1_usable;
5261     if(!unconditional) {
5262       if(nottaken1) set_jump_target(nottaken1,(int)out);
5263       set_jump_target(nottaken,(int)out);
5264       assem_debug("2:\n");
5265       if(!likely[i]) {
5266         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5267                       ds_unneeded,ds_unneeded_upper);
5268         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5269         address_generation(i+1,&branch_regs[i],0);
5270         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5271         ds_assemble(i+1,&branch_regs[i]);
5272       }
5273       cc=get_reg(branch_regs[i].regmap,CCREG);
5274       if(cc==-1&&!likely[i]) {
5275         // Cycle count isn't in a register, temporarily load it then write it out
5276         emit_loadreg(CCREG,HOST_CCREG);
5277         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5278         int jaddr=(int)out;
5279         emit_jns(0);
5280         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5281         emit_storereg(CCREG,HOST_CCREG);
5282       }
5283       else{
5284         cc=get_reg(i_regmap,CCREG);
5285         assert(cc==HOST_CCREG);
5286         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5287         int jaddr=(int)out;
5288         emit_jns(0);
5289         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5290       }
5291     }
5292   }
5293 }
5294
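     // Assemble REGIMM branches (BLTZ/BGEZ and their linking/likely variants),
     // which compare rs1 against zero; the AL forms also write the return
     // address (PC+8) into r31 whether or not the branch is taken.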
5295 void sjump_assemble(int i,struct regstat *i_regs)
5296 {
5297   signed char *i_regmap=i_regs->regmap;
5298   int cc;
5299   int match;
5300   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5301   assem_debug("smatch=%d\n",match);
5302   int s1h,s1l;
5303   int prev_cop1_usable=cop1_usable;
5304   int unconditional=0,nevertaken=0;
5305   int only32=0;
5306   int invert=0;
5307   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5308   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5309   if(!match) invert=1;
5310   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5311   if(i>(ba[i]-start)>>2) invert=1;
5312   #endif
5313
5314   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5315   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5316
5317   if(ooo[i]) {
5318     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5319     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5320   }
5321   else {
5322     s1l=get_reg(i_regmap,rs1[i]);
5323     s1h=get_reg(i_regmap,rs1[i]|64);
5324   }
5325   if(rs1[i]==0)
5326   {
5327     if(opcode2[i]&1) unconditional=1;
5328     else nevertaken=1;
5329     // These are never taken (r0 is never less than zero)
5330     //assert(opcode2[i]!=0);
5331     //assert(opcode2[i]!=2);
5332     //assert(opcode2[i]!=0x10);
5333     //assert(opcode2[i]!=0x12);
5334   }
5335   else {
5336     only32=(regs[i].was32>>rs1[i])&1;
5337   }
5338
5339   if(ooo[i]) {
5340     // Out of order execution (delay slot first)
5341     //printf("OOOE\n");
5342     address_generation(i+1,i_regs,regs[i].regmap_entry);
5343     ds_assemble(i+1,i_regs);
5344     int adj;
5345     uint64_t bc_unneeded=branch_regs[i].u;
5346     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5347     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5348     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5349     bc_unneeded|=1;
5350     bc_unneeded_upper|=1;
5351     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5352                   bc_unneeded,bc_unneeded_upper);
5353     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5354     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5355     if(rt1[i]==31) {
5356       int rt,return_address;
5357       rt=get_reg(branch_regs[i].regmap,31);
5358       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5359       if(rt>=0) {
5360         // Save the PC even if the branch is not taken
5361         return_address=start+i*4+8;
5362         emit_movimm(return_address,rt); // PC into link register
5363         #ifdef IMM_PREFETCH
5364         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5365         #endif
5366       }
5367     }
5368     cc=get_reg(branch_regs[i].regmap,CCREG);
5369     assert(cc==HOST_CCREG);
5370     if(unconditional)
5371       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5372     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5373     assem_debug("cycle count (adj)\n");
5374     if(unconditional) {
5375       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5376       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5377         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5378         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5379         if(internal)
5380           assem_debug("branch: internal\n");
5381         else
5382           assem_debug("branch: external\n");
5383         if(internal&&is_ds[(ba[i]-start)>>2]) {
5384           ds_assemble_entry(i);
5385         }
5386         else {
5387           add_to_linker((int)out,ba[i],internal);
5388           emit_jmp(0);
5389         }
5390         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5391         if(((u_int)out)&7) emit_addnop(0);
5392         #endif
5393       }
5394     }
5395     else if(nevertaken) {
5396       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5397       int jaddr=(int)out;
5398       emit_jns(0);
5399       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5400     }
5401     else {
5402       int nottaken=0;
5403       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5404       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5405       if(!only32)
5406       {
5407         assert(s1h>=0);
5408         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5409         {
5410           emit_test(s1h,s1h);
5411           if(invert){
5412             nottaken=(int)out;
5413             emit_jns(1);
5414           }else{
5415             add_to_linker((int)out,ba[i],internal);
5416             emit_js(0);
5417           }
5418         }
5419         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5420         {
5421           emit_test(s1h,s1h);
5422           if(invert){
5423             nottaken=(int)out;
5424             emit_js(1);
5425           }else{
5426             add_to_linker((int)out,ba[i],internal);
5427             emit_jns(0);
5428           }
5429         }
5430       } // if(!only32)
5431       else
5432       {
5433         assert(s1l>=0);
5434         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5435         {
5436           emit_test(s1l,s1l);
5437           if(invert){
5438             nottaken=(int)out;
5439             emit_jns(1);
5440           }else{
5441             add_to_linker((int)out,ba[i],internal);
5442             emit_js(0);
5443           }
5444         }
5445         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5446         {
5447           emit_test(s1l,s1l);
5448           if(invert){
5449             nottaken=(int)out;
5450             emit_js(1);
5451           }else{
5452             add_to_linker((int)out,ba[i],internal);
5453             emit_jns(0);
5454           }
5455         }
5456       } // if(!only32)
5457
5458       if(invert) {
5459         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5460         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5461           if(adj) {
5462             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5463             add_to_linker((int)out,ba[i],internal);
5464           }else{
5465             emit_addnop(13);
5466             add_to_linker((int)out,ba[i],internal*2);
5467           }
5468           emit_jmp(0);
5469         }else
5470         #endif
5471         {
5472           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5473           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5474           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5475           if(internal)
5476             assem_debug("branch: internal\n");
5477           else
5478             assem_debug("branch: external\n");
5479           if(internal&&is_ds[(ba[i]-start)>>2]) {
5480             ds_assemble_entry(i);
5481           }
5482           else {
5483             add_to_linker((int)out,ba[i],internal);
5484             emit_jmp(0);
5485           }
5486         }
5487         set_jump_target(nottaken,(int)out);
5488       }
5489
5490       if(adj) {
5491         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5492       }
5493     } // (!unconditional)
5494   } // if(ooo)
5495   else
5496   {
5497     // In-order execution (branch first)
5498     //printf("IOE\n");
5499     int nottaken=0;
5500     if(rt1[i]==31) {
5501       int rt,return_address;
5502       rt=get_reg(branch_regs[i].regmap,31);
5503       if(rt>=0) {
5504         // Save the PC even if the branch is not taken
5505         return_address=start+i*4+8;
5506         emit_movimm(return_address,rt); // PC into link register
5507         #ifdef IMM_PREFETCH
5508         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5509         #endif
5510       }
5511     }
5512     if(!unconditional) {
5513       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5514       if(!only32)
5515       {
5516         assert(s1h>=0);
5517         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5518         {
5519           emit_test(s1h,s1h);
5520           nottaken=(int)out;
5521           emit_jns(1);
5522         }
5523         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5524         {
5525           emit_test(s1h,s1h);
5526           nottaken=(int)out;
5527           emit_js(1);
5528         }
5529       } // if(!only32)
5530       else
5531       {
5532         assert(s1l>=0);
5533         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5534         {
5535           emit_test(s1l,s1l);
5536           nottaken=(int)out;
5537           emit_jns(1);
5538         }
5539         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5540         {
5541           emit_test(s1l,s1l);
5542           nottaken=(int)out;
5543           emit_js(1);
5544         }
5545       }
5546     } // if(!unconditional)
5547     int adj;
5548     uint64_t ds_unneeded=branch_regs[i].u;
5549     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5550     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5551     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5552     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5553     ds_unneeded|=1;
5554     ds_unneeded_upper|=1;
5555     // branch taken
5556     if(!nevertaken) {
5557       //assem_debug("1:\n");
5558       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5559                     ds_unneeded,ds_unneeded_upper);
5560       // load regs
5561       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5562       address_generation(i+1,&branch_regs[i],0);
5563       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5564       ds_assemble(i+1,&branch_regs[i]);
5565       cc=get_reg(branch_regs[i].regmap,CCREG);
5566       if(cc==-1) {
5567         emit_loadreg(CCREG,cc=HOST_CCREG);
5568         // CHECK: Is the following instruction (fall thru) allocated ok?
5569       }
5570       assert(cc==HOST_CCREG);
5571       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5572       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5573       assem_debug("cycle count (adj)\n");
5574       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5575       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5576       if(internal)
5577         assem_debug("branch: internal\n");
5578       else
5579         assem_debug("branch: external\n");
5580       if(internal&&is_ds[(ba[i]-start)>>2]) {
5581         ds_assemble_entry(i);
5582       }
5583       else {
5584         add_to_linker((int)out,ba[i],internal);
5585         emit_jmp(0);
5586       }
5587     }
5588     // branch not taken
5589     cop1_usable=prev_cop1_usable;
5590     if(!unconditional) {
5591       set_jump_target(nottaken,(int)out);
5592       assem_debug("1:\n");
5593       if(!likely[i]) {
5594         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5595                       ds_unneeded,ds_unneeded_upper);
5596         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5597         address_generation(i+1,&branch_regs[i],0);
5598         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5599         ds_assemble(i+1,&branch_regs[i]);
5600       }
5601       cc=get_reg(branch_regs[i].regmap,CCREG);
5602       if(cc==-1&&!likely[i]) {
5603         // Cycle count isn't in a register, temporarily load it then write it out
5604         emit_loadreg(CCREG,HOST_CCREG);
5605         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5606         int jaddr=(int)out;
5607         emit_jns(0);
5608         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5609         emit_storereg(CCREG,HOST_CCREG);
5610       }
5611       else{
5612         cc=get_reg(i_regmap,CCREG);
5613         assert(cc==HOST_CCREG);
5614         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5615         int jaddr=(int)out;
5616         emit_jns(0);
5617         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5618       }
5619     }
5620   }
5621 }
5622
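     // Assemble COP1 condition branches (BC1F/BC1T and the likely forms):
     // the FP condition flag (bit 23 of FSREG) is tested directly, after an
     // optional coprocessor-unusable check.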
5623 void fjump_assemble(int i,struct regstat *i_regs)
5624 {
5625   signed char *i_regmap=i_regs->regmap;
5626   int cc;
5627   int match;
5628   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5629   assem_debug("fmatch=%d\n",match);
5630   int fs,cs;
5631   int eaddr;
5632   int invert=0;
5633   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5634   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5635   if(!match) invert=1;
5636   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5637   if(i>(ba[i]-start)>>2) invert=1;
5638   #endif
5639
5640   if(ooo[i]) {
5641     fs=get_reg(branch_regs[i].regmap,FSREG);
5642     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5643   }
5644   else {
5645     fs=get_reg(i_regmap,FSREG);
5646   }
5647
5648   // Check cop1 unusable
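       // Tests the CU1 bit (0x20000000) of the status word cached in CSREG and
       // jumps to an FP_STUB when it is clear (COP1 disabled); cop1_usable is
       // then set so later instructions in this block skip the check.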
5649   if(!cop1_usable) {
5650     cs=get_reg(i_regmap,CSREG);
5651     assert(cs>=0);
5652     emit_testimm(cs,0x20000000);
5653     eaddr=(int)out;
5654     emit_jeq(0);
5655     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5656     cop1_usable=1;
5657   }
5658
5659   if(ooo[i]) {
5660     // Out of order execution (delay slot first)
5661     //printf("OOOE\n");
5662     ds_assemble(i+1,i_regs);
5663     int adj;
5664     uint64_t bc_unneeded=branch_regs[i].u;
5665     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5666     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5667     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5668     bc_unneeded|=1;
5669     bc_unneeded_upper|=1;
5670     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5671                   bc_unneeded,bc_unneeded_upper);
5672     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5673     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5674     cc=get_reg(branch_regs[i].regmap,CCREG);
5675     assert(cc==HOST_CCREG);
5676     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5677     assem_debug("cycle count (adj)\n");
5678     if(1) {
5679       int nottaken=0;
5680       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5681       if(1) {
5682         assert(fs>=0);
5683         emit_testimm(fs,0x800000);
5684         if(source[i]&0x10000) // BC1T
5685         {
5686           if(invert){
5687             nottaken=(int)out;
5688             emit_jeq(1);
5689           }else{
5690             add_to_linker((int)out,ba[i],internal);
5691             emit_jne(0);
5692           }
5693         }
5694         else // BC1F
5695         {
5696           if(invert){
5697             nottaken=(int)out;
5698             emit_jne(1);
5699           }else{
5700             add_to_linker((int)out,ba[i],internal);
5701             emit_jeq(0);
5702           }
5703         }
5704       } // if(!only32)
5705
5706       if(invert) {
5707         if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5708         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5709         else if(match) emit_addnop(13);
5710         #endif
5711         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5712         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5713         if(internal)
5714           assem_debug("branch: internal\n");
5715         else
5716           assem_debug("branch: external\n");
5717         if(internal&&is_ds[(ba[i]-start)>>2]) {
5718           ds_assemble_entry(i);
5719         }
5720         else {
5721           add_to_linker((int)out,ba[i],internal);
5722           emit_jmp(0);
5723         }
5724         set_jump_target(nottaken,(int)out);
5725       }
5726
5727       if(adj) {
5728         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5729       }
5730     } // (!unconditional)
5731   } // if(ooo)
5732   else
5733   {
5734     // In-order execution (branch first)
5735     //printf("IOE\n");
5736     int nottaken=0;
5737     if(1) {
5738       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5739       if(1) {
5740         assert(fs>=0);
5741         emit_testimm(fs,0x800000);
5742         if(source[i]&0x10000) // BC1T
5743         {
5744           nottaken=(int)out;
5745           emit_jeq(1);
5746         }
5747         else // BC1F
5748         {
5749           nottaken=(int)out;
5750           emit_jne(1);
5751         }
5752       }
5753     } // if(!unconditional)
5754     int adj;
5755     uint64_t ds_unneeded=branch_regs[i].u;
5756     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5757     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5758     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5759     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5760     ds_unneeded|=1;
5761     ds_unneeded_upper|=1;
5762     // branch taken
5763     //assem_debug("1:\n");
5764     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5765                   ds_unneeded,ds_unneeded_upper);
5766     // load regs
5767     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5768     address_generation(i+1,&branch_regs[i],0);
5769     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5770     ds_assemble(i+1,&branch_regs[i]);
5771     cc=get_reg(branch_regs[i].regmap,CCREG);
5772     if(cc==-1) {
5773       emit_loadreg(CCREG,cc=HOST_CCREG);
5774       // CHECK: Is the following instruction (fall thru) allocated ok?
5775     }
5776     assert(cc==HOST_CCREG);
5777     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5778     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5779     assem_debug("cycle count (adj)\n");
5780     if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5781     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5782     if(internal)
5783       assem_debug("branch: internal\n");
5784     else
5785       assem_debug("branch: external\n");
5786     if(internal&&is_ds[(ba[i]-start)>>2]) {
5787       ds_assemble_entry(i);
5788     }
5789     else {
5790       add_to_linker((int)out,ba[i],internal);
5791       emit_jmp(0);
5792     }
5793
5794     // branch not taken
5795     if(1) { // <- FIXME (don't need this)
5796       set_jump_target(nottaken,(int)out);
5797       assem_debug("1:\n");
5798       if(!likely[i]) {
5799         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5800                       ds_unneeded,ds_unneeded_upper);
5801         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5802         address_generation(i+1,&branch_regs[i],0);
5803         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5804         ds_assemble(i+1,&branch_regs[i]);
5805       }
5806       cc=get_reg(branch_regs[i].regmap,CCREG);
5807       if(cc==-1&&!likely[i]) {
5808         // Cycle count isn't in a register, temporarily load it then write it out
5809         emit_loadreg(CCREG,HOST_CCREG);
5810         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5811         int jaddr=(int)out;
5812         emit_jns(0);
5813         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5814         emit_storereg(CCREG,HOST_CCREG);
5815       }
5816       else{
5817         cc=get_reg(i_regmap,CCREG);
5818         assert(cc==HOST_CCREG);
5819         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5820         int jaddr=(int)out;
5821         emit_jns(0);
5822         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5823       }
5824     }
5825   }
5826 }
5827
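     // Assemble a branch at the end of the page, whose delay slot lies in the
     // next page: rather than emitting both code paths, the target address is
     // computed into HOST_BTREG with conditional moves and control leaves
     // through an external-jump stub; pagespan_ds() below picks up the saved
     // target when the delay slot is executed.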
5828 static void pagespan_assemble(int i,struct regstat *i_regs)
5829 {
5830   int s1l=get_reg(i_regs->regmap,rs1[i]);
5831   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
5832   int s2l=get_reg(i_regs->regmap,rs2[i]);
5833   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
5834   int taken=0;
5835   int nottaken=0;
5836   int unconditional=0;
5837   if(rs1[i]==0)
5838   {
5839     s1l=s2l;s1h=s2h;
5840     s2l=s2h=-1;
5841   }
5842   else if(rs2[i]==0)
5843   {
5844     s2l=s2h=-1;
5845   }
5846   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
5847     s1h=s2h=-1;
5848   }
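       // Pick scratch host registers (addr/alt/ntaddr) that do not hold the
       // branch sources, CCREG or BTREG; they receive the candidate target
       // addresses for the conditional moves below.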
5849   int hr=0;
5850   int addr=-1,alt=-1,ntaddr=-1;
5851   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
5852   else {
5853     while(hr<HOST_REGS)
5854     {
5855       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5856          (i_regs->regmap[hr]&63)!=rs1[i] &&
5857          (i_regs->regmap[hr]&63)!=rs2[i] )
5858       {
5859         addr=hr++;break;
5860       }
5861       hr++;
5862     }
5863   }
5864   while(hr<HOST_REGS)
5865   {
5866     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5867        (i_regs->regmap[hr]&63)!=rs1[i] &&
5868        (i_regs->regmap[hr]&63)!=rs2[i] )
5869     {
5870       alt=hr++;break;
5871     }
5872     hr++;
5873   }
5874   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5875   {
5876     while(hr<HOST_REGS)
5877     {
5878       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5879          (i_regs->regmap[hr]&63)!=rs1[i] &&
5880          (i_regs->regmap[hr]&63)!=rs2[i] )
5881       {
5882         ntaddr=hr;break;
5883       }
5884       hr++;
5885     }
5886   }
5887   assert(hr<HOST_REGS);
5888   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
5889     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
5890   }
5891   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5892   if(opcode[i]==2) // J
5893   {
5894     unconditional=1;
5895   }
5896   if(opcode[i]==3) // JAL
5897   {
5898     // TODO: mini_ht
5899     int rt=get_reg(i_regs->regmap,31);
5900     emit_movimm(start+i*4+8,rt);
5901     unconditional=1;
5902   }
5903   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
5904   {
5905     emit_mov(s1l,addr);
5906     if(opcode2[i]==9) // JALR
5907     {
5908       int rt=get_reg(i_regs->regmap,rt1[i]);
5909       emit_movimm(start+i*4+8,rt);
5910     }
5911   }
5912   if((opcode[i]&0x3f)==4) // BEQ
5913   {
5914     if(rs1[i]==rs2[i])
5915     {
5916       unconditional=1;
5917     }
5918     else
5919     #ifdef HAVE_CMOV_IMM
5920     if(s1h<0) {
5921       if(s2l>=0) emit_cmp(s1l,s2l);
5922       else emit_test(s1l,s1l);
5923       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5924     }
5925     else
5926     #endif
5927     {
5928       assert(s1l>=0);
5929       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5930       if(s1h>=0) {
5931         if(s2h>=0) emit_cmp(s1h,s2h);
5932         else emit_test(s1h,s1h);
5933         emit_cmovne_reg(alt,addr);
5934       }
5935       if(s2l>=0) emit_cmp(s1l,s2l);
5936       else emit_test(s1l,s1l);
5937       emit_cmovne_reg(alt,addr);
5938     }
5939   }
5940   if((opcode[i]&0x3f)==5) // BNE
5941   {
5942     #ifdef HAVE_CMOV_IMM
5943     if(s1h<0) {
5944       if(s2l>=0) emit_cmp(s1l,s2l);
5945       else emit_test(s1l,s1l);
5946       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5947     }
5948     else
5949     #endif
5950     {
5951       assert(s1l>=0);
5952       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5953       if(s1h>=0) {
5954         if(s2h>=0) emit_cmp(s1h,s2h);
5955         else emit_test(s1h,s1h);
5956         emit_cmovne_reg(alt,addr);
5957       }
5958       if(s2l>=0) emit_cmp(s1l,s2l);
5959       else emit_test(s1l,s1l);
5960       emit_cmovne_reg(alt,addr);
5961     }
5962   }
5963   if((opcode[i]&0x3f)==0x14) // BEQL
5964   {
5965     if(s1h>=0) {
5966       if(s2h>=0) emit_cmp(s1h,s2h);
5967       else emit_test(s1h,s1h);
5968       nottaken=(int)out;
5969       emit_jne(0);
5970     }
5971     if(s2l>=0) emit_cmp(s1l,s2l);
5972     else emit_test(s1l,s1l);
5973     if(nottaken) set_jump_target(nottaken,(int)out);
5974     nottaken=(int)out;
5975     emit_jne(0);
5976   }
5977   if((opcode[i]&0x3f)==0x15) // BNEL
5978   {
5979     if(s1h>=0) {
5980       if(s2h>=0) emit_cmp(s1h,s2h);
5981       else emit_test(s1h,s1h);
5982       taken=(int)out;
5983       emit_jne(0);
5984     }
5985     if(s2l>=0) emit_cmp(s1l,s2l);
5986     else emit_test(s1l,s1l);
5987     nottaken=(int)out;
5988     emit_jeq(0);
5989     if(taken) set_jump_target(taken,(int)out);
5990   }
5991   if((opcode[i]&0x3f)==6) // BLEZ
5992   {
5993     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5994     emit_cmpimm(s1l,1);
5995     if(s1h>=0) emit_mov(addr,ntaddr);
5996     emit_cmovl_reg(alt,addr);
5997     if(s1h>=0) {
5998       emit_test(s1h,s1h);
5999       emit_cmovne_reg(ntaddr,addr);
6000       emit_cmovs_reg(alt,addr);
6001     }
6002   }
6003   if((opcode[i]&0x3f)==7) // BGTZ
6004   {
6005     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6006     emit_cmpimm(s1l,1);
6007     if(s1h>=0) emit_mov(addr,alt);
6008     emit_cmovl_reg(ntaddr,addr);
6009     if(s1h>=0) {
6010       emit_test(s1h,s1h);
6011       emit_cmovne_reg(alt,addr);
6012       emit_cmovs_reg(ntaddr,addr);
6013     }
6014   }
6015   if((opcode[i]&0x3f)==0x16) // BLEZL
6016   {
6017     assert((opcode[i]&0x3f)!=0x16);
6018   }
6019   if((opcode[i]&0x3f)==0x17) // BGTZL
6020   {
6021     assert((opcode[i]&0x3f)!=0x17);
6022   }
6023   assert(opcode[i]!=1); // BLTZ/BGEZ
6024
6025   //FIXME: Check CSREG
6026   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6027     if((source[i]&0x30000)==0) // BC1F
6028     {
6029       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6030       emit_testimm(s1l,0x800000);
6031       emit_cmovne_reg(alt,addr);
6032     }
6033     if((source[i]&0x30000)==0x10000) // BC1T
6034     {
6035       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6036       emit_testimm(s1l,0x800000);
6037       emit_cmovne_reg(alt,addr);
6038     }
6039     if((source[i]&0x30000)==0x20000) // BC1FL
6040     {
6041       emit_testimm(s1l,0x800000);
6042       nottaken=(int)out;
6043       emit_jne(0);
6044     }
6045     if((source[i]&0x30000)==0x30000) // BC1TL
6046     {
6047       emit_testimm(s1l,0x800000);
6048       nottaken=(int)out;
6049       emit_jeq(0);
6050     }
6051   }
6052
6053   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6054   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6055   if(likely[i]||unconditional)
6056   {
6057     emit_movimm(ba[i],HOST_BTREG);
6058   }
6059   else if(addr!=HOST_BTREG)
6060   {
6061     emit_mov(addr,HOST_BTREG);
6062   }
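       // Emit the page-spanning exit: a placeholder jump followed by an
       // external-jump stub for the next page's delay-slot entry (note the +1
       // on target_addr, matching the vaddr registered by pagespan_ds).  If
       // that entry is already compiled, patch the jump straight to it and
       // record a link; otherwise leave it pointing at the stub.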
6063   void *branch_addr=out;
6064   emit_jmp(0);
6065   int target_addr=start+i*4+5;
6066   void *stub=out;
6067   void *compiled_target_addr=check_addr(target_addr);
6068   emit_extjump_ds((int)branch_addr,target_addr);
6069   if(compiled_target_addr) {
6070     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6071     add_link(target_addr,stub);
6072   }
6073   else set_jump_target((int)branch_addr,(int)stub);
6074   if(likely[i]) {
6075     // Not-taken path
6076     set_jump_target((int)nottaken,(int)out);
6077     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6078     void *branch_addr=out;
6079     emit_jmp(0);
6080     int target_addr=start+i*4+8;
6081     void *stub=out;
6082     void *compiled_target_addr=check_addr(target_addr);
6083     emit_extjump_ds((int)branch_addr,target_addr);
6084     if(compiled_target_addr) {
6085       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6086       add_link(target_addr,stub);
6087     }
6088     else set_jump_target((int)branch_addr,(int)stub);
6089   }
6090 }
6091
6092 // Assemble the delay slot for the above
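     // The initial delay slot is compiled as a small block of its own
     // (registered at start+1); afterwards the saved branch target is compared
     // with start+4: if it differs, dispatch through jump_vaddr_reg, otherwise
     // fall through into the main block.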
6093 static void pagespan_ds()
6094 {
6095   assem_debug("initial delay slot:\n");
6096   u_int vaddr=start+1;
6097   u_int page=get_page(vaddr);
6098   u_int vpage=get_vpage(vaddr);
6099   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6100   do_dirty_stub_ds();
6101   ll_add(jump_in+page,vaddr,(void *)out);
6102   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6103   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6104     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6105   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6106     emit_writeword(HOST_BTREG,(int)&branch_target);
6107   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6108   address_generation(0,&regs[0],regs[0].regmap_entry);
6109   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6110     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6111   cop1_usable=0;
6112   is_delayslot=0;
6113   switch(itype[0]) {
6114     case ALU:
6115       alu_assemble(0,&regs[0]);break;
6116     case IMM16:
6117       imm16_assemble(0,&regs[0]);break;
6118     case SHIFT:
6119       shift_assemble(0,&regs[0]);break;
6120     case SHIFTIMM:
6121       shiftimm_assemble(0,&regs[0]);break;
6122     case LOAD:
6123       load_assemble(0,&regs[0]);break;
6124     case LOADLR:
6125       loadlr_assemble(0,&regs[0]);break;
6126     case STORE:
6127       store_assemble(0,&regs[0]);break;
6128     case STORELR:
6129       storelr_assemble(0,&regs[0]);break;
6130     case COP0:
6131       cop0_assemble(0,&regs[0]);break;
6132     case COP1:
6133       cop1_assemble(0,&regs[0]);break;
6134     case C1LS:
6135       c1ls_assemble(0,&regs[0]);break;
6136     case COP2:
6137       cop2_assemble(0,&regs[0]);break;
6138     case C2LS:
6139       c2ls_assemble(0,&regs[0]);break;
6140     case C2OP:
6141       c2op_assemble(0,&regs[0]);break;
6142     case FCONV:
6143       fconv_assemble(0,&regs[0]);break;
6144     case FLOAT:
6145       float_assemble(0,&regs[0]);break;
6146     case FCOMP:
6147       fcomp_assemble(0,&regs[0]);break;
6148     case MULTDIV:
6149       multdiv_assemble(0,&regs[0]);break;
6150     case MOV:
6151       mov_assemble(0,&regs[0]);break;
6152     case SYSCALL:
6153     case HLECALL:
6154     case INTCALL:
6155     case SPAN:
6156     case UJUMP:
6157     case RJUMP:
6158     case CJUMP:
6159     case SJUMP:
6160     case FJUMP:
6161       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
6162   }
6163   int btaddr=get_reg(regs[0].regmap,BTREG);
6164   if(btaddr<0) {
6165     btaddr=get_reg(regs[0].regmap,-1);
6166     emit_readword((int)&branch_target,btaddr);
6167   }
6168   assert(btaddr!=HOST_CCREG);
6169   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6170 #ifdef HOST_IMM8
6171   emit_movimm(start+4,HOST_TEMPREG);
6172   emit_cmp(btaddr,HOST_TEMPREG);
6173 #else
6174   emit_cmpimm(btaddr,start+4);
6175 #endif
6176   int branch=(int)out;
6177   emit_jeq(0);
6178   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6179   emit_jmp(jump_vaddr_reg[btaddr]);
6180   set_jump_target(branch,(int)out);
6181   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6182   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6183 }
6184
6185 // Basic liveness analysis for MIPS registers
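     // Walks instructions backwards computing "unneeded" bitmaps: bit r set in
     // u/uu (lower/upper word) or in gte_u means that register's value is dead
     // at this point.  A written register becomes unneeded, a read register
     // becomes needed again, and r0 is always unneeded.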
6186 void unneeded_registers(int istart,int iend,int r)
6187 {
6188   int i;
6189   uint64_t u,uu,gte_u,b,bu,gte_bu;
6190   uint64_t temp_u,temp_uu,temp_gte_u=0;
6191   uint64_t tdep;
6192   uint64_t gte_u_unknown=0;
6193   if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
6194     gte_u_unknown=~0ll;
6195   if(iend==slen-1) {
6196     u=1;uu=1;
6197     gte_u=gte_u_unknown;
6198   }else{
6199     u=unneeded_reg[iend+1];
6200     uu=unneeded_reg_upper[iend+1];
6201     u=1;uu=1;
6202     gte_u=gte_unneeded[iend+1];
6203   }
6204
6205   for (i=iend;i>=istart;i--)
6206   {
6207     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6208     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6209     {
6210       // If subroutine call, flag return address as a possible branch target
6211       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6212
6213       if(ba[i]<start || ba[i]>=(start+slen*4))
6214       {
6215         // Branch out of this block, flush all regs
6216         u=1;
6217         uu=1;
6218         gte_u=gte_u_unknown;
6219         /* Hexagon hack
6220         if(itype[i]==UJUMP&&rt1[i]==31)
6221         {
6222           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6223         }
6224         if(itype[i]==RJUMP&&rs1[i]==31)
6225         {
6226           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6227         }
6228         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6229           if(itype[i]==UJUMP&&rt1[i]==31)
6230           {
6231             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6232             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6233           }
6234           if(itype[i]==RJUMP&&rs1[i]==31)
6235           {
6236             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6237             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6238           }
6239         }*/
6240         branch_unneeded_reg[i]=u;
6241         branch_unneeded_reg_upper[i]=uu;
6242         // Merge in delay slot
6243         tdep=(~uu>>rt1[i+1])&1;
6244         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6245         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6246         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6247         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6248         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6249         u|=1;uu|=1;
6250         gte_u|=gte_rt[i+1];
6251         gte_u&=~gte_rs[i+1];
6252         // If branch is "likely" (and conditional)
6253         // then we skip the delay slot on the fall-thru path
6254         if(likely[i]) {
6255           if(i<slen-1) {
6256             u&=unneeded_reg[i+2];
6257             uu&=unneeded_reg_upper[i+2];
6258             gte_u&=gte_unneeded[i+2];
6259           }
6260           else
6261           {
6262             u=1;
6263             uu=1;
6264             gte_u=gte_u_unknown;
6265           }
6266         }
6267       }
6268       else
6269       {
6270         // Internal branch, flag target
6271         bt[(ba[i]-start)>>2]=1;
6272         if(ba[i]<=start+i*4) {
6273           // Backward branch
6274           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6275           {
6276             // Unconditional branch
6277             temp_u=1;temp_uu=1;
6278             temp_gte_u=0;
6279           } else {
6280             // Conditional branch (not taken case)
6281             temp_u=unneeded_reg[i+2];
6282             temp_uu=unneeded_reg_upper[i+2];
6283             temp_gte_u&=gte_unneeded[i+2];
6284           }
6285           // Merge in delay slot
6286           tdep=(~temp_uu>>rt1[i+1])&1;
6287           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6288           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6289           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6290           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6291           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6292           temp_u|=1;temp_uu|=1;
6293           temp_gte_u|=gte_rt[i+1];
6294           temp_gte_u&=~gte_rs[i+1];
6295           // If branch is "likely" (and conditional)
6296           // then we skip the delay slot on the fall-thru path
6297           if(likely[i]) {
6298             if(i<slen-1) {
6299               temp_u&=unneeded_reg[i+2];
6300               temp_uu&=unneeded_reg_upper[i+2];
6301               temp_gte_u&=gte_unneeded[i+2];
6302             }
6303             else
6304             {
6305               temp_u=1;
6306               temp_uu=1;
6307               temp_gte_u=gte_u_unknown;
6308             }
6309           }
6310           tdep=(~temp_uu>>rt1[i])&1;
6311           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6312           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6313           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6314           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6315           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6316           temp_u|=1;temp_uu|=1;
6317           temp_gte_u|=gte_rt[i];
6318           temp_gte_u&=~gte_rs[i];
6319           unneeded_reg[i]=temp_u;
6320           unneeded_reg_upper[i]=temp_uu;
6321           gte_unneeded[i]=temp_gte_u;
6322           // Only go three levels deep.  This recursion can take an
6323           // excessive amount of time if there are a lot of nested loops.
6324           if(r<2) {
6325             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6326           }else{
6327             unneeded_reg[(ba[i]-start)>>2]=1;
6328             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6329             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
6330           }
6331         } /*else*/ if(1) {
6332           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6333           {
6334             // Unconditional branch
6335             u=unneeded_reg[(ba[i]-start)>>2];
6336             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6337             gte_u=gte_unneeded[(ba[i]-start)>>2];
6338             branch_unneeded_reg[i]=u;
6339             branch_unneeded_reg_upper[i]=uu;
6340         //u=1;
6341         //uu=1;
6342         //branch_unneeded_reg[i]=u;
6343         //branch_unneeded_reg_upper[i]=uu;
6344             // Merge in delay slot
6345             tdep=(~uu>>rt1[i+1])&1;
6346             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6347             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6348             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6349             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6350             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6351             u|=1;uu|=1;
6352             gte_u|=gte_rt[i+1];
6353             gte_u&=~gte_rs[i+1];
6354           } else {
6355             // Conditional branch
6356             b=unneeded_reg[(ba[i]-start)>>2];
6357             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6358             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6359             branch_unneeded_reg[i]=b;
6360             branch_unneeded_reg_upper[i]=bu;
6361         //b=1;
6362         //bu=1;
6363         //branch_unneeded_reg[i]=b;
6364         //branch_unneeded_reg_upper[i]=bu;
6365             // Branch delay slot
6366             tdep=(~uu>>rt1[i+1])&1;
6367             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6368             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6369             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6370             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6371             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6372             b|=1;bu|=1;
6373             gte_bu|=gte_rt[i+1];
6374             gte_bu&=~gte_rs[i+1];
6375             // If branch is "likely" then we skip the
6376             // delay slot on the fall-thru path
6377             if(likely[i]) {
6378               u=b;
6379               uu=bu;
6380               gte_u=gte_bu;
6381               if(i<slen-1) {
6382                 u&=unneeded_reg[i+2];
6383                 uu&=unneeded_reg_upper[i+2];
6384                 gte_u&=gte_unneeded[i+2];
6385         //u=1;
6386         //uu=1;
6387               }
6388             } else {
6389               u&=b;
6390               uu&=bu;
6391               gte_u&=gte_bu;
6392         //u=1;
6393         //uu=1;
6394             }
6395             if(i<slen-1) {
6396               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6397               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6398         //branch_unneeded_reg[i]=1;
6399         //branch_unneeded_reg_upper[i]=1;
6400             } else {
6401               branch_unneeded_reg[i]=1;
6402               branch_unneeded_reg_upper[i]=1;
6403             }
6404           }
6405         }
6406       }
6407     }
6408     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6409     {
6410       // SYSCALL instruction (software interrupt)
6411       u=1;
6412       uu=1;
6413     }
6414     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6415     {
6416       // ERET instruction (return from interrupt)
6417       u=1;
6418       uu=1;
6419     }
6420     //u=uu=1; // DEBUG
6421     tdep=(~uu>>rt1[i])&1;
6422     // Written registers are unneeded
6423     u|=1LL<<rt1[i];
6424     u|=1LL<<rt2[i];
6425     uu|=1LL<<rt1[i];
6426     uu|=1LL<<rt2[i];
6427     gte_u|=gte_rt[i];
6428     // Accessed registers are needed
6429     u&=~(1LL<<rs1[i]);
6430     u&=~(1LL<<rs2[i]);
6431     uu&=~(1LL<<us1[i]);
6432     uu&=~(1LL<<us2[i]);
6433     gte_u&=~gte_rs[i];
6434     if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
6435       gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
6436     // Source-target dependencies
6437     uu&=~(tdep<<dep1[i]);
6438     uu&=~(tdep<<dep2[i]);
6439     // R0 is always unneeded
6440     u|=1;uu|=1;
6441     // Save it
6442     unneeded_reg[i]=u;
6443     unneeded_reg_upper[i]=uu;
6444     gte_unneeded[i]=gte_u;
6445     /*
6446     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6447     printf("U:");
6448     int r;
6449     for(r=1;r<=CCREG;r++) {
6450       if((unneeded_reg[i]>>r)&1) {
6451         if(r==HIREG) printf(" HI");
6452         else if(r==LOREG) printf(" LO");
6453         else printf(" r%d",r);
6454       }
6455     }
6456     printf(" UU:");
6457     for(r=1;r<=CCREG;r++) {
6458       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6459         if(r==HIREG) printf(" HI");
6460         else if(r==LOREG) printf(" LO");
6461         else printf(" r%d",r);
6462       }
6463     }
6464     printf("\n");*/
6465   }
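       // The 32-bit R3000 has no 64-bit registers, so force every upper-word
       // mask to "all unneeded".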
6466   for (i=iend;i>=istart;i--)
6467   {
6468     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6469   }
6470 }
6471
6472 // Write back dirty registers as soon as we will no longer modify them,
6473 // so that we don't end up with lots of writes at the branches.
6474 void clean_registers(int istart,int iend,int wr)
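     // will_dirty/wont_dirty are per-host-register bitmaps propagated backwards
     // from the end of the block; when wr is set the computed masks are folded
     // back into the regs[]/branch_regs[] dirty state.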
6475 {
6476   int i;
6477   int r;
6478   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
6479   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
6480   if(iend==slen-1) {
6481     will_dirty_i=will_dirty_next=0;
6482     wont_dirty_i=wont_dirty_next=0;
6483   }else{
6484     will_dirty_i=will_dirty_next=will_dirty[iend+1];
6485     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
6486   }
6487   for (i=iend;i>=istart;i--)
6488   {
6489     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6490     {
6491       if(ba[i]<start || ba[i]>=(start+slen*4))
6492       {
6493         // Branch out of this block, flush all regs
6494         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6495         {
6496           // Unconditional branch
6497           will_dirty_i=0;
6498           wont_dirty_i=0;
6499           // Merge in delay slot (will dirty)
6500           for(r=0;r<HOST_REGS;r++) {
6501             if(r!=EXCLUDE_REG) {
6502               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6503               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6504               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6505               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6506               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6507               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6508               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6509               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6510               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6511               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6512               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6513               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6514               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6515               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6516             }
6517           }
6518         }
6519         else
6520         {
6521           // Conditional branch
6522           will_dirty_i=0;
6523           wont_dirty_i=wont_dirty_next;
6524           // Merge in delay slot (will dirty)
6525           for(r=0;r<HOST_REGS;r++) {
6526             if(r!=EXCLUDE_REG) {
6527               if(!likely[i]) {
6528                 // Might not dirty if likely branch is not taken
6529                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6530                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6531                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6532                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6533                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6534                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
6535                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6536                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6537                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6538                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6539                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6540                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6541                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6542                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6543               }
6544             }
6545           }
6546         }
6547         // Merge in delay slot (wont dirty)
6548         for(r=0;r<HOST_REGS;r++) {
6549           if(r!=EXCLUDE_REG) {
6550             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6551             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6552             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6553             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6554             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6555             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6556             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6557             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6558             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6559             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6560           }
6561         }
6562         if(wr) {
6563           #ifndef DESTRUCTIVE_WRITEBACK
6564           branch_regs[i].dirty&=wont_dirty_i;
6565           #endif
6566           branch_regs[i].dirty|=will_dirty_i;
6567         }
6568       }
6569       else
6570       {
6571         // Internal branch
6572         if(ba[i]<=start+i*4) {
6573           // Backward branch
6574           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6575           {
6576             // Unconditional branch
6577             temp_will_dirty=0;
6578             temp_wont_dirty=0;
6579             // Merge in delay slot (will dirty)
6580             for(r=0;r<HOST_REGS;r++) {
6581               if(r!=EXCLUDE_REG) {
6582                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6583                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6584                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6585                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6586                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6587                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6588                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6589                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6590                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6591                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6592                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6593                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6594                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6595                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6596               }
6597             }
6598           } else {
6599             // Conditional branch (not taken case)
6600             temp_will_dirty=will_dirty_next;
6601             temp_wont_dirty=wont_dirty_next;
6602             // Merge in delay slot (will dirty)
6603             for(r=0;r<HOST_REGS;r++) {
6604               if(r!=EXCLUDE_REG) {
6605                 if(!likely[i]) {
6606                   // Will not dirty if likely branch is not taken
6607                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6608                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6609                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6610                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6611                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6612                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
6613                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6614                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6615                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6616                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6617                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6618                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6619                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6620                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6621                 }
6622               }
6623             }
6624           }
6625           // Merge in delay slot (won't dirty)
6626           for(r=0;r<HOST_REGS;r++) {
6627             if(r!=EXCLUDE_REG) {
6628               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6629               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6630               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6631               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6632               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6633               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6634               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6635               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6636               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6637               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6638             }
6639           }
6640           // Deal with changed mappings
6641           if(i<iend) {
6642             for(r=0;r<HOST_REGS;r++) {
6643               if(r!=EXCLUDE_REG) {
6644                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
6645                   temp_will_dirty&=~(1<<r);
6646                   temp_wont_dirty&=~(1<<r);
6647                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6648                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6649                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6650                   } else {
6651                     temp_will_dirty|=1<<r;
6652                     temp_wont_dirty|=1<<r;
6653                   }
6654                 }
6655               }
6656             }
6657           }
6658           if(wr) {
6659             will_dirty[i]=temp_will_dirty;
6660             wont_dirty[i]=temp_wont_dirty;
6661             clean_registers((ba[i]-start)>>2,i-1,0);
6662           }else{
6663             // Limit recursion.  It can take an excessive amount
6664             // of time if there are a lot of nested loops.
6665             will_dirty[(ba[i]-start)>>2]=0;
6666             wont_dirty[(ba[i]-start)>>2]=-1;
6667           }
6668         }
6669         /*else*/ if(1)
6670         {
6671           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6672           {
6673             // Unconditional branch
6674             will_dirty_i=0;
6675             wont_dirty_i=0;
6676           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6677             for(r=0;r<HOST_REGS;r++) {
6678               if(r!=EXCLUDE_REG) {
6679                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6680                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
6681                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6682                 }
6683                 if(branch_regs[i].regmap[r]>=0) {
6684                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6685                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6686                 }
6687               }
6688             }
6689           //}
6690             // Merge in delay slot
6691             for(r=0;r<HOST_REGS;r++) {
6692               if(r!=EXCLUDE_REG) {
6693                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6694                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6695                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6696                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6697                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6698                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6699                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6700                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6701                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6702                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6703                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6704                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6705                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6706                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6707               }
6708             }
6709           } else {
6710             // Conditional branch
6711             will_dirty_i=will_dirty_next;
6712             wont_dirty_i=wont_dirty_next;
6713           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6714             for(r=0;r<HOST_REGS;r++) {
6715               if(r!=EXCLUDE_REG) {
6716                 signed char target_reg=branch_regs[i].regmap[r];
6717                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6718                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6719                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6720                 }
6721                 else if(target_reg>=0) {
6722                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6723                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6724                 }
6725                 // Treat delay slot as part of branch too
6726                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6727                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6728                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6729                 }
6730                 else
6731                 {
6732                   will_dirty[i+1]&=~(1<<r);
6733                 }*/
6734               }
6735             }
6736           //}
6737             // Merge in delay slot
6738             for(r=0;r<HOST_REGS;r++) {
6739               if(r!=EXCLUDE_REG) {
6740                 if(!likely[i]) {
6741                   // Might not dirty if likely branch is not taken
6742                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6743                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6744                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6745                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6746                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6747                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6748                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6749                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6750                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6751                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6752                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6753                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6754                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6755                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6756                 }
6757               }
6758             }
6759           }
6760           // Merge in delay slot (won't dirty)
6761           for(r=0;r<HOST_REGS;r++) {
6762             if(r!=EXCLUDE_REG) {
6763               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6764               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6765               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6766               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6767               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6768               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6769               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6770               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6771               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6772               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6773             }
6774           }
6775           if(wr) {
6776             #ifndef DESTRUCTIVE_WRITEBACK
6777             branch_regs[i].dirty&=wont_dirty_i;
6778             #endif
6779             branch_regs[i].dirty|=will_dirty_i;
6780           }
6781         }
6782       }
6783     }
6784     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6785     {
6786       // SYSCALL instruction (software interrupt)
6787       will_dirty_i=0;
6788       wont_dirty_i=0;
6789     }
6790     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6791     {
6792       // ERET instruction (return from interrupt)
6793       will_dirty_i=0;
6794       wont_dirty_i=0;
6795     }
6796     will_dirty_next=will_dirty_i;
6797     wont_dirty_next=wont_dirty_i;
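    // Fold in instruction i's own writes: host regs mapped to its
    // destination(s) or to CCREG join both the will-dirty and won't-dirty
    // sets, while regs that are unmapped, hold $zero, or hold a special
    // register other than CCREG are dropped from the will-dirty set.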
6798     for(r=0;r<HOST_REGS;r++) {
6799       if(r!=EXCLUDE_REG) {
6800         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6801         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6802         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6803         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6804         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6805         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6806         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6807         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6808         if(i>istart) {
6809           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
6810           {
6811             // Don't store a register immediately after writing it,
6812             // as that may prevent dual-issue.
6813             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
6814             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
6815           }
6816         }
6817       }
6818     }
6819     // Save it
6820     will_dirty[i]=will_dirty_i;
6821     wont_dirty[i]=wont_dirty_i;
6822     // Mark registers that won't be dirtied as not dirty
6823     if(wr) {
6824       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
6825       for(r=0;r<HOST_REGS;r++) {
6826         if((will_dirty_i>>r)&1) {
6827           printf(" r%d",r);
6828         }
6829       }
6830       printf("\n");*/
6831
6832       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
6833         regs[i].dirty|=will_dirty_i;
6834         #ifndef DESTRUCTIVE_WRITEBACK
6835         regs[i].dirty&=wont_dirty_i;
6836         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6837         {
6838           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
6839             for(r=0;r<HOST_REGS;r++) {
6840               if(r!=EXCLUDE_REG) {
6841                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
6842                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
6843                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6844               }
6845             }
6846           }
6847         }
6848         else
6849         {
6850           if(i<iend) {
6851             for(r=0;r<HOST_REGS;r++) {
6852               if(r!=EXCLUDE_REG) {
6853                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
6854                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
6855                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6856               }
6857             }
6858           }
6859         }
6860         #endif
6861       //}
6862     }
6863     // Deal with changed mappings
6864     temp_will_dirty=will_dirty_i;
6865     temp_wont_dirty=wont_dirty_i;
6866     for(r=0;r<HOST_REGS;r++) {
6867       if(r!=EXCLUDE_REG) {
6868         int nr;
6869         if(regs[i].regmap[r]==regmap_pre[i][r]) {
6870           if(wr) {
6871             #ifndef DESTRUCTIVE_WRITEBACK
6872             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6873             #endif
6874             regs[i].wasdirty|=will_dirty_i&(1<<r);
6875           }
6876         }
6877         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
6878           // Register moved to a different register
6879           will_dirty_i&=~(1<<r);
6880           wont_dirty_i&=~(1<<r);
6881           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
6882           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
6883           if(wr) {
6884             #ifndef DESTRUCTIVE_WRITEBACK
6885             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6886             #endif
6887             regs[i].wasdirty|=will_dirty_i&(1<<r);
6888           }
6889         }
6890         else {
6891           will_dirty_i&=~(1<<r);
6892           wont_dirty_i&=~(1<<r);
6893           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6894             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6895             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6896           } else {
6897             wont_dirty_i|=1<<r;
6898             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
6899           }
6900         }
6901       }
6902     }
6903   }
6904 }
6905
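/* Debug-only disassembler: with DISASM defined, disassemble_inst() prints one
 * decoded line per instruction using the tables filled in during pass 1;
 * otherwise it compiles to an empty stub. */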
6906 #ifdef DISASM
6907   /* disassembly */
6908 void disassemble_inst(int i)
6909 {
6910     if (bt[i]) printf("*"); else printf(" ");
6911     switch(itype[i]) {
6912       case UJUMP:
6913         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6914       case CJUMP:
6915         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
6916       case SJUMP:
6917         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6918       case FJUMP:
6919         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6920       case RJUMP:
6921         if (opcode[i]==0x9&&rt1[i]!=31)
6922           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
6923         else
6924           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6925         break;
6926       case SPAN:
6927         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
6928       case IMM16:
6929         if(opcode[i]==0xf) //LUI
6930           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
6931         else
6932           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6933         break;
6934       case LOAD:
6935       case LOADLR:
6936         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6937         break;
6938       case STORE:
6939       case STORELR:
6940         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
6941         break;
6942       case ALU:
6943       case SHIFT:
6944         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
6945         break;
6946       case MULTDIV:
6947         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
6948         break;
6949       case SHIFTIMM:
6950         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6951         break;
6952       case MOV:
6953         if((opcode2[i]&0x1d)==0x10)
6954           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
6955         else if((opcode2[i]&0x1d)==0x11)
6956           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6957         else
6958           printf (" %x: %s\n",start+i*4,insn[i]);
6959         break;
6960       case COP0:
6961         if(opcode2[i]==0)
6962           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
6963         else if(opcode2[i]==4)
6964           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
6965         else printf (" %x: %s\n",start+i*4,insn[i]);
6966         break;
6967       case COP1:
6968         if(opcode2[i]<3)
6969           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
6970         else if(opcode2[i]>3)
6971           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
6972         else printf (" %x: %s\n",start+i*4,insn[i]);
6973         break;
6974       case COP2:
6975         if(opcode2[i]<3)
6976           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
6977         else if(opcode2[i]>3)
6978           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
6979         else printf (" %x: %s\n",start+i*4,insn[i]);
6980         break;
6981       case C1LS:
6982         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6983         break;
6984       case C2LS:
6985         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6986         break;
6987       case INTCALL:
6988         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6989         break;
6990       default:
6991         //printf (" %s %8x\n",insn[i],source[i]);
6992         printf (" %x: %s\n",start+i*4,insn[i]);
6993     }
6994 }
6995 #else
6996 static void disassemble_inst(int i) {}
6997 #endif // DISASM
6998
6999 #define DRC_TEST_VAL 0x74657374
7000
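/* Self test: emit a tiny stub into the translation cache that loads
 * DRC_TEST_VAL (0x74657374, ASCII "test") and jumps back to the caller,
 * then run it to verify that generated code is actually executable here.
 * The output pointer is reset to BASE_ADDR afterwards. */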
7001 static int new_dynarec_test(void)
7002 {
7003   int (*testfunc)(void) = (void *)out;
7004   void *beginning;
7005   int ret;
7006
7007   beginning = start_block();
7008   emit_movimm(DRC_TEST_VAL,0); // test
7009   emit_jmpreg(14);
7010   literal_pool(0);
7011   end_block(beginning);
7012   SysPrintf("testing if we can run recompiled code..\n");
7013   ret = testfunc();
7014   if (ret == DRC_TEST_VAL)
7015     SysPrintf("test passed.\n");
7016   else
7017     SysPrintf("test failed: %08x\n", ret);
7018   out=(u_char *)BASE_ADDR;
7019   return ret == DRC_TEST_VAL;
7020 }
7021
7022 // clear the state completely, instead of just marking
7023 // things invalid like invalidate_all_pages() does
7024 void new_dynarec_clear_full()
7025 {
7026   int n;
7027   out=(u_char *)BASE_ADDR;
7028   memset(invalid_code,1,sizeof(invalid_code));
7029   memset(hash_table,0xff,sizeof(hash_table));
7030   memset(mini_ht,-1,sizeof(mini_ht));
7031   memset(restore_candidate,0,sizeof(restore_candidate));
7032   memset(shadow,0,sizeof(shadow));
7033   copy=shadow;
7034   expirep=16384; // Expiry pointer, +2 blocks
7035   pending_exception=0;
7036   literalcount=0;
7037   stop_after_jal=0;
7038   inv_code_start=inv_code_end=~0;
7039   // TLB
7040   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7041   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7042   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7043 }
7044
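/* One-time setup: map or unprotect the translation cache (the method depends
 * on BASE_ADDR_FIXED / BASE_ADDR_DYNAMIC and the platform), reset all
 * recompiler state and run the executable-cache self test above.
 * new_dynarec_cleanup() is the matching teardown. */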
7045 void new_dynarec_init()
7046 {
7047   SysPrintf("Init new dynarec\n");
7048
7049   // allocate/prepare a buffer for translation cache
7050   // see assem_arm.h for some explanation
7051 #if   defined(BASE_ADDR_FIXED)
7052   if (mmap (translation_cache, 1 << TARGET_SIZE_2,
7053             PROT_READ | PROT_WRITE | PROT_EXEC,
7054             MAP_PRIVATE | MAP_ANONYMOUS,
7055             -1, 0) != translation_cache) {
7056     SysPrintf("mmap() failed: %s\n", strerror(errno));
7057     SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
7058     abort();
7059   }
7060 #elif defined(BASE_ADDR_DYNAMIC)
7061   #ifdef VITA
7062   sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
7063   if (sceBlock < 0)
7064     SysPrintf("sceKernelAllocMemBlockForVM failed\n");
7065   int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
7066   if (ret < 0)
7067     SysPrintf("sceKernelGetMemBlockBase failed\n");
7068   #else
7069   translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
7070             PROT_READ | PROT_WRITE | PROT_EXEC,
7071             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
7072   if (translation_cache == MAP_FAILED) {
7073     SysPrintf("mmap() failed: %s\n", strerror(errno));
7074     abort();
7075   }
7076   #endif
7077 #else
7078   #ifndef NO_WRITE_EXEC
7079   // not all systems allow execute in data segment by default
7080   if (mprotect((void *)BASE_ADDR, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
7081     SysPrintf("mprotect() failed: %s\n", strerror(errno));
7082   #endif
7083 #endif
7084   out=(u_char *)BASE_ADDR;
7085   cycle_multiplier=200;
7086   new_dynarec_clear_full();
7087 #ifdef HOST_IMM8
7088   // Copy this into local area so we don't have to put it in every literal pool
7089   invc_ptr=invalid_code;
7090 #endif
7091   arch_init();
7092   new_dynarec_test();
7093 #ifndef RAM_FIXED
7094   ram_offset=(u_int)rdram-0x80000000;
7095 #endif
7096   if (ram_offset!=0)
7097     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
7098 }
7099
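/* Teardown: release the translation cache mapping where one was created by
 * new_dynarec_init() and clear all block lists. */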
7100 void new_dynarec_cleanup()
7101 {
7102   int n;
7103 #if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
7104   #ifdef VITA
7105   sceKernelFreeMemBlock(sceBlock);
7106   sceBlock = -1;
7107   #else
7108   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0)
7109     SysPrintf("munmap() failed\n");
7110   #endif
7111 #endif
7112   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7113   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7114   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7115   #ifdef ROM_COPY
7116   if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
7117   #endif
7118 }
7119
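/* Map a PSX code address (RAM mirrors in KUSEG/KSEG0/KSEG1, or the BIOS ROM
 * when HLE is disabled) to a host pointer into rdram/psxR and report via
 * *limit how far the readable region extends; returns NULL for addresses
 * that cannot be compiled from. */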
7120 static u_int *get_source_start(u_int addr, u_int *limit)
7121 {
7122   if (addr < 0x00200000 ||
7123     (0xa0000000 <= addr && addr < 0xa0200000)) {
7124     // used for BIOS calls mostly?
7125     *limit = (addr&0xa0000000)|0x00200000;
7126     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7127   }
7128   else if (!Config.HLE && (
7129     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7130     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7131     // BIOS
7132     *limit = (addr & 0xfff00000) | 0x80000;
7133     return (u_int *)((u_int)psxR + (addr&0x7ffff));
7134   }
7135   else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
7136     *limit = (addr & 0x80600000) + 0x00200000;
7137     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7138   }
7139   return NULL;
7140 }
7141
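/* Scan forward from addr (at most 0x1000 bytes, capped at the region limit)
 * for a "jr $ra" (0x03e00008) and return the address just past its delay
 * slot; new_dynarec_save_blocks() uses this to coalesce entries that fall
 * inside the same routine. */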
7142 static u_int scan_for_ret(u_int addr)
7143 {
7144   u_int limit = 0;
7145   u_int *mem;
7146
7147   mem = get_source_start(addr, &limit);
7148   if (mem == NULL)
7149     return addr;
7150
7151   if (limit > addr + 0x1000)
7152     limit = addr + 0x1000;
7153   for (; addr < limit; addr += 4, mem++) {
7154     if (*mem == 0x03e00008) // jr $ra
7155       return addr + 8;
7156   }
7157   return addr;
7158 }
7159
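/* Per-block record stored in savestates: the block's start address and the
 * speculative register flags (reg_sv_flags) it was discovered with, so the
 * block list can be precompiled again when the state is loaded. */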
7160 struct savestate_block {
7161   uint32_t addr;
7162   uint32_t regflags;
7163 };
7164
7165 static int addr_cmp(const void *p1_, const void *p2_)
7166 {
7167   const struct savestate_block *p1 = p1_, *p2 = p2_;
7168   return p1->addr - p2->addr;
7169 }
7170
7171 int new_dynarec_save_blocks(void *save, int size)
7172 {
7173   struct savestate_block *blocks = save;
7174   int maxcount = size / sizeof(blocks[0]);
7175   struct savestate_block tmp_blocks[1024];
7176   struct ll_entry *head;
7177   int p, s, d, o, bcnt;
7178   u_int addr;
7179
7180   o = 0;
7181   for (p = 0; p < sizeof(jump_in) / sizeof(jump_in[0]); p++) {
7182     bcnt = 0;
7183     for (head = jump_in[p]; head != NULL; head = head->next) {
7184       tmp_blocks[bcnt].addr = head->vaddr;
7185       tmp_blocks[bcnt].regflags = head->reg_sv_flags;
7186       bcnt++;
7187     }
7188     if (bcnt < 1)
7189       continue;
7190     qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
7191
7192     addr = tmp_blocks[0].addr;
7193     for (s = d = 0; s < bcnt; s++) {
7194       if (tmp_blocks[s].addr < addr)
7195         continue;
7196       if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
7197         tmp_blocks[d++] = tmp_blocks[s];
7198       addr = scan_for_ret(tmp_blocks[s].addr);
7199     }
7200
7201     if (o + d > maxcount)
7202       d = maxcount - o;
7203     memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
7204     o += d;
7205   }
7206
7207   return o * sizeof(blocks[0]);
7208 }
7209
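/* Precompile the blocks recorded in a savestate: GPRs are temporarily set to
 * plausible values (0x1f800000 scratchpad/IO base for flagged registers,
 * 0x80000000 RAM base for the rest) so that address speculation during
 * get_addr() roughly matches the conditions the blocks were originally
 * compiled under; the real GPR values are restored afterwards. */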
7210 void new_dynarec_load_blocks(const void *save, int size)
7211 {
7212   const struct savestate_block *blocks = save;
7213   int count = size / sizeof(blocks[0]);
7214   u_int regs_save[32];
7215   uint32_t f;
7216   int i, b;
7217
7218   get_addr(psxRegs.pc);
7219
7220   // change GPRs for speculation to at least partially work..
7221   memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
7222   for (i = 1; i < 32; i++)
7223     psxRegs.GPR.r[i] = 0x80000000;
7224
7225   for (b = 0; b < count; b++) {
7226     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7227       if (f & 1)
7228         psxRegs.GPR.r[i] = 0x1f800000;
7229     }
7230
7231     get_addr(blocks[b].addr);
7232
7233     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7234       if (f & 1)
7235         psxRegs.GPR.r[i] = 0x80000000;
7236     }
7237   }
7238
7239   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
7240 }
7241
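/* Main entry point of the recompiler: decode and translate the MIPS block
 * starting at addr into native code and register it in the jump tables
 * (see the pass list below); the HLE call address gets a special stub. */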
7242 int new_recompile_block(int addr)
7243 {
7244   u_int pagelimit = 0;
7245   u_int state_rflags = 0;
7246   int i;
7247
7248   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7249   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7250   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7251   //if(debug)
7252   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7253   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7254   /*if(Count>=312978186) {
7255     rlist();
7256   }*/
7257   //rlist();
7258
7259   // this is just for speculation
7260   for (i = 1; i < 32; i++) {
7261     if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
7262       state_rflags |= 1 << i;
7263   }
7264
7265   start = (u_int)addr&~3;
7266   //assert(((u_int)addr&1)==0);
7267   new_dynarec_did_compile=1;
7268   if (Config.HLE && start == 0x80001000) // hlecall
7269   {
7270     // XXX: is this enough? Maybe check hleSoftCall?
7271     void *beginning=start_block();
7272     u_int page=get_page(start);
7273
7274     invalid_code[start>>12]=0;
7275     emit_movimm(start,0);
7276     emit_writeword(0,(int)&pcaddr);
7277     emit_jmp((int)new_dyna_leave);
7278     literal_pool(0);
7279     end_block(beginning);
7280     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
7281     return 0;
7282   }
7283
7284   source = get_source_start(start, &pagelimit);
7285   if (source == NULL) {
7286     SysPrintf("Compile at bogus memory address: %08x\n", addr);
7287     exit(1);
7288   }
7289
7290   /* Pass 1: disassemble */
7291   /* Pass 2: register dependencies, branch targets */
7292   /* Pass 3: register allocation */
7293   /* Pass 4: branch dependencies */
7294   /* Pass 5: pre-alloc */
7295   /* Pass 6: optimize clean/dirty state */
7296   /* Pass 7: flag 32-bit registers */
7297   /* Pass 8: assembly */
7298   /* Pass 9: linker */
7299   /* Pass 10: garbage collection / free memory */
7300
7301   int j;
7302   int done=0;
7303   unsigned int type,op,op2;
7304
7305   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7306
7307   /* Pass 1 disassembly */
7308
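  /* Decode one word per iteration: the primary opcode is in bits 31:26;
   * SPECIAL, REGIMM and the coprocessor groups are decoded further through
   * op2 (the function, rt or rs field respectively). */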
7309   for(i=0;!done;i++) {
7310     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
7311     minimum_free_regs[i]=0;
7312     opcode[i]=op=source[i]>>26;
7313     switch(op)
7314     {
7315       case 0x00: strcpy(insn[i],"special"); type=NI;
7316         op2=source[i]&0x3f;
7317         switch(op2)
7318         {
7319           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7320           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7321           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7322           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7323           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7324           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7325           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7326           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7327           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7328           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7329           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7330           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7331           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7332           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7333           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7334           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7335           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7336           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7337           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7338           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7339           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7340           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7341           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7342           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7343           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7344           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7345           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7346           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7347           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7348           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7349           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7350           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7351           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7352           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7353           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7354 #if 0
7355           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7356           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7357           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7358           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7359           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7360           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7361           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7362           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7363           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7364           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7365           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7366           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7367           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7368           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7369           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7370           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7371           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7372 #endif
7373         }
7374         break;
7375       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7376         op2=(source[i]>>16)&0x1f;
7377         switch(op2)
7378         {
7379           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7380           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7381           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7382           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7383           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7384           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7385           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7386           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7387           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7388           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7389           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7390           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7391           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7392           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7393         }
7394         break;
7395       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7396       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7397       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7398       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7399       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7400       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7401       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7402       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7403       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7404       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7405       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7406       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7407       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7408       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7409       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7410         op2=(source[i]>>21)&0x1f;
7411         switch(op2)
7412         {
7413           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7414           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7415           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7416           switch(source[i]&0x3f)
7417           {
7418             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7419             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7420             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7421             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7422             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7423             //case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7424           }
7425         }
7426         break;
7427       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7428         op2=(source[i]>>21)&0x1f;
7429         switch(op2)
7430         {
7431           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7432           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7433           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7434           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7435           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7436           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7437           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7438           switch((source[i]>>16)&0x3)
7439           {
7440             case 0x00: strcpy(insn[i],"BC1F"); break;
7441             case 0x01: strcpy(insn[i],"BC1T"); break;
7442             case 0x02: strcpy(insn[i],"BC1FL"); break;
7443             case 0x03: strcpy(insn[i],"BC1TL"); break;
7444           }
7445           break;
7446           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7447           switch(source[i]&0x3f)
7448           {
7449             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7450             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7451             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7452             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7453             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7454             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7455             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7456             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7457             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7458             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7459             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7460             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7461             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7462             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7463             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7464             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7465             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7466             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7467             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7468             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7469             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7470             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7471             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7472             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7473             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7474             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7475             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7476             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7477             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7478             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7479             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7480             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7481             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7482             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
7483             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
7484           }
7485           break;
7486           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
7487           switch(source[i]&0x3f)
7488           {
7489             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
7490             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
7491             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
7492             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
7493             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
7494             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
7495             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
7496             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
7497             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
7498             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
7499             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
7500             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
7501             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
7502             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
7503             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
7504             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
7505             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
7506             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
7507             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
7508             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
7509             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
7510             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
7511             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
7512             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
7513             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
7514             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
7515             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
7516             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
7517             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
7518             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
7519             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
7520             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
7521             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
7522             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
7523             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
7524           }
7525           break;
7526           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
7527           switch(source[i]&0x3f)
7528           {
7529             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
7530             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
7531           }
7532           break;
7533           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
7534           switch(source[i]&0x3f)
7535           {
7536             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
7537             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
7538           }
7539           break;
7540         }
7541         break;
7542 #if 0
7543       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
7544       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
7545       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
7546       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
7547       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
7548       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
7549       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
7550       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
7551 #endif
7552       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
7553       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
7554       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
7555       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
7556       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
7557       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
7558       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
7559 #if 0
7560       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
7561 #endif
7562       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
7563       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
7564       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
7565       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
7566 #if 0
7567       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
7568       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
7569 #endif
7570       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
7571       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
7572       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
7573       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
7574 #if 0
7575       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
7576       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
7577       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
7578 #endif
7579       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
7580       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
7581 #if 0
7582       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
7583       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
7584       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
7585 #endif
7586       case 0x12: strcpy(insn[i],"COP2"); type=NI;
7587         op2=(source[i]>>21)&0x1f;
7588         //if (op2 & 0x10) {
7589         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
7590           if (gte_handlers[source[i]&0x3f]!=NULL) {
7591             if (gte_regnames[source[i]&0x3f]!=NULL)
7592               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
7593             else
7594               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
7595             type=C2OP;
7596           }
7597         }
7598         else switch(op2)
7599         {
7600           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
7601           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
7602           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
7603           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
7604         }
7605         break;
7606       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
7607       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
7608       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
7609       default: strcpy(insn[i],"???"); type=NI;
7610         SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
7611         break;
7612     }
7613     itype[i]=type;
7614     opcode2[i]=op2;
7615     /* Get registers/immediates */
7616     lt1[i]=0;
7617     us1[i]=0;
7618     us2[i]=0;
7619     dep1[i]=0;
7620     dep2[i]=0;
7621     gte_rs[i]=gte_rt[i]=0;
7622     switch(type) {
7623       case LOAD:
7624         rs1[i]=(source[i]>>21)&0x1f;
7625         rs2[i]=0;
7626         rt1[i]=(source[i]>>16)&0x1f;
7627         rt2[i]=0;
7628         imm[i]=(short)source[i];
7629         break;
7630       case STORE:
7631       case STORELR:
7632         rs1[i]=(source[i]>>21)&0x1f;
7633         rs2[i]=(source[i]>>16)&0x1f;
7634         rt1[i]=0;
7635         rt2[i]=0;
7636         imm[i]=(short)source[i];
7637         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
7638         break;
7639       case LOADLR:
7640         // LWL/LWR only load part of the register,
7641         // therefore the target register must be treated as a source too
7642         rs1[i]=(source[i]>>21)&0x1f;
7643         rs2[i]=(source[i]>>16)&0x1f;
7644         rt1[i]=(source[i]>>16)&0x1f;
7645         rt2[i]=0;
7646         imm[i]=(short)source[i];
7647         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
7648         if(op==0x26) dep1[i]=rt1[i]; // LWR
7649         break;
7650       case IMM16:
7651         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
7652         else rs1[i]=(source[i]>>21)&0x1f;
7653         rs2[i]=0;
7654         rt1[i]=(source[i]>>16)&0x1f;
7655         rt2[i]=0;
7656         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
7657           imm[i]=(unsigned short)source[i];
7658         }else{
7659           imm[i]=(short)source[i];
7660         }
7661         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
7662         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
7663         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
7664         break;
7665       case UJUMP:
7666         rs1[i]=0;
7667         rs2[i]=0;
7668         rt1[i]=0;
7669         rt2[i]=0;
7670         // The JAL instruction writes to r31.
7671         if (op&1) {
7672           rt1[i]=31;
7673         }
7674         rs2[i]=CCREG;
7675         break;
7676       case RJUMP:
7677         rs1[i]=(source[i]>>21)&0x1f;
7678         rs2[i]=0;
7679         rt1[i]=0;
7680         rt2[i]=0;
7681         // The JALR instruction writes to rd.
7682         if (op2&1) {
7683           rt1[i]=(source[i]>>11)&0x1f;
7684         }
7685         rs2[i]=CCREG;
7686         break;
7687       case CJUMP:
7688         rs1[i]=(source[i]>>21)&0x1f;
7689         rs2[i]=(source[i]>>16)&0x1f;
7690         rt1[i]=0;
7691         rt2[i]=0;
7692         if(op&2) { // BGTZ/BLEZ
7693           rs2[i]=0;
7694         }
7695         us1[i]=rs1[i];
7696         us2[i]=rs2[i];
7697         likely[i]=op>>4;
7698         break;
7699       case SJUMP:
7700         rs1[i]=(source[i]>>21)&0x1f;
7701         rs2[i]=CCREG;
7702         rt1[i]=0;
7703         rt2[i]=0;
7704         us1[i]=rs1[i];
7705         if(op2&0x10) { // BxxAL
7706           rt1[i]=31;
7707           // NOTE: If the branch is not taken, r31 is still overwritten
7708         }
7709         likely[i]=(op2&2)>>1;
7710         break;
7711       case FJUMP:
7712         rs1[i]=FSREG;
7713         rs2[i]=CSREG;
7714         rt1[i]=0;
7715         rt2[i]=0;
7716         likely[i]=((source[i])>>17)&1;
7717         break;
7718       case ALU:
7719         rs1[i]=(source[i]>>21)&0x1f; // source
7720         rs2[i]=(source[i]>>16)&0x1f; // subtract amount
7721         rt1[i]=(source[i]>>11)&0x1f; // destination
7722         rt2[i]=0;
7723         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7724           us1[i]=rs1[i];us2[i]=rs2[i];
7725         }
7726         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7727           dep1[i]=rs1[i];dep2[i]=rs2[i];
7728         }
7729         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
7730           dep1[i]=rs1[i];dep2[i]=rs2[i];
7731         }
7732         break;
7733       case MULTDIV:
7734         rs1[i]=(source[i]>>21)&0x1f; // source
7735         rs2[i]=(source[i]>>16)&0x1f; // divisor
7736         rt1[i]=HIREG;
7737         rt2[i]=LOREG;
7738         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7739           us1[i]=rs1[i];us2[i]=rs2[i];
7740         }
7741         break;
7742       case MOV:
7743         rs1[i]=0;
7744         rs2[i]=0;
7745         rt1[i]=0;
7746         rt2[i]=0;
7747         if(op2==0x10) rs1[i]=HIREG; // MFHI
7748         if(op2==0x11) rt1[i]=HIREG; // MTHI
7749         if(op2==0x12) rs1[i]=LOREG; // MFLO
7750         if(op2==0x13) rt1[i]=LOREG; // MTLO
7751         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
7752         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
7753         dep1[i]=rs1[i];
7754         break;
7755       case SHIFT:
7756         rs1[i]=(source[i]>>16)&0x1f; // target of shift
7757         rs2[i]=(source[i]>>21)&0x1f; // shift amount
7758         rt1[i]=(source[i]>>11)&0x1f; // destination
7759         rt2[i]=0;
7760         // DSLLV/DSRLV/DSRAV are 64-bit
7761         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
7762         break;
7763       case SHIFTIMM:
7764         rs1[i]=(source[i]>>16)&0x1f;
7765         rs2[i]=0;
7766         rt1[i]=(source[i]>>11)&0x1f;
7767         rt2[i]=0;
7768         imm[i]=(source[i]>>6)&0x1f;
7769         // DSxx32 instructions
7770         if(op2>=0x3c) imm[i]|=0x20;
7771         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
7772         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
7773         break;
7774       case COP0:
7775         rs1[i]=0;
7776         rs2[i]=0;
7777         rt1[i]=0;
7778         rt2[i]=0;
7779         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
7780         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
7781         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
7782         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
7783         break;
7784       case COP1:
7785         rs1[i]=0;
7786         rs2[i]=0;
7787         rt1[i]=0;
7788         rt2[i]=0;
7789         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
7790         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
7791         if(op2==5) us1[i]=rs1[i]; // DMTC1
7792         rs2[i]=CSREG;
7793         break;
7794       case COP2:
7795         rs1[i]=0;
7796         rs2[i]=0;
7797         rt1[i]=0;
7798         rt2[i]=0;
7799         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
7800         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
7801         rs2[i]=CSREG;
7802         int gr=(source[i]>>11)&0x1F;
7803         switch(op2)
7804         {
7805           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
7806           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
7807           case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
7808           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
7809         }
7810         break;
7811       case C1LS:
7812         rs1[i]=(source[i]>>21)&0x1F;
7813         rs2[i]=CSREG;
7814         rt1[i]=0;
7815         rt2[i]=0;
7816         imm[i]=(short)source[i];
7817         break;
7818       case C2LS:
7819         rs1[i]=(source[i]>>21)&0x1F;
7820         rs2[i]=0;
7821         rt1[i]=0;
7822         rt2[i]=0;
7823         imm[i]=(short)source[i];
7824         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
7825         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
7826         break;
7827       case C2OP:
7828         rs1[i]=0;
7829         rs2[i]=0;
7830         rt1[i]=0;
7831         rt2[i]=0;
7832         gte_rs[i]=gte_reg_reads[source[i]&0x3f];
7833         gte_rt[i]=gte_reg_writes[source[i]&0x3f];
7834         gte_rt[i]|=1ll<<63; // every op changes flags
7835         if((source[i]&0x3f)==GTE_MVMVA) {
7836           int v = (source[i] >> 15) & 3;
7837           gte_rs[i]&=~0xe3fll;
7838           if(v==3) gte_rs[i]|=0xe00ll;
7839           else gte_rs[i]|=3ll<<(v*2);
7840         }
7841         break;
7842       case FLOAT:
7843       case FCONV:
7844         rs1[i]=0;
7845         rs2[i]=CSREG;
7846         rt1[i]=0;
7847         rt2[i]=0;
7848         break;
7849       case FCOMP:
7850         rs1[i]=FSREG;
7851         rs2[i]=CSREG;
7852         rt1[i]=FSREG;
7853         rt2[i]=0;
7854         break;
7855       case SYSCALL:
7856       case HLECALL:
7857       case INTCALL:
7858         rs1[i]=CCREG;
7859         rs2[i]=0;
7860         rt1[i]=0;
7861         rt2[i]=0;
7862         break;
7863       default:
7864         rs1[i]=0;
7865         rs2[i]=0;
7866         rt1[i]=0;
7867         rt2[i]=0;
7868     }
7869     /* Calculate branch target addresses */
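         // For reference, the MIPS target encodings evaluated below (a summary
         // of the existing logic, not new behaviour):
         //   J/JAL (UJUMP):   target = ((PC+4) & 0xF0000000) | (instr_index << 2)
         //                    where instr_index is bits [25:0] of the opcode word
         //   I-type branches: target = PC+4 + (sign_extend(offset16) << 2)
         // Statically never-taken branches (BNE r,r / BLTZ $zero and friends)
         // simply fall through past the delay slot to PC+8.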
7870     if(type==UJUMP)
7871       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
7872     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
7873       ba[i]=start+i*4+8; // Ignore never taken branch
7874     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
7875       ba[i]=start+i*4+8; // Ignore never taken branch
7876     else if(type==CJUMP||type==SJUMP||type==FJUMP)
7877       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
7878     else ba[i]=-1;
7879     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
7880       int do_in_intrp=0;
7881       // branch in delay slot?
7882       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7883         // don't handle this; make the first branch call the interpreter if it's hit
7884         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
7885         do_in_intrp=1;
7886       }
7887       // basic load delay detection
7888       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
7889         int t=(ba[i-1]-start)/4;
7890         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
7891           // jump target wants DS result - potential load delay effect
7892           SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
7893           do_in_intrp=1;
7894           bt[t+1]=1; // expected return from interpreter
7895         }
7896         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
7897               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
7898           // v0 overwrite like this is a sign of trouble, bail out
7899           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
7900           do_in_intrp=1;
7901         }
7902       }
7903       if(do_in_intrp) {
7904         rs1[i-1]=CCREG;
7905         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
7906         ba[i-1]=-1;
7907         itype[i-1]=INTCALL;
7908         done=2;
7909         i--; // don't compile the DS
7910       }
7911     }
7912     /* Is this the end of the block? */
7913     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
7914       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
7915         done=2;
7916       }
7917       else {
7918         if(stop_after_jal) done=1;
7919         // Stop on BREAK
7920         if((source[i+1]&0xfc00003f)==0x0d) done=1;
7921       }
7922       // Don't recompile stuff that's already compiled
7923       if(check_addr(start+i*4+4)) done=1;
7924       // Don't get too close to the limit
7925       if(i>MAXBLOCK/2) done=1;
7926     }
7927     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
7928     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
7929     if(done==2) {
7930       // Does the block continue due to a branch?
7931       for(j=i-1;j>=0;j--)
7932       {
7933         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
7934         if(ba[j]==start+i*4+4) done=j=0;
7935         if(ba[j]==start+i*4+8) done=j=0;
7936       }
7937     }
7938     //assert(i<MAXBLOCK-1);
7939     if(start+i*4==pagelimit-4) done=1;
7940     assert(start+i*4<pagelimit);
7941     if (i==MAXBLOCK-1) done=1;
7942     // Stop if we're compiling junk
7943     if(itype[i]==NI&&opcode[i]==0x11) {
7944       done=stop_after_jal=1;
7945       SysPrintf("Disabled speculative precompilation\n");
7946     }
7947   }
7948   slen=i;
7949   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
7950     if(start+i*4==pagelimit) {
7951       itype[i-1]=SPAN;
7952     }
7953   }
7954   assert(slen>0);
7955
7956   /* Pass 2 - Register dependencies and branch targets */
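       // (Summary: unneeded_registers() fills unneeded_reg[]/unneeded_reg_upper[]
       // with, for each instruction, the MIPS registers whose values are never
       // read again before being overwritten; pass 3 uses these masks so dead
       // values don't occupy host registers.)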
7957
7958   unneeded_registers(0,slen-1,0);
7959
7960   /* Pass 3 - Register allocation */
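       // (Summary: this pass walks the block forward keeping a running
       // allocation state in `current` - host reg -> MIPS reg mapping plus
       // dirty/const bits.  regs[i] snapshots the state around instruction i;
       // for branches, branch_regs[] below records the state assumed on the
       // taken path so the delay slot is consistent for both outcomes.)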
7961
7962   struct regstat current; // Current register allocations/status
7963   current.is32=1;
7964   current.dirty=0;
7965   current.u=unneeded_reg[0];
7966   current.uu=unneeded_reg_upper[0];
7967   clear_all_regs(current.regmap);
7968   alloc_reg(&current,0,CCREG);
7969   dirty_reg(&current,CCREG);
7970   current.isconst=0;
7971   current.wasconst=0;
7972   current.waswritten=0;
7973   int ds=0;
7974   int cc=0;
7975   int hr=-1;
7976
7977   if((u_int)addr&1) {
7978     // First instruction is delay slot
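         // (Instruction addresses are word-aligned, so bit 0 of addr is free;
         // it appears to be used as an in-band "block starts in a delay slot"
         // flag here.)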
7979     cc=-1;
7980     bt[1]=1;
7981     ds=1;
7982     unneeded_reg[0]=1;
7983     unneeded_reg_upper[0]=1;
7984     current.regmap[HOST_BTREG]=BTREG;
7985   }
7986
7987   for(i=0;i<slen;i++)
7988   {
7989     if(bt[i])
7990     {
7991       int hr;
7992       for(hr=0;hr<HOST_REGS;hr++)
7993       {
7994         // Is this really necessary?
7995         if(current.regmap[hr]==0) current.regmap[hr]=-1;
7996       }
7997       current.isconst=0;
7998       current.waswritten=0;
7999     }
8000     if(i>1)
8001     {
8002       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8003       {
8004         if(rs1[i-2]==0||rs2[i-2]==0)
8005         {
8006           if(rs1[i-2]) {
8007             current.is32|=1LL<<rs1[i-2];
8008             int hr=get_reg(current.regmap,rs1[i-2]|64);
8009             if(hr>=0) current.regmap[hr]=-1;
8010           }
8011           if(rs2[i-2]) {
8012             current.is32|=1LL<<rs2[i-2];
8013             int hr=get_reg(current.regmap,rs2[i-2]|64);
8014             if(hr>=0) current.regmap[hr]=-1;
8015           }
8016         }
8017       }
8018     }
8019     current.is32=-1LL;
8020
8021     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8022     regs[i].wasconst=current.isconst;
8023     regs[i].was32=current.is32;
8024     regs[i].wasdirty=current.dirty;
8025     regs[i].loadedconst=0;
8026     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8027       if(i+1<slen) {
8028         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8029         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8030         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8031         current.u|=1;
8032         current.uu|=1;
8033       } else {
8034         current.u=1;
8035         current.uu=1;
8036       }
8037     } else {
8038       if(i+1<slen) {
8039         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8040         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8041         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8042         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8043         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8044         current.u|=1;
8045         current.uu|=1;
8046       } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
8047     }
8048     is_ds[i]=ds;
8049     if(ds) {
8050       ds=0; // Skip delay slot, already allocated as part of branch
8051       // ...but we need to alloc it in case something jumps here
8052       if(i+1<slen) {
8053         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8054         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8055       }else{
8056         current.u=branch_unneeded_reg[i-1];
8057         current.uu=branch_unneeded_reg_upper[i-1];
8058       }
8059       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8060       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8061       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8062       current.u|=1;
8063       current.uu|=1;
8064       struct regstat temp;
8065       memcpy(&temp,&current,sizeof(current));
8066       temp.wasdirty=temp.dirty;
8067       temp.was32=temp.is32;
8068       // TODO: Take into account unconditional branches, as below
8069       delayslot_alloc(&temp,i);
8070       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8071       regs[i].wasdirty=temp.wasdirty;
8072       regs[i].was32=temp.was32;
8073       regs[i].dirty=temp.dirty;
8074       regs[i].is32=temp.is32;
8075       regs[i].isconst=0;
8076       regs[i].wasconst=0;
8077       current.isconst=0;
8078       // Create entry (branch target) regmap
8079       for(hr=0;hr<HOST_REGS;hr++)
8080       {
8081         int r=temp.regmap[hr];
8082         if(r>=0) {
8083           if(r!=regmap_pre[i][hr]) {
8084             regs[i].regmap_entry[hr]=-1;
8085           }
8086           else
8087           {
8088             if(r<64){
8089               if((current.u>>r)&1) {
8090                 regs[i].regmap_entry[hr]=-1;
8091                 regs[i].regmap[hr]=-1;
8092                 //Don't clear regs in the delay slot as the branch might need them
8093                 //current.regmap[hr]=-1;
8094               }else
8095                 regs[i].regmap_entry[hr]=r;
8096             }
8097             else {
8098               if((current.uu>>(r&63))&1) {
8099                 regs[i].regmap_entry[hr]=-1;
8100                 regs[i].regmap[hr]=-1;
8101                 //Don't clear regs in the delay slot as the branch might need them
8102                 //current.regmap[hr]=-1;
8103               }else
8104                 regs[i].regmap_entry[hr]=r;
8105             }
8106           }
8107         } else {
8108           // First instruction expects CCREG to be allocated
8109           if(i==0&&hr==HOST_CCREG)
8110             regs[i].regmap_entry[hr]=CCREG;
8111           else
8112             regs[i].regmap_entry[hr]=-1;
8113         }
8114       }
8115     }
8116     else { // Not delay slot
8117       switch(itype[i]) {
8118         case UJUMP:
8119           //current.isconst=0; // DEBUG
8120           //current.wasconst=0; // DEBUG
8121           //regs[i].wasconst=0; // DEBUG
8122           clear_const(&current,rt1[i]);
8123           alloc_cc(&current,i);
8124           dirty_reg(&current,CCREG);
8125           if (rt1[i]==31) {
8126             alloc_reg(&current,i,31);
8127             dirty_reg(&current,31);
8128             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8129             //assert(rt1[i+1]!=rt1[i]);
8130             #ifdef REG_PREFETCH
8131             alloc_reg(&current,i,PTEMP);
8132             #endif
8133             //current.is32|=1LL<<rt1[i];
8134           }
8135           ooo[i]=1;
8136           delayslot_alloc(&current,i+1);
8137           //current.isconst=0; // DEBUG
8138           ds=1;
8139           //printf("i=%d, isconst=%x\n",i,current.isconst);
8140           break;
8141         case RJUMP:
8142           //current.isconst=0;
8143           //current.wasconst=0;
8144           //regs[i].wasconst=0;
8145           clear_const(&current,rs1[i]);
8146           clear_const(&current,rt1[i]);
8147           alloc_cc(&current,i);
8148           dirty_reg(&current,CCREG);
8149           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8150             alloc_reg(&current,i,rs1[i]);
8151             if (rt1[i]!=0) {
8152               alloc_reg(&current,i,rt1[i]);
8153               dirty_reg(&current,rt1[i]);
8154               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
8155               assert(rt1[i+1]!=rt1[i]);
8156               #ifdef REG_PREFETCH
8157               alloc_reg(&current,i,PTEMP);
8158               #endif
8159             }
8160             #ifdef USE_MINI_HT
8161             if(rs1[i]==31) { // JALR
8162               alloc_reg(&current,i,RHASH);
8163               #ifndef HOST_IMM_ADDR32
8164               alloc_reg(&current,i,RHTBL);
8165               #endif
8166             }
8167             #endif
8168             delayslot_alloc(&current,i+1);
8169           } else {
8170             // The delay slot overwrites our source register,
8171             // allocate a temporary register to hold the old value.
8172             current.isconst=0;
8173             current.wasconst=0;
8174             regs[i].wasconst=0;
8175             delayslot_alloc(&current,i+1);
8176             current.isconst=0;
8177             alloc_reg(&current,i,RTEMP);
8178           }
8179           //current.isconst=0; // DEBUG
8180           ooo[i]=1;
8181           ds=1;
8182           break;
8183         case CJUMP:
8184           //current.isconst=0;
8185           //current.wasconst=0;
8186           //regs[i].wasconst=0;
8187           clear_const(&current,rs1[i]);
8188           clear_const(&current,rs2[i]);
8189           if((opcode[i]&0x3E)==4) // BEQ/BNE
8190           {
8191             alloc_cc(&current,i);
8192             dirty_reg(&current,CCREG);
8193             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8194             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8195             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8196             {
8197               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8198               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8199             }
8200             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8201                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8202               // The delay slot overwrites one of our conditions.
8203               // Allocate the branch condition registers instead.
8204               current.isconst=0;
8205               current.wasconst=0;
8206               regs[i].wasconst=0;
8207               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8208               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8209               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8210               {
8211                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8212                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8213               }
8214             }
8215             else
8216             {
8217               ooo[i]=1;
8218               delayslot_alloc(&current,i+1);
8219             }
8220           }
8221           else
8222           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8223           {
8224             alloc_cc(&current,i);
8225             dirty_reg(&current,CCREG);
8226             alloc_reg(&current,i,rs1[i]);
8227             if(!(current.is32>>rs1[i]&1))
8228             {
8229               alloc_reg64(&current,i,rs1[i]);
8230             }
8231             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8232               // The delay slot overwrites one of our conditions.
8233               // Allocate the branch condition registers instead.
8234               current.isconst=0;
8235               current.wasconst=0;
8236               regs[i].wasconst=0;
8237               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8238               if(!((current.is32>>rs1[i])&1))
8239               {
8240                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8241               }
8242             }
8243             else
8244             {
8245               ooo[i]=1;
8246               delayslot_alloc(&current,i+1);
8247             }
8248           }
8249           else
8250           // Don't alloc the delay slot yet because we might not execute it
8251           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8252           {
8253             current.isconst=0;
8254             current.wasconst=0;
8255             regs[i].wasconst=0;
8256             alloc_cc(&current,i);
8257             dirty_reg(&current,CCREG);
8258             alloc_reg(&current,i,rs1[i]);
8259             alloc_reg(&current,i,rs2[i]);
8260             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8261             {
8262               alloc_reg64(&current,i,rs1[i]);
8263               alloc_reg64(&current,i,rs2[i]);
8264             }
8265           }
8266           else
8267           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8268           {
8269             current.isconst=0;
8270             current.wasconst=0;
8271             regs[i].wasconst=0;
8272             alloc_cc(&current,i);
8273             dirty_reg(&current,CCREG);
8274             alloc_reg(&current,i,rs1[i]);
8275             if(!(current.is32>>rs1[i]&1))
8276             {
8277               alloc_reg64(&current,i,rs1[i]);
8278             }
8279           }
8280           ds=1;
8281           //current.isconst=0;
8282           break;
8283         case SJUMP:
8284           //current.isconst=0;
8285           //current.wasconst=0;
8286           //regs[i].wasconst=0;
8287           clear_const(&current,rs1[i]);
8288           clear_const(&current,rt1[i]);
8289           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8290           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8291           {
8292             alloc_cc(&current,i);
8293             dirty_reg(&current,CCREG);
8294             alloc_reg(&current,i,rs1[i]);
8295             if(!(current.is32>>rs1[i]&1))
8296             {
8297               alloc_reg64(&current,i,rs1[i]);
8298             }
8299             if (rt1[i]==31) { // BLTZAL/BGEZAL
8300               alloc_reg(&current,i,31);
8301               dirty_reg(&current,31);
8302               //#ifdef REG_PREFETCH
8303               //alloc_reg(&current,i,PTEMP);
8304               //#endif
8305               //current.is32|=1LL<<rt1[i];
8306             }
8307             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
8308                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
8309               // Allocate the branch condition registers instead.
8310               current.isconst=0;
8311               current.wasconst=0;
8312               regs[i].wasconst=0;
8313               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8314               if(!((current.is32>>rs1[i])&1))
8315               {
8316                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8317               }
8318             }
8319             else
8320             {
8321               ooo[i]=1;
8322               delayslot_alloc(&current,i+1);
8323             }
8324           }
8325           else
8326           // Don't alloc the delay slot yet because we might not execute it
8327           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8328           {
8329             current.isconst=0;
8330             current.wasconst=0;
8331             regs[i].wasconst=0;
8332             alloc_cc(&current,i);
8333             dirty_reg(&current,CCREG);
8334             alloc_reg(&current,i,rs1[i]);
8335             if(!(current.is32>>rs1[i]&1))
8336             {
8337               alloc_reg64(&current,i,rs1[i]);
8338             }
8339           }
8340           ds=1;
8341           //current.isconst=0;
8342           break;
8343         case FJUMP:
8344           current.isconst=0;
8345           current.wasconst=0;
8346           regs[i].wasconst=0;
8347           if(likely[i]==0) // BC1F/BC1T
8348           {
8349             // TODO: Theoretically we can run out of registers here on x86.
8350             // The delay slot can allocate up to six registers, and we need to check
8351             // CSREG before executing the delay slot.  Possibly we can drop
8352             // the cycle count and then reload it after checking that the
8353             // FPU is in a usable state, or don't do out-of-order execution.
8354             alloc_cc(&current,i);
8355             dirty_reg(&current,CCREG);
8356             alloc_reg(&current,i,FSREG);
8357             alloc_reg(&current,i,CSREG);
8358             if(itype[i+1]==FCOMP) {
8359               // The delay slot overwrites the branch condition.
8360               // Allocate the branch condition registers instead.
8361               alloc_cc(&current,i);
8362               dirty_reg(&current,CCREG);
8363               alloc_reg(&current,i,CSREG);
8364               alloc_reg(&current,i,FSREG);
8365             }
8366             else {
8367               ooo[i]=1;
8368               delayslot_alloc(&current,i+1);
8369               alloc_reg(&current,i+1,CSREG);
8370             }
8371           }
8372           else
8373           // Don't alloc the delay slot yet because we might not execute it
8374           if(likely[i]) // BC1FL/BC1TL
8375           {
8376             alloc_cc(&current,i);
8377             dirty_reg(&current,CCREG);
8378             alloc_reg(&current,i,CSREG);
8379             alloc_reg(&current,i,FSREG);
8380           }
8381           ds=1;
8382           current.isconst=0;
8383           break;
8384         case IMM16:
8385           imm16_alloc(&current,i);
8386           break;
8387         case LOAD:
8388         case LOADLR:
8389           load_alloc(&current,i);
8390           break;
8391         case STORE:
8392         case STORELR:
8393           store_alloc(&current,i);
8394           break;
8395         case ALU:
8396           alu_alloc(&current,i);
8397           break;
8398         case SHIFT:
8399           shift_alloc(&current,i);
8400           break;
8401         case MULTDIV:
8402           multdiv_alloc(&current,i);
8403           break;
8404         case SHIFTIMM:
8405           shiftimm_alloc(&current,i);
8406           break;
8407         case MOV:
8408           mov_alloc(&current,i);
8409           break;
8410         case COP0:
8411           cop0_alloc(&current,i);
8412           break;
8413         case COP1:
8414         case COP2:
8415           cop1_alloc(&current,i);
8416           break;
8417         case C1LS:
8418           c1ls_alloc(&current,i);
8419           break;
8420         case C2LS:
8421           c2ls_alloc(&current,i);
8422           break;
8423         case C2OP:
8424           c2op_alloc(&current,i);
8425           break;
8426         case FCONV:
8427           fconv_alloc(&current,i);
8428           break;
8429         case FLOAT:
8430           float_alloc(&current,i);
8431           break;
8432         case FCOMP:
8433           fcomp_alloc(&current,i);
8434           break;
8435         case SYSCALL:
8436         case HLECALL:
8437         case INTCALL:
8438           syscall_alloc(&current,i);
8439           break;
8440         case SPAN:
8441           pagespan_alloc(&current,i);
8442           break;
8443       }
8444
8445       // Drop the upper half of registers that have become 32-bit
8446       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8447       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8448         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8449         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8450         current.uu|=1;
8451       } else {
8452         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
8453         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8454         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8455         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8456         current.uu|=1;
8457       }
8458
8459       // Create entry (branch target) regmap
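           // (regmap_entry[] describes what an incoming jump must provide on
           // entry to this instruction: -1 means the value isn't expected from
           // the predecessor (no old mapping, a temporary, or unneeded here),
           // otherwise it names the MIPS register this host reg must hold.)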
8460       for(hr=0;hr<HOST_REGS;hr++)
8461       {
8462         int r,or;
8463         r=current.regmap[hr];
8464         if(r>=0) {
8465           if(r!=regmap_pre[i][hr]) {
8466             // TODO: delay slot (?)
8467             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
8468             if(or<0||(r&63)>=TEMPREG){
8469               regs[i].regmap_entry[hr]=-1;
8470             }
8471             else
8472             {
8473               // Just move it to a different register
8474               regs[i].regmap_entry[hr]=r;
8475               // If it was dirty before, it's still dirty
8476               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
8477             }
8478           }
8479           else
8480           {
8481             // Unneeded
8482             if(r==0){
8483               regs[i].regmap_entry[hr]=0;
8484             }
8485             else
8486             if(r<64){
8487               if((current.u>>r)&1) {
8488                 regs[i].regmap_entry[hr]=-1;
8489                 //regs[i].regmap[hr]=-1;
8490                 current.regmap[hr]=-1;
8491               }else
8492                 regs[i].regmap_entry[hr]=r;
8493             }
8494             else {
8495               if((current.uu>>(r&63))&1) {
8496                 regs[i].regmap_entry[hr]=-1;
8497                 //regs[i].regmap[hr]=-1;
8498                 current.regmap[hr]=-1;
8499               }else
8500                 regs[i].regmap_entry[hr]=r;
8501             }
8502           }
8503         } else {
8504           // Branches expect CCREG to be allocated at the target
8505           if(regmap_pre[i][hr]==CCREG)
8506             regs[i].regmap_entry[hr]=CCREG;
8507           else
8508             regs[i].regmap_entry[hr]=-1;
8509         }
8510       }
8511       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
8512     }
8513
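         // (waswritten tracks MIPS regs recently used as a store base with a
         // small offset; the bit is cleared once the reg is overwritten or used
         // as a base with a large offset.  Presumably this lets the store code
         // skip some redundant checks later on.)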
8514     if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
8515       current.waswritten|=1<<rs1[i-1];
8516     current.waswritten&=~(1<<rt1[i]);
8517     current.waswritten&=~(1<<rt2[i]);
8518     if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
8519       current.waswritten&=~(1<<rs1[i]);
8520
8521     /* Branch post-alloc */
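         // (With delay slot i now processed, derive branch_regs[i-1]: the
         // register state assumed on the taken path of the branch at i-1.
         // "Likely" branches get their delay slot allocated here, since it only
         // runs when the branch is taken; ordinary branches whose delay slot
         // clobbers a condition register get the slot allocated after the test,
         // as the per-case comments below note.)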
8522     if(i>0)
8523     {
8524       current.was32=current.is32;
8525       current.wasdirty=current.dirty;
8526       switch(itype[i-1]) {
8527         case UJUMP:
8528           memcpy(&branch_regs[i-1],&current,sizeof(current));
8529           branch_regs[i-1].isconst=0;
8530           branch_regs[i-1].wasconst=0;
8531           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8532           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8533           alloc_cc(&branch_regs[i-1],i-1);
8534           dirty_reg(&branch_regs[i-1],CCREG);
8535           if(rt1[i-1]==31) { // JAL
8536             alloc_reg(&branch_regs[i-1],i-1,31);
8537             dirty_reg(&branch_regs[i-1],31);
8538             branch_regs[i-1].is32|=1LL<<31;
8539           }
8540           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8541           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8542           break;
8543         case RJUMP:
8544           memcpy(&branch_regs[i-1],&current,sizeof(current));
8545           branch_regs[i-1].isconst=0;
8546           branch_regs[i-1].wasconst=0;
8547           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8548           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8549           alloc_cc(&branch_regs[i-1],i-1);
8550           dirty_reg(&branch_regs[i-1],CCREG);
8551           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
8552           if(rt1[i-1]!=0) { // JALR
8553             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
8554             dirty_reg(&branch_regs[i-1],rt1[i-1]);
8555             branch_regs[i-1].is32|=1LL<<rt1[i-1];
8556           }
8557           #ifdef USE_MINI_HT
8558           if(rs1[i-1]==31) { // JALR
8559             alloc_reg(&branch_regs[i-1],i-1,RHASH);
8560             #ifndef HOST_IMM_ADDR32
8561             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
8562             #endif
8563           }
8564           #endif
8565           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8566           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8567           break;
8568         case CJUMP:
8569           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
8570           {
8571             alloc_cc(&current,i-1);
8572             dirty_reg(&current,CCREG);
8573             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
8574                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
8575               // The delay slot overwrote one of our conditions
8576               // Delay slot goes after the test (in order)
8577               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8578               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8579               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8580               current.u|=1;
8581               current.uu|=1;
8582               delayslot_alloc(&current,i);
8583               current.isconst=0;
8584             }
8585             else
8586             {
8587               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8588               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8589               // Alloc the branch condition registers
8590               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
8591               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
8592               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
8593               {
8594                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
8595                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
8596               }
8597             }
8598             memcpy(&branch_regs[i-1],&current,sizeof(current));
8599             branch_regs[i-1].isconst=0;
8600             branch_regs[i-1].wasconst=0;
8601             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8602             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8603           }
8604           else
8605           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
8606           {
8607             alloc_cc(&current,i-1);
8608             dirty_reg(&current,CCREG);
8609             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8610               // The delay slot overwrote the branch condition
8611               // Delay slot goes after the test (in order)
8612               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8613               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8614               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8615               current.u|=1;
8616               current.uu|=1;
8617               delayslot_alloc(&current,i);
8618               current.isconst=0;
8619             }
8620             else
8621             {
8622               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8623               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8624               // Alloc the branch condition register
8625               alloc_reg(&current,i-1,rs1[i-1]);
8626               if(!(current.is32>>rs1[i-1]&1))
8627               {
8628                 alloc_reg64(&current,i-1,rs1[i-1]);
8629               }
8630             }
8631             memcpy(&branch_regs[i-1],&current,sizeof(current));
8632             branch_regs[i-1].isconst=0;
8633             branch_regs[i-1].wasconst=0;
8634             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8635             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8636           }
8637           else
8638           // Alloc the delay slot in case the branch is taken
8639           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
8640           {
8641             memcpy(&branch_regs[i-1],&current,sizeof(current));
8642             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8643             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8644             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8645             alloc_cc(&branch_regs[i-1],i);
8646             dirty_reg(&branch_regs[i-1],CCREG);
8647             delayslot_alloc(&branch_regs[i-1],i);
8648             branch_regs[i-1].isconst=0;
8649             alloc_reg(&current,i,CCREG); // Not taken path
8650             dirty_reg(&current,CCREG);
8651             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8652           }
8653           else
8654           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
8655           {
8656             memcpy(&branch_regs[i-1],&current,sizeof(current));
8657             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8658             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8659             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8660             alloc_cc(&branch_regs[i-1],i);
8661             dirty_reg(&branch_regs[i-1],CCREG);
8662             delayslot_alloc(&branch_regs[i-1],i);
8663             branch_regs[i-1].isconst=0;
8664             alloc_reg(&current,i,CCREG); // Not taken path
8665             dirty_reg(&current,CCREG);
8666             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8667           }
8668           break;
8669         case SJUMP:
8670           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
8671           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
8672           {
8673             alloc_cc(&current,i-1);
8674             dirty_reg(&current,CCREG);
8675             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8676               // The delay slot overwrote the branch condition
8677               // Delay slot goes after the test (in order)
8678               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8679               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8680               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8681               current.u|=1;
8682               current.uu|=1;
8683               delayslot_alloc(&current,i);
8684               current.isconst=0;
8685             }
8686             else
8687             {
8688               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8689               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8690               // Alloc the branch condition register
8691               alloc_reg(&current,i-1,rs1[i-1]);
8692               if(!(current.is32>>rs1[i-1]&1))
8693               {
8694                 alloc_reg64(&current,i-1,rs1[i-1]);
8695               }
8696             }
8697             memcpy(&branch_regs[i-1],&current,sizeof(current));
8698             branch_regs[i-1].isconst=0;
8699             branch_regs[i-1].wasconst=0;
8700             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8701             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8702           }
8703           else
8704           // Alloc the delay slot in case the branch is taken
8705           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
8706           {
8707             memcpy(&branch_regs[i-1],&current,sizeof(current));
8708             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8709             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8710             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8711             alloc_cc(&branch_regs[i-1],i);
8712             dirty_reg(&branch_regs[i-1],CCREG);
8713             delayslot_alloc(&branch_regs[i-1],i);
8714             branch_regs[i-1].isconst=0;
8715             alloc_reg(&current,i,CCREG); // Not taken path
8716             dirty_reg(&current,CCREG);
8717             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8718           }
8719           // FIXME: BLTZAL/BGEZAL
8720           if(opcode2[i-1]&0x10) { // BxxZAL
8721             alloc_reg(&branch_regs[i-1],i-1,31);
8722             dirty_reg(&branch_regs[i-1],31);
8723             branch_regs[i-1].is32|=1LL<<31;
8724           }
8725           break;
8726         case FJUMP:
8727           if(likely[i-1]==0) // BC1F/BC1T
8728           {
8729             alloc_cc(&current,i-1);
8730             dirty_reg(&current,CCREG);
8731             if(itype[i]==FCOMP) {
8732               // The delay slot overwrote the branch condition
8733               // Delay slot goes after the test (in order)
8734               delayslot_alloc(&current,i);
8735               current.isconst=0;
8736             }
8737             else
8738             {
8739               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8740               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8741               // Alloc the branch condition register
8742               alloc_reg(&current,i-1,FSREG);
8743             }
8744             memcpy(&branch_regs[i-1],&current,sizeof(current));
8745             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8746           }
8747           else // BC1FL/BC1TL
8748           {
8749             // Alloc the delay slot in case the branch is taken
8750             memcpy(&branch_regs[i-1],&current,sizeof(current));
8751             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8752             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8753             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8754             alloc_cc(&branch_regs[i-1],i);
8755             dirty_reg(&branch_regs[i-1],CCREG);
8756             delayslot_alloc(&branch_regs[i-1],i);
8757             branch_regs[i-1].isconst=0;
8758             alloc_reg(&current,i,CCREG); // Not taken path
8759             dirty_reg(&current,CCREG);
8760             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8761           }
8762           break;
8763       }
8764
8765       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
8766       {
8767         if(rt1[i-1]==31) // JAL/JALR
8768         {
8769           // Subroutine call will return here, don't alloc any registers
8770           current.is32=1;
8771           current.dirty=0;
8772           clear_all_regs(current.regmap);
8773           alloc_reg(&current,i,CCREG);
8774           dirty_reg(&current,CCREG);
8775         }
8776         else if(i+1<slen)
8777         {
8778           // Internal branch will jump here, match registers to caller
8779           current.is32=0x3FFFFFFFFLL;
8780           current.dirty=0;
8781           clear_all_regs(current.regmap);
8782           alloc_reg(&current,i,CCREG);
8783           dirty_reg(&current,CCREG);
8784           for(j=i-1;j>=0;j--)
8785           {
8786             if(ba[j]==start+i*4+4) {
8787               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
8788               current.is32=branch_regs[j].is32;
8789               current.dirty=branch_regs[j].dirty;
8790               break;
8791             }
8792           }
8793           while(j>=0) {
8794             if(ba[j]==start+i*4+4) {
8795               for(hr=0;hr<HOST_REGS;hr++) {
8796                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
8797                   current.regmap[hr]=-1;
8798                 }
8799                 current.is32&=branch_regs[j].is32;
8800                 current.dirty&=branch_regs[j].dirty;
8801               }
8802             }
8803             j--;
8804           }
8805         }
8806       }
8807     }
8808
8809     // Count cycles in between branches
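         // (ccadj[i] is the cycle count accumulated since the last branch or
         // syscall; the counter resets right after those instructions.  The
         // chain below applies rough per-instruction cost heuristics.)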
8810     ccadj[i]=cc;
8811     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
8812     {
8813       cc=0;
8814     }
8815 #if !defined(DRC_DBG)
8816     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
8817     {
8818       // GTE runs in parallel until accessed, divide by 2 for a rough guess
8819       cc+=gte_cycletab[source[i]&0x3f]/2;
8820     }
8821     else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // loads/stores cause weird timing issues
8822     {
8823       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
8824     }
8825     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
8826     {
8827       cc+=4;
8828     }
8829     else if(itype[i]==C2LS)
8830     {
8831       cc+=4;
8832     }
8833 #endif
8834     else
8835     {
8836       cc++;
8837     }
8838
8839     flush_dirty_uppers(&current);
8840     if(!is_ds[i]) {
8841       regs[i].is32=current.is32;
8842       regs[i].dirty=current.dirty;
8843       regs[i].isconst=current.isconst;
8844       memcpy(constmap[i],current_constmap,sizeof(current_constmap));
8845     }
8846     for(hr=0;hr<HOST_REGS;hr++) {
8847       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
8848         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
8849           regs[i].wasconst&=~(1<<hr);
8850         }
8851       }
8852     }
8853     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
8854     regs[i].waswritten=current.waswritten;
8855   }
8856
8857   /* Pass 4 - Cull unused host registers */
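       // (Summary: scan backwards building nr, a bitmask of host registers
       // whose contents are still needed at each instruction; it is saved as
       // needed_reg[i], and mappings for host regs outside the mask are dropped
       // so nothing is emitted to keep them live.)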
8858
8859   uint64_t nr=0;
8860
8861   for (i=slen-1;i>=0;i--)
8862   {
8863     int hr;
8864     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
8865     {
8866       if(ba[i]<start || ba[i]>=(start+slen*4))
8867       {
8868         // Branch out of this block, don't need anything
8869         nr=0;
8870       }
8871       else
8872       {
8873         // Internal branch
8874         // Need whatever matches the target
8875         nr=0;
8876         int t=(ba[i]-start)>>2;
8877         for(hr=0;hr<HOST_REGS;hr++)
8878         {
8879           if(regs[i].regmap_entry[hr]>=0) {
8880             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
8881           }
8882         }
8883       }
8884       // Conditional branch may need registers for following instructions
8885       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
8886       {
8887         if(i<slen-2) {
8888           nr|=needed_reg[i+2];
8889           for(hr=0;hr<HOST_REGS;hr++)
8890           {
8891             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
8892             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
8893           }
8894         }
8895       }
8896       // Don't need stuff which is overwritten
8897       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8898       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8899       // Merge in delay slot
8900       for(hr=0;hr<HOST_REGS;hr++)
8901       {
8902         if(!likely[i]) {
8903           // These are overwritten unless the branch is "likely"
8904           // and the delay slot is nullified if not taken
8905           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8906           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8907         }
8908         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8909         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8910         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8911         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8912         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8913         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8914         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8915         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8916         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
8917           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8918           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8919         }
8920         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
8921           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8922           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8923         }
8924         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
8925           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8926           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8927         }
8928       }
8929     }
8930     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
8931     {
8932       // SYSCALL instruction (software interrupt)
8933       nr=0;
8934     }
8935     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
8936     {
8937       // ERET instruction (return from interrupt)
8938       nr=0;
8939     }
8940     else // Non-branch
8941     {
8942       if(i<slen-1) {
8943         for(hr=0;hr<HOST_REGS;hr++) {
8944           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
8945           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
8946           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8947           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8948         }
8949       }
8950     }
8951     for(hr=0;hr<HOST_REGS;hr++)
8952     {
8953       // Overwritten registers are not needed
8954       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8955       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8956       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8957       // Source registers are needed
8958       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8959       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8960       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
8961       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
8962       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8963       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8964       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8965       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8966       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
8967         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8968         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8969       }
8970       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
8971         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8972         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8973       }
8974       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
8975         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8976         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8977       }
8978       // Don't store a register immediately after writing it,
8979       // as that may prevent dual-issue.
8980       // But do so if this is a branch target, otherwise we
8981       // might have to load the register before the branch.
8982       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
8983         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
8984            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
8985           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8986           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8987         }
8988         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
8989            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
8990           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8991           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8992         }
8993       }
8994     }
8995     // Cycle count is needed at branches.  Assume it is needed at the target too.
8996     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
8997       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
8998       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
8999     }
9000     // Save it
9001     needed_reg[i]=nr;
9002
9003     // Deallocate unneeded registers
9004     for(hr=0;hr<HOST_REGS;hr++)
9005     {
9006       if(!((nr>>hr)&1)) {
9007         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9008         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9009            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9010            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9011         {
9012           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9013           {
9014             if(likely[i]) {
9015               regs[i].regmap[hr]=-1;
9016               regs[i].isconst&=~(1<<hr);
9017               if(i<slen-2) {
9018                 regmap_pre[i+2][hr]=-1;
9019                 regs[i+2].wasconst&=~(1<<hr);
9020               }
9021             }
9022           }
9023         }
9024         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9025         {
9026           int d1=0,d2=0,map=0,temp=0;
9027           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9028           {
9029             d1=dep1[i+1];
9030             d2=dep2[i+1];
9031           }
9032           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9033              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9034             map=INVCP;
9035           }
9036           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9037              itype[i+1]==C1LS || itype[i+1]==C2LS)
9038             temp=FTEMP;
9039           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9040              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9041              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9042              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9043              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9044              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9045              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9046              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9047              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9048              regs[i].regmap[hr]!=map )
9049           {
9050             regs[i].regmap[hr]=-1;
9051             regs[i].isconst&=~(1<<hr);
9052             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9053                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9054                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9055                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9056                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9057                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9058                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9059                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9060                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9061                branch_regs[i].regmap[hr]!=map)
9062             {
9063               branch_regs[i].regmap[hr]=-1;
9064               branch_regs[i].regmap_entry[hr]=-1;
9065               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9066               {
9067                 if(!likely[i]&&i<slen-2) {
9068                   regmap_pre[i+2][hr]=-1;
9069                   regs[i+2].wasconst&=~(1<<hr);
9070                 }
9071               }
9072             }
9073           }
9074         }
9075         else
9076         {
9077           // Non-branch
9078           if(i>0)
9079           {
9080             int d1=0,d2=0,map=-1,temp=-1;
9081             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9082             {
9083               d1=dep1[i];
9084               d2=dep2[i];
9085             }
9086             if(itype[i]==STORE || itype[i]==STORELR ||
9087                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9088               map=INVCP;
9089             }
9090             if(itype[i]==LOADLR || itype[i]==STORELR ||
9091                itype[i]==C1LS || itype[i]==C2LS)
9092               temp=FTEMP;
9093             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9094                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9095                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9096                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9097                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9098                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9099             {
9100               if(i<slen-1&&!is_ds[i]) {
9101                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9102                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9103                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9104                 {
9105                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9106                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9107                 }
9108                 regmap_pre[i+1][hr]=-1;
9109                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9110                 regs[i+1].wasconst&=~(1<<hr);
9111               }
9112               regs[i].regmap[hr]=-1;
9113               regs[i].isconst&=~(1<<hr);
9114             }
9115           }
9116         }
9117       }
9118     }
9119   }
9120
9121   /* Pass 5 - Pre-allocate registers */
9122
9123   // If a register is allocated during a loop, try to allocate it for the
9124   // entire loop, if possible.  This avoids loading/storing registers
9125   // inside of the loop.
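       // (Roughly: f_regmap[] records the host reg -> MIPS reg assignment seen
       // at a backward branch; the code below tries to extend that assignment
       // back towards the branch target, giving up on a conflicting allocation,
       // too few free registers, or an unneeded-register boundary.)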
9126
9127   signed char f_regmap[HOST_REGS];
9128   clear_all_regs(f_regmap);
9129   for(i=0;i<slen-1;i++)
9130   {
9131     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9132     {
9133       if(ba[i]>=start && ba[i]<(start+i*4))
9134       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9135       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9136       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9137       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9138       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9139       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9140       {
9141         int t=(ba[i]-start)>>2;
9142         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9143         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
9144         for(hr=0;hr<HOST_REGS;hr++)
9145         {
9146           if(regs[i].regmap[hr]>64) {
9147             if(!((regs[i].dirty>>hr)&1))
9148               f_regmap[hr]=regs[i].regmap[hr];
9149             else f_regmap[hr]=-1;
9150           }
9151           else if(regs[i].regmap[hr]>=0) {
9152             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9153               // dealloc old register
9154               int n;
9155               for(n=0;n<HOST_REGS;n++)
9156               {
9157                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9158               }
9159               // and alloc new one
9160               f_regmap[hr]=regs[i].regmap[hr];
9161             }
9162           }
9163           if(branch_regs[i].regmap[hr]>64) {
9164             if(!((branch_regs[i].dirty>>hr)&1))
9165               f_regmap[hr]=branch_regs[i].regmap[hr];
9166             else f_regmap[hr]=-1;
9167           }
9168           else if(branch_regs[i].regmap[hr]>=0) {
9169             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9170               // dealloc old register
9171               int n;
9172               for(n=0;n<HOST_REGS;n++)
9173               {
9174                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9175               }
9176               // and alloc new one
9177               f_regmap[hr]=branch_regs[i].regmap[hr];
9178             }
9179           }
9180           if(ooo[i]) {
9181             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
9182               f_regmap[hr]=branch_regs[i].regmap[hr];
9183           }else{
9184             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
9185               f_regmap[hr]=branch_regs[i].regmap[hr];
9186           }
9187           // Avoid dirty->clean transition
9188           #ifdef DESTRUCTIVE_WRITEBACK
9189           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9190           #endif
9191           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
9192           // case above, but it's always a good idea.  We can't hoist the
9193           // load if the register was already allocated, so there's no point
9194           // wasting time analyzing most of these cases.  It only "succeeds"
9195           // when the mapping was different and the load can be replaced with
9196           // a mov, which is of negligible benefit.  So such cases are
9197           // skipped below.
9198           if(f_regmap[hr]>0) {
9199             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
9200               int r=f_regmap[hr];
9201               for(j=t;j<=i;j++)
9202               {
9203                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9204                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9205                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9206                 if(r>63) {
9207                   // NB This can exclude the case where the upper-half
9208                   // register is lower numbered than the lower-half
9209                   // register.  Not sure if it's worth fixing...
9210                   if(get_reg(regs[j].regmap,r&63)<0) break;
9211                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
9212                   if(regs[j].is32&(1LL<<(r&63))) break;
9213                 }
9214                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9215                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9216                   int k;
9217                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9218                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9219                     if(r>63) {
9220                       if(get_reg(regs[i].regmap,r&63)<0) break;
9221                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9222                     }
9223                     k=i;
9224                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9225                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9226                         //printf("no free regs for store %x\n",start+(k-1)*4);
9227                         break;
9228                       }
9229                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9230                         //printf("no-match due to different register\n");
9231                         break;
9232                       }
9233                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9234                         //printf("no-match due to branch\n");
9235                         break;
9236                       }
9237                       // call/ret fast path assumes no registers allocated
9238                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
9239                         break;
9240                       }
9241                       if(r>63) {
9242                         // NB This can exclude the case where the upper-half
9243                         // register is lower numbered than the lower-half
9244                         // register.  Not sure if it's worth fixing...
9245                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9246                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9247                       }
9248                       k--;
9249                     }
9250                     if(i<slen-1) {
9251                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9252                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9253                         //printf("bad match after branch\n");
9254                         break;
9255                       }
9256                     }
9257                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9258                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9259                       while(k<i) {
9260                         regs[k].regmap_entry[hr]=f_regmap[hr];
9261                         regs[k].regmap[hr]=f_regmap[hr];
9262                         regmap_pre[k+1][hr]=f_regmap[hr];
9263                         regs[k].wasdirty&=~(1<<hr);
9264                         regs[k].dirty&=~(1<<hr);
9265                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9266                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9267                         regs[k].wasconst&=~(1<<hr);
9268                         regs[k].isconst&=~(1<<hr);
9269                         k++;
9270                       }
9271                     }
9272                     else {
9273                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9274                       break;
9275                     }
9276                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9277                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9278                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9279                       regs[i].regmap_entry[hr]=f_regmap[hr];
9280                       regs[i].regmap[hr]=f_regmap[hr];
9281                       regs[i].wasdirty&=~(1<<hr);
9282                       regs[i].dirty&=~(1<<hr);
9283                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9284                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9285                       regs[i].wasconst&=~(1<<hr);
9286                       regs[i].isconst&=~(1<<hr);
9287                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9288                       branch_regs[i].wasdirty&=~(1<<hr);
9289                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9290                       branch_regs[i].regmap[hr]=f_regmap[hr];
9291                       branch_regs[i].dirty&=~(1<<hr);
9292                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9293                       branch_regs[i].wasconst&=~(1<<hr);
9294                       branch_regs[i].isconst&=~(1<<hr);
9295                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9296                         regmap_pre[i+2][hr]=f_regmap[hr];
9297                         regs[i+2].wasdirty&=~(1<<hr);
9298                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9299                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9300                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9301                       }
9302                     }
9303                   }
9304                   for(k=t;k<j;k++) {
9305                     // Alloc register clean at beginning of loop,
9306                     // but may dirty it in pass 6
9307                     regs[k].regmap_entry[hr]=f_regmap[hr];
9308                     regs[k].regmap[hr]=f_regmap[hr];
9309                     regs[k].dirty&=~(1<<hr);
9310                     regs[k].wasconst&=~(1<<hr);
9311                     regs[k].isconst&=~(1<<hr);
9312                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
9313                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
9314                       branch_regs[k].regmap[hr]=f_regmap[hr];
9315                       branch_regs[k].dirty&=~(1<<hr);
9316                       branch_regs[k].wasconst&=~(1<<hr);
9317                       branch_regs[k].isconst&=~(1<<hr);
9318                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
9319                         regmap_pre[k+2][hr]=f_regmap[hr];
9320                         regs[k+2].wasdirty&=~(1<<hr);
9321                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
9322                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
9323                       }
9324                     }
9325                     else
9326                     {
9327                       regmap_pre[k+1][hr]=f_regmap[hr];
9328                       regs[k+1].wasdirty&=~(1<<hr);
9329                     }
9330                   }
9331                   if(regs[j].regmap[hr]==f_regmap[hr])
9332                     regs[j].regmap_entry[hr]=f_regmap[hr];
9333                   break;
9334                 }
9335                 if(j==i) break;
9336                 if(regs[j].regmap[hr]>=0)
9337                   break;
9338                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9339                   //printf("no-match due to different register\n");
9340                   break;
9341                 }
9342                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9343                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9344                   break;
9345                 }
9346                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9347                 {
9348                   // Stop on unconditional branch
9349                   break;
9350                 }
9351                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
9352                 {
9353                   if(ooo[j]) {
9354                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
9355                       break;
9356                   }else{
9357                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
9358                       break;
9359                   }
9360                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
9361                     //printf("no-match due to different register (branch)\n");
9362                     break;
9363                   }
9364                 }
9365                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9366                   //printf("No free regs for store %x\n",start+j*4);
9367                   break;
9368                 }
9369                 if(f_regmap[hr]>=64) {
9370                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9371                     break;
9372                   }
9373                   else
9374                   {
9375                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9376                       break;
9377                     }
9378                   }
9379                 }
9380               }
9381             }
9382           }
9383         }
9384       }
9385     }else{
9386       // Non-branch or undetermined branch target
9387       for(hr=0;hr<HOST_REGS;hr++)
9388       {
9389         if(hr!=EXCLUDE_REG) {
9390           if(regs[i].regmap[hr]>64) {
9391             if(!((regs[i].dirty>>hr)&1))
9392               f_regmap[hr]=regs[i].regmap[hr];
9393           }
9394           else if(regs[i].regmap[hr]>=0) {
9395             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9396               // dealloc old register
9397               int n;
9398               for(n=0;n<HOST_REGS;n++)
9399               {
9400                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9401               }
9402               // and alloc new one
9403               f_regmap[hr]=regs[i].regmap[hr];
9404             }
9405           }
9406         }
9407       }
9408       // Try to restore cycle count at branch targets
9409       if(bt[i]) {
9410         for(j=i;j<slen-1;j++) {
9411           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9412           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9413             //printf("no free regs for store %x\n",start+j*4);
9414             break;
9415           }
9416         }
9417         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9418           int k=i;
9419           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9420           while(k<j) {
9421             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9422             regs[k].regmap[HOST_CCREG]=CCREG;
9423             regmap_pre[k+1][HOST_CCREG]=CCREG;
9424             regs[k+1].wasdirty|=1<<HOST_CCREG;
9425             regs[k].dirty|=1<<HOST_CCREG;
9426             regs[k].wasconst&=~(1<<HOST_CCREG);
9427             regs[k].isconst&=~(1<<HOST_CCREG);
9428             k++;
9429           }
9430           regs[j].regmap_entry[HOST_CCREG]=CCREG;
9431         }
9432         // Work backwards from the branch target
9433         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9434         {
9435           //printf("Extend backwards\n");
9436           int k;
9437           k=i;
9438           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9439             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9440               //printf("no free regs for store %x\n",start+(k-1)*4);
9441               break;
9442             }
9443             k--;
9444           }
9445           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9446             //printf("Extend CC, %x ->\n",start+k*4);
9447             while(k<=i) {
9448               regs[k].regmap_entry[HOST_CCREG]=CCREG;
9449               regs[k].regmap[HOST_CCREG]=CCREG;
9450               regmap_pre[k+1][HOST_CCREG]=CCREG;
9451               regs[k+1].wasdirty|=1<<HOST_CCREG;
9452               regs[k].dirty|=1<<HOST_CCREG;
9453               regs[k].wasconst&=~(1<<HOST_CCREG);
9454               regs[k].isconst&=~(1<<HOST_CCREG);
9455               k++;
9456             }
9457           }
9458           else {
9459             //printf("Fail Extend CC, %x ->\n",start+k*4);
9460           }
9461         }
9462       }
9463       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9464          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9465          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9466          itype[i]!=FCONV&&itype[i]!=FCOMP)
9467       {
9468         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9469       }
9470     }
9471   }
9472
9473   // Cache memory offset or tlb map pointer if a register is available
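  // This is a simple scoring pass: starting at each load/store, scan forward
  // and count how long a host register stays free and how many later
  // loads/stores (and loop back-edges) could reuse it.  The register with the
  // highest score (if greater than 1) is allocated to ROREG over that range,
  // so the RAM offset / map pointer does not have to be reloaded before every
  // memory access.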
9474   #ifndef HOST_IMM_ADDR32
9475   #ifndef RAM_OFFSET
9476   if(0)
9477   #endif
9478   {
9479     int earliest_available[HOST_REGS];
9480     int loop_start[HOST_REGS];
9481     int score[HOST_REGS];
9482     int end[HOST_REGS];
9483     int reg=ROREG;
9484
9485     // Init
9486     for(hr=0;hr<HOST_REGS;hr++) {
9487       score[hr]=0;earliest_available[hr]=0;
9488       loop_start[hr]=MAXBLOCK;
9489     }
9490     for(i=0;i<slen-1;i++)
9491     {
9492       // Can't do anything if no registers are available
9493       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
9494         for(hr=0;hr<HOST_REGS;hr++) {
9495           score[hr]=0;earliest_available[hr]=i+1;
9496           loop_start[hr]=MAXBLOCK;
9497         }
9498       }
9499       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9500         if(!ooo[i]) {
9501           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
9502             for(hr=0;hr<HOST_REGS;hr++) {
9503               score[hr]=0;earliest_available[hr]=i+1;
9504               loop_start[hr]=MAXBLOCK;
9505             }
9506           }
9507         }else{
9508           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
9509             for(hr=0;hr<HOST_REGS;hr++) {
9510               score[hr]=0;earliest_available[hr]=i+1;
9511               loop_start[hr]=MAXBLOCK;
9512             }
9513           }
9514         }
9515       }
9516       // Mark unavailable registers
9517       for(hr=0;hr<HOST_REGS;hr++) {
9518         if(regs[i].regmap[hr]>=0) {
9519           score[hr]=0;earliest_available[hr]=i+1;
9520           loop_start[hr]=MAXBLOCK;
9521         }
9522         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9523           if(branch_regs[i].regmap[hr]>=0) {
9524             score[hr]=0;earliest_available[hr]=i+2;
9525             loop_start[hr]=MAXBLOCK;
9526           }
9527         }
9528       }
9529       // No register allocations after unconditional jumps
9530       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
9531       {
9532         for(hr=0;hr<HOST_REGS;hr++) {
9533           score[hr]=0;earliest_available[hr]=i+2;
9534           loop_start[hr]=MAXBLOCK;
9535         }
9536         i++; // Skip delay slot too
9537         //printf("skip delay slot: %x\n",start+i*4);
9538       }
9539       else
9540       // Possible match
9541       if(itype[i]==LOAD||itype[i]==LOADLR||
9542          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
9543         for(hr=0;hr<HOST_REGS;hr++) {
9544           if(hr!=EXCLUDE_REG) {
9545             end[hr]=i-1;
9546             for(j=i;j<slen-1;j++) {
9547               if(regs[j].regmap[hr]>=0) break;
9548               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9549                 if(branch_regs[j].regmap[hr]>=0) break;
9550                 if(ooo[j]) {
9551                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
9552                 }else{
9553                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
9554                 }
9555               }
9556               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
9557               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9558                 int t=(ba[j]-start)>>2;
9559                 if(t<j&&t>=earliest_available[hr]) {
9560                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
9561                     // Score a point for hoisting loop invariant
9562                     if(t<loop_start[hr]) loop_start[hr]=t;
9563                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
9564                     score[hr]++;
9565                     end[hr]=j;
9566                   }
9567                 }
9568                 else if(t<j) {
9569                   if(regs[t].regmap[hr]==reg) {
9570                     // Score a point if the branch target matches this register
9571                     score[hr]++;
9572                     end[hr]=j;
9573                   }
9574                 }
9575                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
9576                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
9577                   score[hr]++;
9578                   end[hr]=j;
9579                 }
9580               }
9581               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9582               {
9583                 // Stop on unconditional branch
9584                 break;
9585               }
9586               else
9587               if(itype[j]==LOAD||itype[j]==LOADLR||
9588                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
9589                 score[hr]++;
9590                 end[hr]=j;
9591               }
9592             }
9593           }
9594         }
9595         // Find highest score and allocate that register
9596         int maxscore=0;
9597         for(hr=0;hr<HOST_REGS;hr++) {
9598           if(hr!=EXCLUDE_REG) {
9599             if(score[hr]>score[maxscore]) {
9600               maxscore=hr;
9601               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
9602             }
9603           }
9604         }
9605         if(score[maxscore]>1)
9606         {
9607           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
9608           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
9609             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
9610             assert(regs[j].regmap[maxscore]<0);
9611             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
9612             regs[j].regmap[maxscore]=reg;
9613             regs[j].dirty&=~(1<<maxscore);
9614             regs[j].wasconst&=~(1<<maxscore);
9615             regs[j].isconst&=~(1<<maxscore);
9616             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9617               branch_regs[j].regmap[maxscore]=reg;
9618               branch_regs[j].wasdirty&=~(1<<maxscore);
9619               branch_regs[j].dirty&=~(1<<maxscore);
9620               branch_regs[j].wasconst&=~(1<<maxscore);
9621               branch_regs[j].isconst&=~(1<<maxscore);
9622               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
9623                 regmap_pre[j+2][maxscore]=reg;
9624                 regs[j+2].wasdirty&=~(1<<maxscore);
9625               }
9626               // loop optimization (loop_preload)
9627               int t=(ba[j]-start)>>2;
9628               if(t==loop_start[maxscore]) {
9629                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
9630                   regs[t].regmap_entry[maxscore]=reg;
9631               }
9632             }
9633             else
9634             {
9635               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
9636                 regmap_pre[j+1][maxscore]=reg;
9637                 regs[j+1].wasdirty&=~(1<<maxscore);
9638               }
9639             }
9640           }
9641           i=j-1;
9642           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
9643           for(hr=0;hr<HOST_REGS;hr++) {
9644             score[hr]=0;earliest_available[hr]=i+1;
9645             loop_start[hr]=MAXBLOCK;
9646           }
9647         }
9648       }
9649     }
9650   }
9651   #endif
9652
9653   // This allocates registers (if possible) one instruction prior
9654   // to use, which can avoid a load-use penalty on certain CPUs.
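  // Concretely: when instruction i is a simple op (ALU, MOV, load, etc.) and
  // instruction i+1 reads a MIPS register that already has a host register
  // assigned at i+1 but is free at i, the mapping is extended back to i so the
  // value is loaded one instruction early.  The same is done for the address
  // register of loads/stores and for the FTEMP/AGEN temporaries they use.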
9655   for(i=0;i<slen-1;i++)
9656   {
9657     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9658     {
9659       if(!bt[i+1])
9660       {
9661         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9662            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9663         {
9664           if(rs1[i+1]) {
9665             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9666             {
9667               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9668               {
9669                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9670                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9671                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9672                 regs[i].isconst&=~(1<<hr);
9673                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9674                 constmap[i][hr]=constmap[i+1][hr];
9675                 regs[i+1].wasdirty&=~(1<<hr);
9676                 regs[i].dirty&=~(1<<hr);
9677               }
9678             }
9679           }
9680           if(rs2[i+1]) {
9681             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9682             {
9683               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9684               {
9685                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9686                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9687                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9688                 regs[i].isconst&=~(1<<hr);
9689                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9690                 constmap[i][hr]=constmap[i+1][hr];
9691                 regs[i+1].wasdirty&=~(1<<hr);
9692                 regs[i].dirty&=~(1<<hr);
9693               }
9694             }
9695           }
9696           // Preload target address for load instruction (non-constant)
9697           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9698             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9699             {
9700               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9701               {
9702                 regs[i].regmap[hr]=rs1[i+1];
9703                 regmap_pre[i+1][hr]=rs1[i+1];
9704                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9705                 regs[i].isconst&=~(1<<hr);
9706                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9707                 constmap[i][hr]=constmap[i+1][hr];
9708                 regs[i+1].wasdirty&=~(1<<hr);
9709                 regs[i].dirty&=~(1<<hr);
9710               }
9711             }
9712           }
9713           // Load source into target register
9714           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9715             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9716             {
9717               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9718               {
9719                 regs[i].regmap[hr]=rs1[i+1];
9720                 regmap_pre[i+1][hr]=rs1[i+1];
9721                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9722                 regs[i].isconst&=~(1<<hr);
9723                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9724                 constmap[i][hr]=constmap[i+1][hr];
9725                 regs[i+1].wasdirty&=~(1<<hr);
9726                 regs[i].dirty&=~(1<<hr);
9727               }
9728             }
9729           }
9730           // Address for store instruction (non-constant)
9731           if(itype[i+1]==STORE||itype[i+1]==STORELR
9732              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
9733             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9734               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
9735               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9736               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
9737               assert(hr>=0);
9738               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9739               {
9740                 regs[i].regmap[hr]=rs1[i+1];
9741                 regmap_pre[i+1][hr]=rs1[i+1];
9742                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9743                 regs[i].isconst&=~(1<<hr);
9744                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9745                 constmap[i][hr]=constmap[i+1][hr];
9746                 regs[i+1].wasdirty&=~(1<<hr);
9747                 regs[i].dirty&=~(1<<hr);
9748               }
9749             }
9750           }
9751           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
9752             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9753               int nr;
9754               hr=get_reg(regs[i+1].regmap,FTEMP);
9755               assert(hr>=0);
9756               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9757               {
9758                 regs[i].regmap[hr]=rs1[i+1];
9759                 regmap_pre[i+1][hr]=rs1[i+1];
9760                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9761                 regs[i].isconst&=~(1<<hr);
9762                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9763                 constmap[i][hr]=constmap[i+1][hr];
9764                 regs[i+1].wasdirty&=~(1<<hr);
9765                 regs[i].dirty&=~(1<<hr);
9766               }
9767               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
9768               {
9769                 // move it to another register
9770                 regs[i+1].regmap[hr]=-1;
9771                 regmap_pre[i+2][hr]=-1;
9772                 regs[i+1].regmap[nr]=FTEMP;
9773                 regmap_pre[i+2][nr]=FTEMP;
9774                 regs[i].regmap[nr]=rs1[i+1];
9775                 regmap_pre[i+1][nr]=rs1[i+1];
9776                 regs[i+1].regmap_entry[nr]=rs1[i+1];
9777                 regs[i].isconst&=~(1<<nr);
9778                 regs[i+1].isconst&=~(1<<nr);
9779                 regs[i].dirty&=~(1<<nr);
9780                 regs[i+1].wasdirty&=~(1<<nr);
9781                 regs[i+1].dirty&=~(1<<nr);
9782                 regs[i+2].wasdirty&=~(1<<nr);
9783               }
9784             }
9785           }
9786           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
9787             if(itype[i+1]==LOAD)
9788               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
9789             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
9790               hr=get_reg(regs[i+1].regmap,FTEMP);
9791             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
9792               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
9793               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9794             }
9795             if(hr>=0&&regs[i].regmap[hr]<0) {
9796               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
9797               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
9798                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
9799                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
9800                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
9801                 regs[i].isconst&=~(1<<hr);
9802                 regs[i+1].wasdirty&=~(1<<hr);
9803                 regs[i].dirty&=~(1<<hr);
9804               }
9805             }
9806           }
9807         }
9808       }
9809     }
9810   }
9811
9812   /* Pass 6 - Optimize clean/dirty state */
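  // clean_registers() essentially decides, for each point in the block, which
  // cached registers really need to be flagged dirty (and therefore written
  // back at branch targets and block exits) and which can be treated as clean,
  // so redundant writebacks inside straight-line code are avoided.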
9813   clean_registers(0,slen-1,1);
9814
9815   /* Pass 7 - Identify 32-bit registers */
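  // On the PSX all GPRs are 32 bits, so there is nothing to identify here;
  // what remains of this pass only marks the instruction after each
  // conditional branch as a branch target (bt), since execution can resume
  // there after an interrupt or exception.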
9816   for (i=slen-1;i>=0;i--)
9817   {
9818     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9819     {
9820       // Conditional branch
9821       if((source[i]>>16)!=0x1000&&i<slen-2) {
9822         // Mark this address as a branch target since execution may
9823         // resume here on return from an interrupt
9824         bt[i+2]=1;
9825       }
9826     }
9827   }
9828
9829   if(itype[slen-1]==SPAN) {
9830     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
9831   }
9832
9833 #ifdef DISASM
9834   /* Debug/disassembly */
9835   for(i=0;i<slen;i++)
9836   {
9837     printf("U:");
9838     int r;
9839     for(r=1;r<=CCREG;r++) {
9840       if((unneeded_reg[i]>>r)&1) {
9841         if(r==HIREG) printf(" HI");
9842         else if(r==LOREG) printf(" LO");
9843         else printf(" r%d",r);
9844       }
9845     }
9846     printf("\n");
9847     #if defined(__i386__) || defined(__x86_64__)
9848     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
9849     #endif
9850     #ifdef __arm__
9851     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
9852     #endif
9853     printf("needs: ");
9854     if(needed_reg[i]&1) printf("eax ");
9855     if((needed_reg[i]>>1)&1) printf("ecx ");
9856     if((needed_reg[i]>>2)&1) printf("edx ");
9857     if((needed_reg[i]>>3)&1) printf("ebx ");
9858     if((needed_reg[i]>>5)&1) printf("ebp ");
9859     if((needed_reg[i]>>6)&1) printf("esi ");
9860     if((needed_reg[i]>>7)&1) printf("edi ");
9861     printf("\n");
9862     #if defined(__i386__) || defined(__x86_64__)
9863     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
9864     printf("dirty: ");
9865     if(regs[i].wasdirty&1) printf("eax ");
9866     if((regs[i].wasdirty>>1)&1) printf("ecx ");
9867     if((regs[i].wasdirty>>2)&1) printf("edx ");
9868     if((regs[i].wasdirty>>3)&1) printf("ebx ");
9869     if((regs[i].wasdirty>>5)&1) printf("ebp ");
9870     if((regs[i].wasdirty>>6)&1) printf("esi ");
9871     if((regs[i].wasdirty>>7)&1) printf("edi ");
9872     #endif
9873     #ifdef __arm__
9874     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
9875     printf("dirty: ");
9876     if(regs[i].wasdirty&1) printf("r0 ");
9877     if((regs[i].wasdirty>>1)&1) printf("r1 ");
9878     if((regs[i].wasdirty>>2)&1) printf("r2 ");
9879     if((regs[i].wasdirty>>3)&1) printf("r3 ");
9880     if((regs[i].wasdirty>>4)&1) printf("r4 ");
9881     if((regs[i].wasdirty>>5)&1) printf("r5 ");
9882     if((regs[i].wasdirty>>6)&1) printf("r6 ");
9883     if((regs[i].wasdirty>>7)&1) printf("r7 ");
9884     if((regs[i].wasdirty>>8)&1) printf("r8 ");
9885     if((regs[i].wasdirty>>9)&1) printf("r9 ");
9886     if((regs[i].wasdirty>>10)&1) printf("r10 ");
9887     if((regs[i].wasdirty>>12)&1) printf("r12 ");
9888     #endif
9889     printf("\n");
9890     disassemble_inst(i);
9891     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
9892     #if defined(__i386__) || defined(__x86_64__)
9893     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
9894     if(regs[i].dirty&1) printf("eax ");
9895     if((regs[i].dirty>>1)&1) printf("ecx ");
9896     if((regs[i].dirty>>2)&1) printf("edx ");
9897     if((regs[i].dirty>>3)&1) printf("ebx ");
9898     if((regs[i].dirty>>5)&1) printf("ebp ");
9899     if((regs[i].dirty>>6)&1) printf("esi ");
9900     if((regs[i].dirty>>7)&1) printf("edi ");
9901     #endif
9902     #ifdef __arm__
9903     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
9904     if(regs[i].dirty&1) printf("r0 ");
9905     if((regs[i].dirty>>1)&1) printf("r1 ");
9906     if((regs[i].dirty>>2)&1) printf("r2 ");
9907     if((regs[i].dirty>>3)&1) printf("r3 ");
9908     if((regs[i].dirty>>4)&1) printf("r4 ");
9909     if((regs[i].dirty>>5)&1) printf("r5 ");
9910     if((regs[i].dirty>>6)&1) printf("r6 ");
9911     if((regs[i].dirty>>7)&1) printf("r7 ");
9912     if((regs[i].dirty>>8)&1) printf("r8 ");
9913     if((regs[i].dirty>>9)&1) printf("r9 ");
9914     if((regs[i].dirty>>10)&1) printf("r10 ");
9915     if((regs[i].dirty>>12)&1) printf("r12 ");
9916     #endif
9917     printf("\n");
9918     if(regs[i].isconst) {
9919       printf("constants: ");
9920       #if defined(__i386__) || defined(__x86_64__)
9921       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
9922       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
9923       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
9924       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
9925       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
9926       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
9927       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
9928       #endif
9929       #ifdef __arm__
9930       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
9931       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
9932       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
9933       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
9934       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
9935       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
9936       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
9937       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
9938       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
9939       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
9940       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
9941       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
9942       #endif
9943       printf("\n");
9944     }
9945     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9946       #if defined(__i386__) || defined(__x86_64__)
9947       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
9948       if(branch_regs[i].dirty&1) printf("eax ");
9949       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
9950       if((branch_regs[i].dirty>>2)&1) printf("edx ");
9951       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
9952       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
9953       if((branch_regs[i].dirty>>6)&1) printf("esi ");
9954       if((branch_regs[i].dirty>>7)&1) printf("edi ");
9955       #endif
9956       #ifdef __arm__
9957       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
9958       if(branch_regs[i].dirty&1) printf("r0 ");
9959       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
9960       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
9961       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
9962       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
9963       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
9964       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
9965       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
9966       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
9967       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
9968       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
9969       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
9970       #endif
9971     }
9972   }
9973 #endif // DISASM
9974
9975   /* Pass 8 - Assembly */
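  // For each instruction this loop: writes back or invalidates host registers
  // that no longer match the expected entry map, records the branch target
  // entry point (instr_addr[i]), loads the registers and constants the
  // instruction needs, then dispatches to the per-type assembler
  // (alu_assemble, load_assemble, ...).  Delay slots are emitted by the branch
  // assemblers themselves, so they are skipped here via the ds flag.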
9976   linkcount=0;stubcount=0;
9977   ds=0;is_delayslot=0;
9978   cop1_usable=0;
9979   uint64_t is32_pre=0;
9980   u_int dirty_pre=0;
9981   void *beginning=start_block();
9982   if((u_int)addr&1) {
9983     ds=1;
9984     pagespan_ds();
9985   }
9986   u_int instr_addr0_override=0;
9987
9988   if (start == 0x80030000) {
9989     // nasty hack for the fastboot (BIOS skip) option
9990     // override block entry to this code
9991     instr_addr0_override=(u_int)out;
9992     emit_movimm(start,0);
9993     // abuse io address var as a flag that we
9994     // have already returned here once
9995     emit_readword((int)&address,1);
9996     emit_writeword(0,(int)&pcaddr);
9997     emit_writeword(0,(int)&address);
9998     emit_cmp(0,1);
9999     emit_jne((int)new_dyna_leave);
10000   }
10001   for(i=0;i<slen;i++)
10002   {
10003     //if(ds) printf("ds: ");
10004     disassemble_inst(i);
10005     if(ds) {
10006       ds=0; // Skip delay slot
10007       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10008       instr_addr[i]=0;
10009     } else {
10010       speculate_register_values(i);
10011       #ifndef DESTRUCTIVE_WRITEBACK
10012       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10013       {
10014         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10015               unneeded_reg[i],unneeded_reg_upper[i]);
10016       }
10017       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
10018         is32_pre=branch_regs[i].is32;
10019         dirty_pre=branch_regs[i].dirty;
10020       }else{
10021         is32_pre=regs[i].is32;
10022         dirty_pre=regs[i].dirty;
10023       }
10024       #endif
10025       // write back
10026       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10027       {
10028         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10029                       unneeded_reg[i],unneeded_reg_upper[i]);
10030         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10031       }
10032       // branch target entry point
10033       instr_addr[i]=(u_int)out;
10034       assem_debug("<->\n");
10035       // load regs
10036       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10037         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10038       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10039       address_generation(i,&regs[i],regs[i].regmap_entry);
10040       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10041       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10042       {
10043         // Load the delay slot registers if necessary
10044         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
10045           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10046         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
10047           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10048         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10049           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10050       }
10051       else if(i+1<slen)
10052       {
10053         // Preload registers for following instruction
10054         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10055           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10056             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10057         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10058           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10059             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10060       }
10061       // TODO: if(is_ooo(i)) address_generation(i+1);
10062       if(itype[i]==CJUMP||itype[i]==FJUMP)
10063         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10064       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10065         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10066       if(bt[i]) cop1_usable=0;
10067       // assemble
10068       switch(itype[i]) {
10069         case ALU:
10070           alu_assemble(i,&regs[i]);break;
10071         case IMM16:
10072           imm16_assemble(i,&regs[i]);break;
10073         case SHIFT:
10074           shift_assemble(i,&regs[i]);break;
10075         case SHIFTIMM:
10076           shiftimm_assemble(i,&regs[i]);break;
10077         case LOAD:
10078           load_assemble(i,&regs[i]);break;
10079         case LOADLR:
10080           loadlr_assemble(i,&regs[i]);break;
10081         case STORE:
10082           store_assemble(i,&regs[i]);break;
10083         case STORELR:
10084           storelr_assemble(i,&regs[i]);break;
10085         case COP0:
10086           cop0_assemble(i,&regs[i]);break;
10087         case COP1:
10088           cop1_assemble(i,&regs[i]);break;
10089         case C1LS:
10090           c1ls_assemble(i,&regs[i]);break;
10091         case COP2:
10092           cop2_assemble(i,&regs[i]);break;
10093         case C2LS:
10094           c2ls_assemble(i,&regs[i]);break;
10095         case C2OP:
10096           c2op_assemble(i,&regs[i]);break;
10097         case FCONV:
10098           fconv_assemble(i,&regs[i]);break;
10099         case FLOAT:
10100           float_assemble(i,&regs[i]);break;
10101         case FCOMP:
10102           fcomp_assemble(i,&regs[i]);break;
10103         case MULTDIV:
10104           multdiv_assemble(i,&regs[i]);break;
10105         case MOV:
10106           mov_assemble(i,&regs[i]);break;
10107         case SYSCALL:
10108           syscall_assemble(i,&regs[i]);break;
10109         case HLECALL:
10110           hlecall_assemble(i,&regs[i]);break;
10111         case INTCALL:
10112           intcall_assemble(i,&regs[i]);break;
10113         case UJUMP:
10114           ujump_assemble(i,&regs[i]);ds=1;break;
10115         case RJUMP:
10116           rjump_assemble(i,&regs[i]);ds=1;break;
10117         case CJUMP:
10118           cjump_assemble(i,&regs[i]);ds=1;break;
10119         case SJUMP:
10120           sjump_assemble(i,&regs[i]);ds=1;break;
10121         case FJUMP:
10122           fjump_assemble(i,&regs[i]);ds=1;break;
10123         case SPAN:
10124           pagespan_assemble(i,&regs[i]);break;
10125       }
10126       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10127         literal_pool(1024);
10128       else
10129         literal_pool_jumpover(256);
10130     }
10131   }
10132   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10133   // If the block did not end with an unconditional branch,
10134   // add a jump to the next instruction.
10135   if(i>1) {
10136     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10137       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10138       assert(i==slen);
10139       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10140         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10141         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10142           emit_loadreg(CCREG,HOST_CCREG);
10143         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10144       }
10145       else if(!likely[i-2])
10146       {
10147         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10148         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10149       }
10150       else
10151       {
10152         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10153         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10154       }
10155       add_to_linker((int)out,start+i*4,0);
10156       emit_jmp(0);
10157     }
10158   }
10159   else
10160   {
10161     assert(i>0);
10162     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10163     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10164     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10165       emit_loadreg(CCREG,HOST_CCREG);
10166     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10167     add_to_linker((int)out,start+i*4,0);
10168     emit_jmp(0);
10169   }
10170
10171   // TODO: delay slot stubs?
10172   // Stubs
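  // Stubs are the out-of-line slow paths collected while assembling above:
  // memory access handlers for addresses that miss the fast path, cycle
  // count/interrupt checks (CC_STUB), self-modifying-code checks
  // (INVCODE_STUB), coprocessor-unusable exceptions (FP_STUB) and unaligned
  // store helpers (STORELR_STUB).  They are emitted after the main body of
  // the block.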
10173   for(i=0;i<stubcount;i++)
10174   {
10175     switch(stubs[i][0])
10176     {
10177       case LOADB_STUB:
10178       case LOADH_STUB:
10179       case LOADW_STUB:
10180       case LOADD_STUB:
10181       case LOADBU_STUB:
10182       case LOADHU_STUB:
10183         do_readstub(i);break;
10184       case STOREB_STUB:
10185       case STOREH_STUB:
10186       case STOREW_STUB:
10187       case STORED_STUB:
10188         do_writestub(i);break;
10189       case CC_STUB:
10190         do_ccstub(i);break;
10191       case INVCODE_STUB:
10192         do_invstub(i);break;
10193       case FP_STUB:
10194         do_cop1stub(i);break;
10195       case STORELR_STUB:
10196         do_unalignedwritestub(i);break;
10197     }
10198   }
10199
10200   if (instr_addr0_override)
10201     instr_addr[0] = instr_addr0_override;
10202
10203   /* Pass 9 - Linker */
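  // Resolve the branches recorded by add_to_linker(): internal branches are
  // patched directly to the target instruction's entry point, while branches
  // leaving the block are either patched to already-compiled code found by
  // check_addr() or routed through an extjump trampoline that looks up (or
  // compiles) the target at run time.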
10204   for(i=0;i<linkcount;i++)
10205   {
10206     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10207     literal_pool(64);
10208     if(!link_addr[i][2])
10209     {
10210       void *stub=out;
10211       void *addr=check_addr(link_addr[i][1]);
10212       emit_extjump(link_addr[i][0],link_addr[i][1]);
10213       if(addr) {
10214         set_jump_target(link_addr[i][0],(int)addr);
10215         add_link(link_addr[i][1],stub);
10216       }
10217       else set_jump_target(link_addr[i][0],(int)stub);
10218     }
10219     else
10220     {
10221       // Internal branch
10222       int target=(link_addr[i][1]-start)>>2;
10223       assert(target>=0&&target<slen);
10224       assert(instr_addr[target]);
10225       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10226       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10227       //#else
10228       set_jump_target(link_addr[i][0],instr_addr[target]);
10229       //#endif
10230     }
10231   }
10232   // External Branch Targets (jump_in)
10233   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10234   for(i=0;i<slen;i++)
10235   {
10236     if(bt[i]||i==0)
10237     {
10238       if(instr_addr[i]) // TODO - delay slots (=null)
10239       {
10240         u_int vaddr=start+i*4;
10241         u_int page=get_page(vaddr);
10242         u_int vpage=get_vpage(vaddr);
10243         literal_pool(256);
10244         {
10245           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10246           assem_debug("jump_in: %x\n",start+i*4);
10247           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10248           int entry_point=do_dirty_stub(i);
10249           ll_add_flags(jump_in+page,vaddr,state_rflags,(void *)entry_point);
10250           // If there was an existing entry in the hash table,
10251           // replace it with the new address.
10252           // Don't add new entries.  We'll insert the
10253           // ones that actually get used in check_addr().
10254           u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10255           if(ht_bin[0]==vaddr) {
10256             ht_bin[1]=entry_point;
10257           }
10258           if(ht_bin[2]==vaddr) {
10259             ht_bin[3]=entry_point;
10260           }
10261         }
10262       }
10263     }
10264   }
10265   // Write out the literal pool if necessary
10266   literal_pool(0);
10267   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10268   // Align code
10269   if(((u_int)out)&7) emit_addnop(13);
10270   #endif
10271   assert((u_int)out-(u_int)beginning<MAX_OUTPUT_BLOCK_SIZE);
10272   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
10273   memcpy(copy,source,slen*4);
10274   copy+=slen*4;
10275
10276   end_block(beginning);
10277
10278   // If we're within 256K of the end of the buffer,
10279   // start over from the beginning. (Is 256K enough?)
10280   if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10281
10282   // Trap writes to any of the pages we compiled
10283   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10284     invalid_code[i]=0;
10285   }
10286   inv_code_start=inv_code_end=~0;
10287
10288   // for PCSX we need to mark all mirrors too
10289   if(get_page(start)<(RAM_SIZE>>12))
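  // (KUSEG 0x00000000, KSEG0 0x80000000 and KSEG1 0xa0000000 all alias the
  //  same 2MB of RAM; the i&0x1ff mask keeps the page index within those
  //  512 4KB pages.)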
10290     for(i=start>>12;i<=(start+slen*4)>>12;i++)
10291       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
10292       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
10293       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
10294
10295   /* Pass 10 - Free memory by expiring oldest blocks */
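  // The translation cache is treated as a ring: expirep sweeps through 65536
  // phases and, for the region of the cache that is about to be reused, clears
  // the jump_in/jump_dirty lists, stale jump_out pointers and hash table
  // entries, so old blocks are dropped before their memory is overwritten by
  // newly generated code.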
10296
10297   int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10298   while(expirep!=end)
10299   {
10300     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10301     int base=(int)BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10302     inv_debug("EXP: Phase %d\n",expirep);
10303     switch((expirep>>11)&3)
10304     {
10305       case 0:
10306         // Clear jump_in and jump_dirty
10307         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10308         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10309         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10310         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10311         break;
10312       case 1:
10313         // Clear pointers
10314         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10315         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10316         break;
10317       case 2:
10318         // Clear hash table
10319         for(i=0;i<32;i++) {
10320           u_int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10321           if((ht_bin[3]>>shift)==(base>>shift) ||
10322              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10323             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10324             ht_bin[2]=ht_bin[3]=-1;
10325           }
10326           if((ht_bin[1]>>shift)==(base>>shift) ||
10327              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10328             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10329             ht_bin[0]=ht_bin[2];
10330             ht_bin[1]=ht_bin[3];
10331             ht_bin[2]=ht_bin[3]=-1;
10332           }
10333         }
10334         break;
10335       case 3:
10336         // Clear jump_out
10337         #ifdef __arm__
10338         if((expirep&2047)==0)
10339           do_clear_cache();
10340         #endif
10341         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10342         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10343         break;
10344     }
10345     expirep=(expirep+1)&65535;
10346   }
10347   return 0;
10348 }
10349
10350 // vim:shiftwidth=2:expandtab