1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <errno.h>
25 #include <sys/mman.h>
26 #ifdef __MACH__
27 #include <libkern/OSCacheControl.h>
28 #endif
29 #ifdef _3DS
30 #include <3ds_utils.h>
31 #endif
32 #ifdef VITA
33 #include <psp2/kernel/sysmem.h>
34 static int sceBlock;
35 #endif
36
37 #include "new_dynarec_config.h"
38 #include "../psxhle.h" //emulator interface
39 #include "emu_if.h" //emulator interface
40
41 //#define DISASM
42 //#define assem_debug printf
43 //#define inv_debug printf
44 #define assem_debug(...)
45 #define inv_debug(...)
46
47 #ifdef __i386__
48 #include "assem_x86.h"
49 #endif
50 #ifdef __x86_64__
51 #include "assem_x64.h"
52 #endif
53 #ifdef __arm__
54 #include "assem_arm.h"
55 #endif
56
57 #define MAXBLOCK 4096
58 #define MAX_OUTPUT_BLOCK_SIZE 262144
59
60 struct regstat
61 {
62   signed char regmap_entry[HOST_REGS];
63   signed char regmap[HOST_REGS];
64   uint64_t was32;
65   uint64_t is32;
66   uint64_t wasdirty;
67   uint64_t dirty;
68   uint64_t u;
69   uint64_t uu;
70   u_int wasconst;
71   u_int isconst;
72   u_int loadedconst;             // host regs that have constants loaded
73   u_int waswritten;              // MIPS regs that were used as store base before
74 };
75
76 // note: asm depends on this layout
77 struct ll_entry
78 {
79   u_int vaddr;
80   u_int reg_sv_flags;
81   void *addr;
82   struct ll_entry *next;
83 };
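/* Illustrative note (an assumption for a 32-bit host such as the ARM
   target of this dynarec): the layout the asm depends on is simply the
   declaration order, i.e. vaddr at offset 0, reg_sv_flags at 4, addr at 8
   and next at 12, for a 16-byte entry. */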
84
85   // used by asm:
86   u_char *out;
87   u_int hash_table[65536][4]  __attribute__((aligned(16)));
88   struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
89   struct ll_entry *jump_dirty[4096];
90
91   static struct ll_entry *jump_out[4096];
92   static u_int start;
93   static u_int *source;
94   static char insn[MAXBLOCK][10];
95   static u_char itype[MAXBLOCK];
96   static u_char opcode[MAXBLOCK];
97   static u_char opcode2[MAXBLOCK];
98   static u_char bt[MAXBLOCK];
99   static u_char rs1[MAXBLOCK];
100   static u_char rs2[MAXBLOCK];
101   static u_char rt1[MAXBLOCK];
102   static u_char rt2[MAXBLOCK];
103   static u_char us1[MAXBLOCK];
104   static u_char us2[MAXBLOCK];
105   static u_char dep1[MAXBLOCK];
106   static u_char dep2[MAXBLOCK];
107   static u_char lt1[MAXBLOCK];
108   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
109   static uint64_t gte_rt[MAXBLOCK];
110   static uint64_t gte_unneeded[MAXBLOCK];
111   static u_int smrv[32]; // speculated MIPS register values
112   static u_int smrv_strong; // mask of regs that are likely to have correct values
113   static u_int smrv_weak; // same, but somewhat less likely
114   static u_int smrv_strong_next; // same, but after current insn executes
115   static u_int smrv_weak_next;
116   static int imm[MAXBLOCK];
117   static u_int ba[MAXBLOCK];
118   static char likely[MAXBLOCK];
119   static char is_ds[MAXBLOCK];
120   static char ooo[MAXBLOCK];
121   static uint64_t unneeded_reg[MAXBLOCK];
122   static uint64_t unneeded_reg_upper[MAXBLOCK];
123   static uint64_t branch_unneeded_reg[MAXBLOCK];
124   static uint64_t branch_unneeded_reg_upper[MAXBLOCK];
125   static signed char regmap_pre[MAXBLOCK][HOST_REGS];
126   static uint64_t current_constmap[HOST_REGS];
127   static uint64_t constmap[MAXBLOCK][HOST_REGS];
128   static struct regstat regs[MAXBLOCK];
129   static struct regstat branch_regs[MAXBLOCK];
130   static signed char minimum_free_regs[MAXBLOCK];
131   static u_int needed_reg[MAXBLOCK];
132   static u_int wont_dirty[MAXBLOCK];
133   static u_int will_dirty[MAXBLOCK];
134   static int ccadj[MAXBLOCK];
135   static int slen;
136   static u_int instr_addr[MAXBLOCK];
137   static u_int link_addr[MAXBLOCK][3];
138   static int linkcount;
139   static u_int stubs[MAXBLOCK*3][8];
140   static int stubcount;
141   static u_int literals[1024][2];
142   static int literalcount;
143   static int is_delayslot;
144   static int cop1_usable;
145   static char shadow[1048576]  __attribute__((aligned(16)));
146   static void *copy;
147   static int expirep;
148   static u_int stop_after_jal;
149 #ifndef RAM_FIXED
150   static u_int ram_offset;
151 #else
152   static const u_int ram_offset=0;
153 #endif
154
155   int new_dynarec_hacks;
156   int new_dynarec_did_compile;
157   extern u_char restore_candidate[512];
158   extern int cycle_count;
159
160   /* registers that may be allocated */
161   /* 1-31 gpr */
162 #define HIREG 32 // hi
163 #define LOREG 33 // lo
164 #define FSREG 34 // FPU status (FCSR)
165 #define CSREG 35 // Coprocessor status
166 #define CCREG 36 // Cycle count
167 #define INVCP 37 // Pointer to invalid_code
168 //#define MMREG 38 // Pointer to memory_map
169 #define ROREG 39 // ram offset (if rdram!=0x80000000)
170 #define TEMPREG 40
171 #define FTEMP 40 // FPU temporary register
172 #define PTEMP 41 // Prefetch temporary register
173 //#define TLREG 42 // TLB mapping offset
174 #define RHASH 43 // Return address hash
175 #define RHTBL 44 // Return address hash table address
176 #define RTEMP 45 // JR/JALR address register
177 #define MAXREG 45
178 #define AGEN1 46 // Address generation temporary register
179 //#define AGEN2 47 // Address generation temporary register
180 //#define MGEN1 48 // Maptable address generation temporary register
181 //#define MGEN2 49 // Maptable address generation temporary register
182 #define BTREG 50 // Branch target temporary register
183
184   /* instruction types */
185 #define NOP 0     // No operation
186 #define LOAD 1    // Load
187 #define STORE 2   // Store
188 #define LOADLR 3  // Unaligned load
189 #define STORELR 4 // Unaligned store
190 #define MOV 5     // Move
191 #define ALU 6     // Arithmetic/logic
192 #define MULTDIV 7 // Multiply/divide
193 #define SHIFT 8   // Shift by register
194 #define SHIFTIMM 9// Shift by immediate
195 #define IMM16 10  // 16-bit immediate
196 #define RJUMP 11  // Unconditional jump to register
197 #define UJUMP 12  // Unconditional jump
198 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
199 #define SJUMP 14  // Conditional branch (regimm format)
200 #define COP0 15   // Coprocessor 0
201 #define COP1 16   // Coprocessor 1
202 #define C1LS 17   // Coprocessor 1 load/store
203 #define FJUMP 18  // Conditional branch (floating point)
204 #define FLOAT 19  // Floating point unit
205 #define FCONV 20  // Convert integer to float
206 #define FCOMP 21  // Floating point compare (sets FSREG)
207 #define SYSCALL 22// SYSCALL
208 #define OTHER 23  // Other
209 #define SPAN 24   // Branch/delay slot spans 2 pages
210 #define NI 25     // Not implemented
211 #define HLECALL 26// PCSX fake opcodes for HLE
212 #define COP2 27   // Coprocessor 2 move
213 #define C2LS 28   // Coprocessor 2 load/store
214 #define C2OP 29   // Coprocessor 2 operation
215 #define INTCALL 30// Call interpreter to handle rare corner cases
216
217   /* stubs */
218 #define CC_STUB 1
219 #define FP_STUB 2
220 #define LOADB_STUB 3
221 #define LOADH_STUB 4
222 #define LOADW_STUB 5
223 #define LOADD_STUB 6
224 #define LOADBU_STUB 7
225 #define LOADHU_STUB 8
226 #define STOREB_STUB 9
227 #define STOREH_STUB 10
228 #define STOREW_STUB 11
229 #define STORED_STUB 12
230 #define STORELR_STUB 13
231 #define INVCODE_STUB 14
232
233   /* branch codes */
234 #define TAKEN 1
235 #define NOTTAKEN 2
236 #define NULLDS 3
237
238 // asm linkage
239 int new_recompile_block(int addr);
240 void *get_addr_ht(u_int vaddr);
241 void invalidate_block(u_int block);
242 void invalidate_addr(u_int addr);
243 void remove_hash(int vaddr);
244 void dyna_linker();
245 void dyna_linker_ds();
246 void verify_code();
247 void verify_code_vm();
248 void verify_code_ds();
249 void cc_interrupt();
250 void fp_exception();
251 void fp_exception_ds();
252 void jump_syscall_hle();
253 void jump_hlecall();
254 void jump_intcall();
255 void new_dyna_leave();
256
257 // Needed by assembler
258 static void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
259 static void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
260 static void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
261 static void load_all_regs(signed char i_regmap[]);
262 static void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
263 static void load_regs_entry(int t);
264 static void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
265
266 static int verify_dirty(u_int *ptr);
267 static int get_final_value(int hr, int i, int *value);
268 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e);
269 static void add_to_linker(int addr,int target,int ext);
270
271 static int tracedebug=0;
272
273 static void mprotect_w_x(void *start, void *end, int is_x)
274 {
275 #ifdef NO_WRITE_EXEC
276   #if defined(VITA)
277   // *Open* enables write on all memory that was
278   // allocated by sceKernelAllocMemBlockForVM()?
279   if (is_x)
280     sceKernelCloseVMDomain();
281   else
282     sceKernelOpenVMDomain();
283   #else
284   u_long mstart = (u_long)start & ~4095ul;
285   u_long mend = (u_long)end;
286   if (mprotect((void *)mstart, mend - mstart,
287                PROT_READ | (is_x ? PROT_EXEC : PROT_WRITE)) != 0)
288     SysPrintf("mprotect(%c) failed: %s\n", is_x ? 'x' : 'w', strerror(errno));
289   #endif
290 #endif
291 }
292
293 static void start_tcache_write(void *start, void *end)
294 {
295   mprotect_w_x(start, end, 0);
296 }
297
298 static void end_tcache_write(void *start, void *end)
299 {
300 #ifdef __arm__
301   size_t len = (char *)end - (char *)start;
302   #if   defined(__BLACKBERRY_QNX__)
303   msync(start, len, MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
304   #elif defined(__MACH__)
305   sys_cache_control(kCacheFunctionPrepareForExecution, start, len);
306   #elif defined(VITA)
307   sceKernelSyncVMDomain(sceBlock, start, len);
308   #elif defined(_3DS)
309   ctr_flush_invalidate_cache();
310   #else
311   __clear_cache(start, end);
312   #endif
313   (void)len;
314 #endif
315
316   mprotect_w_x(start, end, 1);
317 }
318
319 static void *start_block(void)
320 {
321   u_char *end = out + MAX_OUTPUT_BLOCK_SIZE;
322   if (end > (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2))
323     end = (u_char *)BASE_ADDR + (1<<TARGET_SIZE_2);
324   start_tcache_write(out, end);
325   return out;
326 }
327
328 static void end_block(void *start)
329 {
330   end_tcache_write(start, out);
331 }
332
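/* Illustrative usage sketch (hypothetical caller, not part of this file):
 * on NO_WRITE_EXEC hosts the translation cache is made writable around
 * code emission and switched back to executable (with an icache sync)
 * afterwards:
 *
 *   void *beg = start_block();  // region at 'out' is now writable
 *   // ... emit host instructions, advancing the global 'out' pointer ...
 *   end_block(beg);             // flush/sync caches, restore execute
 */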
333 //#define DEBUG_CYCLE_COUNT 1
334
335 #define NO_CYCLE_PENALTY_THR 12
336
337 int cycle_multiplier; // 100 for 1.0
338
339 static int CLOCK_ADJUST(int x)
340 {
341   int s=(x>>31)|1;
342   return (x * cycle_multiplier + s * 50) / 100;
343 }
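/* Worked example (illustrative): with cycle_multiplier=150 (i.e. 1.5x),
 * s=1 for positive x, so CLOCK_ADJUST(7) = (7*150+50)/100 = 11; for x=-7,
 * s=-1 and the result is (-1050-50)/100 = -11.  The +/-50 term makes the
 * integer division round to nearest (halves away from zero) instead of
 * truncating toward zero. */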
344
345 static u_int get_page(u_int vaddr)
346 {
347   u_int page=vaddr&~0xe0000000;
348   if (page < 0x1000000)
349     page &= ~0x0e00000; // RAM mirrors
350   page>>=12;
351   if(page>2048) page=2048+(page&2047);
352   return page;
353 }
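/* Worked examples (illustrative): get_page(0x80010000) -> 0x10000>>12 = 16,
 * and the RAM mirror 0x80210000 collapses to the same page because bits
 * 21-23 are masked off.  A BIOS address such as 0xbfc00180 gives
 * 0x1fc00180>>12 = 0x1fc00, which exceeds 2048 and is folded to
 * 2048+(0x1fc00&2047) = 3072, so ROM/IO pages occupy the upper half of the
 * 4096-entry page tables. */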
354
355 // no virtual mem in PCSX
356 static u_int get_vpage(u_int vaddr)
357 {
358   return get_page(vaddr);
359 }
360
361 // Get address from virtual address
362 // This is called from the recompiled JR/JALR instructions
363 void *get_addr(u_int vaddr)
364 {
365   u_int page=get_page(vaddr);
366   u_int vpage=get_vpage(vaddr);
367   struct ll_entry *head;
368   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
369   head=jump_in[page];
370   while(head!=NULL) {
371     if(head->vaddr==vaddr) {
372   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
373       u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
374       ht_bin[3]=ht_bin[1];
375       ht_bin[2]=ht_bin[0];
376       ht_bin[1]=(u_int)head->addr;
377       ht_bin[0]=vaddr;
378       return head->addr;
379     }
380     head=head->next;
381   }
382   head=jump_dirty[vpage];
383   while(head!=NULL) {
384     if(head->vaddr==vaddr) {
385       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
386       // Don't restore blocks which are about to expire from the cache
387       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
388       if(verify_dirty(head->addr)) {
389         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
390         invalid_code[vaddr>>12]=0;
391         inv_code_start=inv_code_end=~0;
392         if(vpage<2048) {
393           restore_candidate[vpage>>3]|=1<<(vpage&7);
394         }
395         else restore_candidate[page>>3]|=1<<(page&7);
396         u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
397         if(ht_bin[0]==vaddr) {
398           ht_bin[1]=(u_int)head->addr; // Replace existing entry
399         }
400         else
401         {
402           ht_bin[3]=ht_bin[1];
403           ht_bin[2]=ht_bin[0];
404           ht_bin[1]=(int)head->addr;
405           ht_bin[0]=vaddr;
406         }
407         return head->addr;
408       }
409     }
410     head=head->next;
411   }
412   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
413   int r=new_recompile_block(vaddr);
414   if(r==0) return get_addr(vaddr);
415   // Execute in unmapped page, generate pagefault exception
416   Status|=2;
417   Cause=(vaddr<<31)|0x8;
418   EPC=(vaddr&1)?vaddr-5:vaddr;
419   BadVAddr=(vaddr&~1);
420   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
421   EntryHi=BadVAddr&0xFFFFE000;
422   return get_addr_ht(0x80000000);
423 }
424 // Look up address in hash table first
425 void *get_addr_ht(u_int vaddr)
426 {
427   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
428   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
429   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
430   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
431   return get_addr(vaddr);
432 }
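/* Illustrative note on the hash table layout (as used by the lookups
 * above): each hash_table bin holds two {vaddr, host address} pairs,
 * {vaddr0, addr0, vaddr1, addr1}, indexed by ((vaddr>>16)^vaddr)&0xFFFF.
 * get_addr() refreshes a bin by pushing the old pair into slots 2/3 and
 * writing the new pair into slots 0/1, so slots 0/1 act as the
 * most-recently-used way (check_addr() below instead fills an empty way
 * without evicting). */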
433
434 void clear_all_regs(signed char regmap[])
435 {
436   int hr;
437   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
438 }
439
440 signed char get_reg(signed char regmap[],int r)
441 {
442   int hr;
443   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
444   return -1;
445 }
446
447 // Find a register that is available for two consecutive cycles
448 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
449 {
450   int hr;
451   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
452   return -1;
453 }
454
455 int count_free_regs(signed char regmap[])
456 {
457   int count=0;
458   int hr;
459   for(hr=0;hr<HOST_REGS;hr++)
460   {
461     if(hr!=EXCLUDE_REG) {
462       if(regmap[hr]<0) count++;
463     }
464   }
465   return count;
466 }
467
468 void dirty_reg(struct regstat *cur,signed char reg)
469 {
470   int hr;
471   if(!reg) return;
472   for (hr=0;hr<HOST_REGS;hr++) {
473     if((cur->regmap[hr]&63)==reg) {
474       cur->dirty|=1<<hr;
475     }
476   }
477 }
478
479 // If we dirty the lower half of a 64 bit register which is now being
480 // sign-extended, we need to dump the upper half.
481 // Note: Do this only after completion of the instruction, because
482 // some instructions may need to read the full 64-bit value even if
483 // overwriting it (eg SLTI, DSRA32).
484 static void flush_dirty_uppers(struct regstat *cur)
485 {
486   int hr,reg;
487   for (hr=0;hr<HOST_REGS;hr++) {
488     if((cur->dirty>>hr)&1) {
489       reg=cur->regmap[hr];
490       if(reg>=64)
491         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
492     }
493   }
494 }
495
496 void set_const(struct regstat *cur,signed char reg,uint64_t value)
497 {
498   int hr;
499   if(!reg) return;
500   for (hr=0;hr<HOST_REGS;hr++) {
501     if(cur->regmap[hr]==reg) {
502       cur->isconst|=1<<hr;
503       current_constmap[hr]=value;
504     }
505     else if((cur->regmap[hr]^64)==reg) {
506       cur->isconst|=1<<hr;
507       current_constmap[hr]=value>>32;
508     }
509   }
510 }
511
512 void clear_const(struct regstat *cur,signed char reg)
513 {
514   int hr;
515   if(!reg) return;
516   for (hr=0;hr<HOST_REGS;hr++) {
517     if((cur->regmap[hr]&63)==reg) {
518       cur->isconst&=~(1<<hr);
519     }
520   }
521 }
522
523 int is_const(struct regstat *cur,signed char reg)
524 {
525   int hr;
526   if(reg<0) return 0;
527   if(!reg) return 1;
528   for (hr=0;hr<HOST_REGS;hr++) {
529     if((cur->regmap[hr]&63)==reg) {
530       return (cur->isconst>>hr)&1;
531     }
532   }
533   return 0;
534 }
535 uint64_t get_const(struct regstat *cur,signed char reg)
536 {
537   int hr;
538   if(!reg) return 0;
539   for (hr=0;hr<HOST_REGS;hr++) {
540     if(cur->regmap[hr]==reg) {
541       return current_constmap[hr];
542     }
543   }
544   SysPrintf("Unknown constant in r%d\n",reg);
545   exit(1);
546 }
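/* Illustrative sketch of how the const-tracking helpers cooperate
 * (mirroring the ORI handling in imm16_alloc() further below):
 *
 *   if(is_const(current,rs1[i])) {          // source value known at compile time?
 *     int v=get_const(current,rs1[i]);      // fetch it
 *     set_const(current,rt1[i],v|imm[i]);   // record the folded result for rt
 *   }
 *   else clear_const(current,rt1[i]);       // otherwise rt is no longer constant
 */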
547
548 // Least soon needed registers
549 // Look at the next ten instructions and see which registers
550 // will be used.  Try not to reallocate these.
551 void lsn(u_char hsn[], int i, int *preferred_reg)
552 {
553   int j;
554   int b=-1;
555   for(j=0;j<9;j++)
556   {
557     if(i+j>=slen) {
558       j=slen-i-1;
559       break;
560     }
561     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
562     {
563       // Don't go past an unconditional jump
564       j++;
565       break;
566     }
567   }
568   for(;j>=0;j--)
569   {
570     if(rs1[i+j]) hsn[rs1[i+j]]=j;
571     if(rs2[i+j]) hsn[rs2[i+j]]=j;
572     if(rt1[i+j]) hsn[rt1[i+j]]=j;
573     if(rt2[i+j]) hsn[rt2[i+j]]=j;
574     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
575       // Stores can allocate zero
576       hsn[rs1[i+j]]=j;
577       hsn[rs2[i+j]]=j;
578     }
579     // On some architectures stores need invc_ptr
580     #if defined(HOST_IMM8)
581     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
582       hsn[INVCP]=j;
583     }
584     #endif
585     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
586     {
587       hsn[CCREG]=j;
588       b=j;
589     }
590   }
591   if(b>=0)
592   {
593     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
594     {
595       // Follow first branch
596       int t=(ba[i+b]-start)>>2;
597       j=7-b;if(t+j>=slen) j=slen-t-1;
598       for(;j>=0;j--)
599       {
600         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
601         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
602         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
603         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
604       }
605     }
606     // TODO: preferred register based on backward branch
607   }
608   // Delay slot should preferably not overwrite branch conditions or cycle count
609   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
610     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
611     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
612     hsn[CCREG]=1;
613     // ...or hash tables
614     hsn[RHASH]=1;
615     hsn[RHTBL]=1;
616   }
617   // Coprocessor load/store needs FTEMP, even if not declared
618   if(itype[i]==C1LS||itype[i]==C2LS) {
619     hsn[FTEMP]=0;
620   }
621   // Load L/R also uses FTEMP as a temporary register
622   if(itype[i]==LOADLR) {
623     hsn[FTEMP]=0;
624   }
625   // Also SWL/SWR/SDL/SDR
626   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
627     hsn[FTEMP]=0;
628   }
629   // Don't remove the miniht registers
630   if(itype[i]==UJUMP||itype[i]==RJUMP)
631   {
632     hsn[RHASH]=0;
633     hsn[RHTBL]=0;
634   }
635 }
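/* Illustrative caller sketch (hypothetical; based on how hsn[] is filled
 * above and on the rn=10 "not needed soon" sentinel used by needed_again()
 * below): a register allocator seeds every distance with 10 and then
 * prefers to evict the host register whose guest register is needed
 * least soon:
 *
 *   u_char hsn[MAXREG+1];
 *   memset(hsn,10,sizeof(hsn)); // assume "not needed within the window"
 *   lsn(hsn,i,&preferred_reg);
 *   // evict the mapping with the largest hsn[] value
 */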
636
637 // We only want to allocate registers if we're going to use them again soon
638 int needed_again(int r, int i)
639 {
640   int j;
641   int b=-1;
642   int rn=10;
643
644   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
645   {
646     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
647       return 0; // Don't need any registers if exiting the block
648   }
649   for(j=0;j<9;j++)
650   {
651     if(i+j>=slen) {
652       j=slen-i-1;
653       break;
654     }
655     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
656     {
657       // Don't go past an unconditional jump
658       j++;
659       break;
660     }
661     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
662     {
663       break;
664     }
665   }
666   for(;j>=1;j--)
667   {
668     if(rs1[i+j]==r) rn=j;
669     if(rs2[i+j]==r) rn=j;
670     if((unneeded_reg[i+j]>>r)&1) rn=10;
671     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
672     {
673       b=j;
674     }
675   }
676   /*
677   if(b>=0)
678   {
679     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
680     {
681       // Follow first branch
682       int o=rn;
683       int t=(ba[i+b]-start)>>2;
684       j=7-b;if(t+j>=slen) j=slen-t-1;
685       for(;j>=0;j--)
686       {
687         if(!((unneeded_reg[t+j]>>r)&1)) {
688           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
689           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
690         }
691         else rn=o;
692       }
693     }
694   }*/
695   if(rn<10) return 1;
696   (void)b;
697   return 0;
698 }
699
700 // Try to match register allocations at the end of a loop with those
701 // at the beginning
702 int loop_reg(int i, int r, int hr)
703 {
704   int j,k;
705   for(j=0;j<9;j++)
706   {
707     if(i+j>=slen) {
708       j=slen-i-1;
709       break;
710     }
711     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
712     {
713       // Don't go past an unconditional jump
714       j++;
715       break;
716     }
717   }
718   k=0;
719   if(i>0){
720     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
721       k--;
722   }
723   for(;k<j;k++)
724   {
725     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
726     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
727     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
728     {
729       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
730       {
731         int t=(ba[i+k]-start)>>2;
732         int reg=get_reg(regs[t].regmap_entry,r);
733         if(reg>=0) return reg;
734         //reg=get_reg(regs[t+1].regmap_entry,r);
735         //if(reg>=0) return reg;
736       }
737     }
738   }
739   return hr;
740 }
741
742
743 // Allocate every register, preserving source/target regs
744 void alloc_all(struct regstat *cur,int i)
745 {
746   int hr;
747
748   for(hr=0;hr<HOST_REGS;hr++) {
749     if(hr!=EXCLUDE_REG) {
750       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
751          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
752       {
753         cur->regmap[hr]=-1;
754         cur->dirty&=~(1<<hr);
755       }
756       // Don't need zeros
757       if((cur->regmap[hr]&63)==0)
758       {
759         cur->regmap[hr]=-1;
760         cur->dirty&=~(1<<hr);
761       }
762     }
763   }
764 }
765
766 #ifdef __i386__
767 #include "assem_x86.c"
768 #endif
769 #ifdef __x86_64__
770 #include "assem_x64.c"
771 #endif
772 #ifdef __arm__
773 #include "assem_arm.c"
774 #endif
775
776 // Add virtual address mapping to linked list
777 void ll_add(struct ll_entry **head,int vaddr,void *addr)
778 {
779   struct ll_entry *new_entry;
780   new_entry=malloc(sizeof(struct ll_entry));
781   assert(new_entry!=NULL);
782   new_entry->vaddr=vaddr;
783   new_entry->reg_sv_flags=0;
784   new_entry->addr=addr;
785   new_entry->next=*head;
786   *head=new_entry;
787 }
788
789 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
790 {
791   ll_add(head,vaddr,addr);
792   (*head)->reg_sv_flags=reg_sv_flags;
793 }
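/* Illustrative usage sketch (mirroring clean_blocks() further below):
 * registering a compiled entry point for a page, together with the
 * register flags the block was verified against:
 *
 *   ll_add_flags(jump_in+page, vaddr, reg_sv_flags, compiled_addr);
 *
 * where page/vaddr/reg_sv_flags/compiled_addr are hypothetical locals. */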
794
795 // Check if an address is already compiled
796 // but don't return addresses which are about to expire from the cache
797 void *check_addr(u_int vaddr)
798 {
799   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
800   if(ht_bin[0]==vaddr) {
801     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
802       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
803   }
804   if(ht_bin[2]==vaddr) {
805     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
806       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
807   }
808   u_int page=get_page(vaddr);
809   struct ll_entry *head;
810   head=jump_in[page];
811   while(head!=NULL) {
812     if(head->vaddr==vaddr) {
813       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
814         // Update existing entry with current address
815         if(ht_bin[0]==vaddr) {
816           ht_bin[1]=(int)head->addr;
817           return head->addr;
818         }
819         if(ht_bin[2]==vaddr) {
820           ht_bin[3]=(int)head->addr;
821           return head->addr;
822         }
823         // Insert into hash table with low priority.
824         // Don't evict existing entries, as they are probably
825         // addresses that are being accessed frequently.
826         if(ht_bin[0]==-1) {
827           ht_bin[1]=(int)head->addr;
828           ht_bin[0]=vaddr;
829         }else if(ht_bin[2]==-1) {
830           ht_bin[3]=(int)head->addr;
831           ht_bin[2]=vaddr;
832         }
833         return head->addr;
834       }
835     }
836     head=head->next;
837   }
838   return 0;
839 }
840
841 void remove_hash(int vaddr)
842 {
843   //printf("remove hash: %x\n",vaddr);
844   u_int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
845   if(ht_bin[2]==vaddr) {
846     ht_bin[2]=ht_bin[3]=-1;
847   }
848   if(ht_bin[0]==vaddr) {
849     ht_bin[0]=ht_bin[2];
850     ht_bin[1]=ht_bin[3];
851     ht_bin[2]=ht_bin[3]=-1;
852   }
853 }
854
855 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
856 {
857   struct ll_entry *next;
858   while(*head) {
859     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
860        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
861     {
862       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
863       remove_hash((*head)->vaddr);
864       next=(*head)->next;
865       free(*head);
866       *head=next;
867     }
868     else
869     {
870       head=&((*head)->next);
871     }
872   }
873 }
874
875 // Remove all entries from linked list
876 void ll_clear(struct ll_entry **head)
877 {
878   struct ll_entry *cur;
879   struct ll_entry *next;
880   if((cur=*head)) {
881     *head=0;
882     while(cur) {
883       next=cur->next;
884       free(cur);
885       cur=next;
886     }
887   }
888 }
889
890 // Dereference the pointers and remove if it matches
891 static void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
892 {
893   while(head) {
894     int ptr=get_pointer(head->addr);
895     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
896     if(((ptr>>shift)==(addr>>shift)) ||
897        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
898     {
899       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
900       void *host_addr=find_extjump_insn(head->addr);
901       #ifdef __arm__
902         mark_clear_cache(host_addr);
903       #endif
904       set_jump_target((int)host_addr,(int)head->addr);
905     }
906     head=head->next;
907   }
908 }
909
910 // This is called when we write to a compiled block (see do_invstub)
911 void invalidate_page(u_int page)
912 {
913   struct ll_entry *head;
914   struct ll_entry *next;
915   head=jump_in[page];
916   jump_in[page]=0;
917   while(head!=NULL) {
918     inv_debug("INVALIDATE: %x\n",head->vaddr);
919     remove_hash(head->vaddr);
920     next=head->next;
921     free(head);
922     head=next;
923   }
924   head=jump_out[page];
925   jump_out[page]=0;
926   while(head!=NULL) {
927     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
928     void *host_addr=find_extjump_insn(head->addr);
929     #ifdef __arm__
930       mark_clear_cache(host_addr);
931     #endif
932     set_jump_target((int)host_addr,(int)head->addr);
933     next=head->next;
934     free(head);
935     head=next;
936   }
937 }
938
939 static void invalidate_block_range(u_int block, u_int first, u_int last)
940 {
941   u_int page=get_page(block<<12);
942   //printf("first=%d last=%d\n",first,last);
943   invalidate_page(page);
944   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
945   assert(last<page+5);
946   // Invalidate the adjacent pages if a block crosses a 4K boundary
947   while(first<page) {
948     invalidate_page(first);
949     first++;
950   }
951   for(first=page+1;first<last;first++) {
952     invalidate_page(first);
953   }
954   #ifdef __arm__
955     do_clear_cache();
956   #endif
957
958   // Don't trap writes
959   invalid_code[block]=1;
960
961   #ifdef USE_MINI_HT
962   memset(mini_ht,-1,sizeof(mini_ht));
963   #endif
964 }
965
966 void invalidate_block(u_int block)
967 {
968   u_int page=get_page(block<<12);
969   u_int vpage=get_vpage(block<<12);
970   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
971   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
972   u_int first,last;
973   first=last=page;
974   struct ll_entry *head;
975   head=jump_dirty[vpage];
976   //printf("page=%d vpage=%d\n",page,vpage);
977   while(head!=NULL) {
978     u_int start,end;
979     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
980       get_bounds((int)head->addr,&start,&end);
981       //printf("start: %x end: %x\n",start,end);
982       if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
983         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
984           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
985           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
986         }
987       }
988     }
989     head=head->next;
990   }
991   invalidate_block_range(block,first,last);
992 }
993
994 void invalidate_addr(u_int addr)
995 {
996   //static int rhits;
997   // this check is done by the caller
998   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
999   u_int page=get_vpage(addr);
1000   if(page<2048) { // RAM
1001     struct ll_entry *head;
1002     u_int addr_min=~0, addr_max=0;
1003     u_int mask=RAM_SIZE-1;
1004     u_int addr_main=0x80000000|(addr&mask);
1005     int pg1;
1006     inv_code_start=addr_main&~0xfff;
1007     inv_code_end=addr_main|0xfff;
1008     pg1=page;
1009     if (pg1>0) {
1010       // must check previous page too because of spans..
1011       pg1--;
1012       inv_code_start-=0x1000;
1013     }
1014     for(;pg1<=page;pg1++) {
1015       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1016         u_int start,end;
1017         get_bounds((int)head->addr,&start,&end);
1018         if(ram_offset) {
1019           start-=ram_offset;
1020           end-=ram_offset;
1021         }
1022         if(start<=addr_main&&addr_main<end) {
1023           if(start<addr_min) addr_min=start;
1024           if(end>addr_max) addr_max=end;
1025         }
1026         else if(addr_main<start) {
1027           if(start<inv_code_end)
1028             inv_code_end=start-1;
1029         }
1030         else {
1031           if(end>inv_code_start)
1032             inv_code_start=end;
1033         }
1034       }
1035     }
1036     if (addr_min!=~0) {
1037       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1038       inv_code_start=inv_code_end=~0;
1039       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1040       return;
1041     }
1042     else {
1043       inv_code_start=(addr&~mask)|(inv_code_start&mask);
1044       inv_code_end=(addr&~mask)|(inv_code_end&mask);
1045       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
1046       return;
1047     }
1048   }
1049   invalidate_block(addr>>12);
1050 }
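/* Illustrative note: when a RAM write does not hit any dirty block, the
 * code above widens the cached code-free range [inv_code_start,
 * inv_code_end] instead of invalidating anything, so the caller's
 * fast-path check (shown commented out at the top of this function) can
 * ignore further writes into that range without calling back in here. */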
1051
1052 // This is called when loading a save state.
1053 // Anything could have changed, so invalidate everything.
1054 void invalidate_all_pages()
1055 {
1056   u_int page;
1057   for(page=0;page<4096;page++)
1058     invalidate_page(page);
1059   for(page=0;page<1048576;page++)
1060     if(!invalid_code[page]) {
1061       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1062       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1063     }
1064   #ifdef USE_MINI_HT
1065   memset(mini_ht,-1,sizeof(mini_ht));
1066   #endif
1067 }
1068
1069 // Add an entry to jump_out after making a link
1070 void add_link(u_int vaddr,void *src)
1071 {
1072   u_int page=get_page(vaddr);
1073   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1074   int *ptr=(int *)(src+4);
1075   assert((*ptr&0x0fff0000)==0x059f0000);
1076   (void)ptr;
1077   ll_add(jump_out+page,vaddr,src);
1078   //int ptr=get_pointer(src);
1079   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1080 }
1081
1082 // If a code block was found to be unmodified (bit was set in
1083 // restore_candidate) and it remains unmodified (bit is clear
1084 // in invalid_code) then move the entries for that 4K page from
1085 // the dirty list to the clean list.
1086 void clean_blocks(u_int page)
1087 {
1088   struct ll_entry *head;
1089   inv_debug("INV: clean_blocks page=%d\n",page);
1090   head=jump_dirty[page];
1091   while(head!=NULL) {
1092     if(!invalid_code[head->vaddr>>12]) {
1093       // Don't restore blocks which are about to expire from the cache
1094       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1095         u_int start,end;
1096         if(verify_dirty(head->addr)) {
1097           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1098           u_int i;
1099           u_int inv=0;
1100           get_bounds((int)head->addr,&start,&end);
1101           if(start-(u_int)rdram<RAM_SIZE) {
1102             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1103               inv|=invalid_code[i];
1104             }
1105           }
1106           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1107             inv=1;
1108           }
1109           if(!inv) {
1110             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1111             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1112               u_int ppage=page;
1113               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1114               //printf("page=%x, addr=%x\n",page,head->vaddr);
1115               //assert(head->vaddr>>12==(page|0x80000));
1116               ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
1117               u_int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1118               if(ht_bin[0]==head->vaddr) {
1119                 ht_bin[1]=(u_int)clean_addr; // Replace existing entry
1120               }
1121               if(ht_bin[2]==head->vaddr) {
1122                 ht_bin[3]=(u_int)clean_addr; // Replace existing entry
1123               }
1124             }
1125           }
1126         }
1127       }
1128     }
1129     head=head->next;
1130   }
1131 }
1132
1133
1134 void mov_alloc(struct regstat *current,int i)
1135 {
1136   // Note: Don't need to actually alloc the source registers
1137   if((~current->is32>>rs1[i])&1) {
1138     //alloc_reg64(current,i,rs1[i]);
1139     alloc_reg64(current,i,rt1[i]);
1140     current->is32&=~(1LL<<rt1[i]);
1141   } else {
1142     //alloc_reg(current,i,rs1[i]);
1143     alloc_reg(current,i,rt1[i]);
1144     current->is32|=(1LL<<rt1[i]);
1145   }
1146   clear_const(current,rs1[i]);
1147   clear_const(current,rt1[i]);
1148   dirty_reg(current,rt1[i]);
1149 }
1150
1151 void shiftimm_alloc(struct regstat *current,int i)
1152 {
1153   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1154   {
1155     if(rt1[i]) {
1156       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1157       else lt1[i]=rs1[i];
1158       alloc_reg(current,i,rt1[i]);
1159       current->is32|=1LL<<rt1[i];
1160       dirty_reg(current,rt1[i]);
1161       if(is_const(current,rs1[i])) {
1162         int v=get_const(current,rs1[i]);
1163         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1164         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1165         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1166       }
1167       else clear_const(current,rt1[i]);
1168     }
1169   }
1170   else
1171   {
1172     clear_const(current,rs1[i]);
1173     clear_const(current,rt1[i]);
1174   }
1175
1176   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1177   {
1178     if(rt1[i]) {
1179       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1180       alloc_reg64(current,i,rt1[i]);
1181       current->is32&=~(1LL<<rt1[i]);
1182       dirty_reg(current,rt1[i]);
1183     }
1184   }
1185   if(opcode2[i]==0x3c) // DSLL32
1186   {
1187     if(rt1[i]) {
1188       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1189       alloc_reg64(current,i,rt1[i]);
1190       current->is32&=~(1LL<<rt1[i]);
1191       dirty_reg(current,rt1[i]);
1192     }
1193   }
1194   if(opcode2[i]==0x3e) // DSRL32
1195   {
1196     if(rt1[i]) {
1197       alloc_reg64(current,i,rs1[i]);
1198       if(imm[i]==32) {
1199         alloc_reg64(current,i,rt1[i]);
1200         current->is32&=~(1LL<<rt1[i]);
1201       } else {
1202         alloc_reg(current,i,rt1[i]);
1203         current->is32|=1LL<<rt1[i];
1204       }
1205       dirty_reg(current,rt1[i]);
1206     }
1207   }
1208   if(opcode2[i]==0x3f) // DSRA32
1209   {
1210     if(rt1[i]) {
1211       alloc_reg64(current,i,rs1[i]);
1212       alloc_reg(current,i,rt1[i]);
1213       current->is32|=1LL<<rt1[i];
1214       dirty_reg(current,rt1[i]);
1215     }
1216   }
1217 }
1218
1219 void shift_alloc(struct regstat *current,int i)
1220 {
1221   if(rt1[i]) {
1222     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1223     {
1224       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1225       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1226       alloc_reg(current,i,rt1[i]);
1227       if(rt1[i]==rs2[i]) {
1228         alloc_reg_temp(current,i,-1);
1229         minimum_free_regs[i]=1;
1230       }
1231       current->is32|=1LL<<rt1[i];
1232     } else { // DSLLV/DSRLV/DSRAV
1233       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1234       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1235       alloc_reg64(current,i,rt1[i]);
1236       current->is32&=~(1LL<<rt1[i]);
1237       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1238       {
1239         alloc_reg_temp(current,i,-1);
1240         minimum_free_regs[i]=1;
1241       }
1242     }
1243     clear_const(current,rs1[i]);
1244     clear_const(current,rs2[i]);
1245     clear_const(current,rt1[i]);
1246     dirty_reg(current,rt1[i]);
1247   }
1248 }
1249
1250 void alu_alloc(struct regstat *current,int i)
1251 {
1252   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1253     if(rt1[i]) {
1254       if(rs1[i]&&rs2[i]) {
1255         alloc_reg(current,i,rs1[i]);
1256         alloc_reg(current,i,rs2[i]);
1257       }
1258       else {
1259         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1260         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1261       }
1262       alloc_reg(current,i,rt1[i]);
1263     }
1264     current->is32|=1LL<<rt1[i];
1265   }
1266   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1267     if(rt1[i]) {
1268       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1269       {
1270         alloc_reg64(current,i,rs1[i]);
1271         alloc_reg64(current,i,rs2[i]);
1272         alloc_reg(current,i,rt1[i]);
1273       } else {
1274         alloc_reg(current,i,rs1[i]);
1275         alloc_reg(current,i,rs2[i]);
1276         alloc_reg(current,i,rt1[i]);
1277       }
1278     }
1279     current->is32|=1LL<<rt1[i];
1280   }
1281   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1282     if(rt1[i]) {
1283       if(rs1[i]&&rs2[i]) {
1284         alloc_reg(current,i,rs1[i]);
1285         alloc_reg(current,i,rs2[i]);
1286       }
1287       else
1288       {
1289         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1290         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1291       }
1292       alloc_reg(current,i,rt1[i]);
1293       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1294       {
1295         if(!((current->uu>>rt1[i])&1)) {
1296           alloc_reg64(current,i,rt1[i]);
1297         }
1298         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1299           if(rs1[i]&&rs2[i]) {
1300             alloc_reg64(current,i,rs1[i]);
1301             alloc_reg64(current,i,rs2[i]);
1302           }
1303           else
1304           {
1305             // Is it really worth it to keep 64-bit values in registers?
1306             #ifdef NATIVE_64BIT
1307             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1308             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1309             #endif
1310           }
1311         }
1312         current->is32&=~(1LL<<rt1[i]);
1313       } else {
1314         current->is32|=1LL<<rt1[i];
1315       }
1316     }
1317   }
1318   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1319     if(rt1[i]) {
1320       if(rs1[i]&&rs2[i]) {
1321         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1322           alloc_reg64(current,i,rs1[i]);
1323           alloc_reg64(current,i,rs2[i]);
1324           alloc_reg64(current,i,rt1[i]);
1325         } else {
1326           alloc_reg(current,i,rs1[i]);
1327           alloc_reg(current,i,rs2[i]);
1328           alloc_reg(current,i,rt1[i]);
1329         }
1330       }
1331       else {
1332         alloc_reg(current,i,rt1[i]);
1333         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1334           // DADD used as move, or zeroing
1335           // If we have a 64-bit source, then make the target 64 bits too
1336           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1337             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1338             alloc_reg64(current,i,rt1[i]);
1339           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1340             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1341             alloc_reg64(current,i,rt1[i]);
1342           }
1343           if(opcode2[i]>=0x2e&&rs2[i]) {
1344             // DSUB used as negation - 64-bit result
1345             // If we have a 32-bit register, extend it to 64 bits
1346             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1347             alloc_reg64(current,i,rt1[i]);
1348           }
1349         }
1350       }
1351       if(rs1[i]&&rs2[i]) {
1352         current->is32&=~(1LL<<rt1[i]);
1353       } else if(rs1[i]) {
1354         current->is32&=~(1LL<<rt1[i]);
1355         if((current->is32>>rs1[i])&1)
1356           current->is32|=1LL<<rt1[i];
1357       } else if(rs2[i]) {
1358         current->is32&=~(1LL<<rt1[i]);
1359         if((current->is32>>rs2[i])&1)
1360           current->is32|=1LL<<rt1[i];
1361       } else {
1362         current->is32|=1LL<<rt1[i];
1363       }
1364     }
1365   }
1366   clear_const(current,rs1[i]);
1367   clear_const(current,rs2[i]);
1368   clear_const(current,rt1[i]);
1369   dirty_reg(current,rt1[i]);
1370 }
1371
1372 void imm16_alloc(struct regstat *current,int i)
1373 {
1374   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1375   else lt1[i]=rs1[i];
1376   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1377   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1378     current->is32&=~(1LL<<rt1[i]);
1379     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1380       // TODO: Could preserve the 32-bit flag if the immediate is zero
1381       alloc_reg64(current,i,rt1[i]);
1382       alloc_reg64(current,i,rs1[i]);
1383     }
1384     clear_const(current,rs1[i]);
1385     clear_const(current,rt1[i]);
1386   }
1387   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1388     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1389     current->is32|=1LL<<rt1[i];
1390     clear_const(current,rs1[i]);
1391     clear_const(current,rt1[i]);
1392   }
1393   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1394     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1395       if(rs1[i]!=rt1[i]) {
1396         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1397         alloc_reg64(current,i,rt1[i]);
1398         current->is32&=~(1LL<<rt1[i]);
1399       }
1400     }
1401     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1402     if(is_const(current,rs1[i])) {
1403       int v=get_const(current,rs1[i]);
1404       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1405       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1406       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1407     }
1408     else clear_const(current,rt1[i]);
1409   }
1410   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1411     if(is_const(current,rs1[i])) {
1412       int v=get_const(current,rs1[i]);
1413       set_const(current,rt1[i],v+imm[i]);
1414     }
1415     else clear_const(current,rt1[i]);
1416     current->is32|=1LL<<rt1[i];
1417   }
1418   else {
1419     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1420     current->is32|=1LL<<rt1[i];
1421   }
1422   dirty_reg(current,rt1[i]);
1423 }
1424
1425 void load_alloc(struct regstat *current,int i)
1426 {
1427   clear_const(current,rt1[i]);
1428   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1429   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1430   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1431   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1432     alloc_reg(current,i,rt1[i]);
1433     assert(get_reg(current->regmap,rt1[i])>=0);
1434     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1435     {
1436       current->is32&=~(1LL<<rt1[i]);
1437       alloc_reg64(current,i,rt1[i]);
1438     }
1439     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1440     {
1441       current->is32&=~(1LL<<rt1[i]);
1442       alloc_reg64(current,i,rt1[i]);
1443       alloc_all(current,i);
1444       alloc_reg64(current,i,FTEMP);
1445       minimum_free_regs[i]=HOST_REGS;
1446     }
1447     else current->is32|=1LL<<rt1[i];
1448     dirty_reg(current,rt1[i]);
1449     // LWL/LWR need a temporary register for the old value
1450     if(opcode[i]==0x22||opcode[i]==0x26)
1451     {
1452       alloc_reg(current,i,FTEMP);
1453       alloc_reg_temp(current,i,-1);
1454       minimum_free_regs[i]=1;
1455     }
1456   }
1457   else
1458   {
1459     // Load to r0 or unneeded register (dummy load)
1460     // but we still need a register to calculate the address
1461     if(opcode[i]==0x22||opcode[i]==0x26)
1462     {
1463       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1464     }
1465     alloc_reg_temp(current,i,-1);
1466     minimum_free_regs[i]=1;
1467     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1468     {
1469       alloc_all(current,i);
1470       alloc_reg64(current,i,FTEMP);
1471       minimum_free_regs[i]=HOST_REGS;
1472     }
1473   }
1474 }
1475
1476 void store_alloc(struct regstat *current,int i)
1477 {
1478   clear_const(current,rs2[i]);
1479   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1480   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1481   alloc_reg(current,i,rs2[i]);
1482   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1483     alloc_reg64(current,i,rs2[i]);
1484     if(rs2[i]) alloc_reg(current,i,FTEMP);
1485   }
1486   #if defined(HOST_IMM8)
1487   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1488   else alloc_reg(current,i,INVCP);
1489   #endif
1490   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1491     alloc_reg(current,i,FTEMP);
1492   }
1493   // We need a temporary register for address generation
1494   alloc_reg_temp(current,i,-1);
1495   minimum_free_regs[i]=1;
1496 }
1497
1498 void c1ls_alloc(struct regstat *current,int i)
1499 {
1500   //clear_const(current,rs1[i]); // FIXME
1501   clear_const(current,rt1[i]);
1502   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1503   alloc_reg(current,i,CSREG); // Status
1504   alloc_reg(current,i,FTEMP);
1505   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1506     alloc_reg64(current,i,FTEMP);
1507   }
1508   #if defined(HOST_IMM8)
1509   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1510   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1511     alloc_reg(current,i,INVCP);
1512   #endif
1513   // We need a temporary register for address generation
1514   alloc_reg_temp(current,i,-1);
1515 }
1516
1517 void c2ls_alloc(struct regstat *current,int i)
1518 {
1519   clear_const(current,rt1[i]);
1520   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1521   alloc_reg(current,i,FTEMP);
1522   #if defined(HOST_IMM8)
1523   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1524   if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1525     alloc_reg(current,i,INVCP);
1526   #endif
1527   // We need a temporary register for address generation
1528   alloc_reg_temp(current,i,-1);
1529   minimum_free_regs[i]=1;
1530 }
1531
1532 #ifndef multdiv_alloc
1533 void multdiv_alloc(struct regstat *current,int i)
1534 {
1535   //  case 0x18: MULT
1536   //  case 0x19: MULTU
1537   //  case 0x1A: DIV
1538   //  case 0x1B: DIVU
1539   //  case 0x1C: DMULT
1540   //  case 0x1D: DMULTU
1541   //  case 0x1E: DDIV
1542   //  case 0x1F: DDIVU
1543   clear_const(current,rs1[i]);
1544   clear_const(current,rs2[i]);
1545   if(rs1[i]&&rs2[i])
1546   {
1547     if((opcode2[i]&4)==0) // 32-bit
1548     {
1549       current->u&=~(1LL<<HIREG);
1550       current->u&=~(1LL<<LOREG);
1551       alloc_reg(current,i,HIREG);
1552       alloc_reg(current,i,LOREG);
1553       alloc_reg(current,i,rs1[i]);
1554       alloc_reg(current,i,rs2[i]);
1555       current->is32|=1LL<<HIREG;
1556       current->is32|=1LL<<LOREG;
1557       dirty_reg(current,HIREG);
1558       dirty_reg(current,LOREG);
1559     }
1560     else // 64-bit
1561     {
1562       current->u&=~(1LL<<HIREG);
1563       current->u&=~(1LL<<LOREG);
1564       current->uu&=~(1LL<<HIREG);
1565       current->uu&=~(1LL<<LOREG);
1566       alloc_reg64(current,i,HIREG);
1567       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1568       alloc_reg64(current,i,rs1[i]);
1569       alloc_reg64(current,i,rs2[i]);
1570       alloc_all(current,i);
1571       current->is32&=~(1LL<<HIREG);
1572       current->is32&=~(1LL<<LOREG);
1573       dirty_reg(current,HIREG);
1574       dirty_reg(current,LOREG);
1575       minimum_free_regs[i]=HOST_REGS;
1576     }
1577   }
1578   else
1579   {
1580     // Multiply by zero is zero.
1581     // MIPS does not have a divide by zero exception.
1582     // The result is undefined; we return zero.
1583     alloc_reg(current,i,HIREG);
1584     alloc_reg(current,i,LOREG);
1585     current->is32|=1LL<<HIREG;
1586     current->is32|=1LL<<LOREG;
1587     dirty_reg(current,HIREG);
1588     dirty_reg(current,LOREG);
1589   }
1590 }
1591 #endif
1592
1593 void cop0_alloc(struct regstat *current,int i)
1594 {
1595   if(opcode2[i]==0) // MFC0
1596   {
1597     if(rt1[i]) {
1598       clear_const(current,rt1[i]);
1599       alloc_all(current,i);
1600       alloc_reg(current,i,rt1[i]);
1601       current->is32|=1LL<<rt1[i];
1602       dirty_reg(current,rt1[i]);
1603     }
1604   }
1605   else if(opcode2[i]==4) // MTC0
1606   {
1607     if(rs1[i]){
1608       clear_const(current,rs1[i]);
1609       alloc_reg(current,i,rs1[i]);
1610       alloc_all(current,i);
1611     }
1612     else {
1613       alloc_all(current,i); // FIXME: Keep r0
1614       current->u&=~1LL;
1615       alloc_reg(current,i,0);
1616     }
1617   }
1618   else
1619   {
1620     // TLBR/TLBWI/TLBWR/TLBP/ERET
1621     assert(opcode2[i]==0x10);
1622     alloc_all(current,i);
1623   }
1624   minimum_free_regs[i]=HOST_REGS;
1625 }
1626
1627 void cop1_alloc(struct regstat *current,int i)
1628 {
1629   alloc_reg(current,i,CSREG); // Load status
1630   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1631   {
1632     if(rt1[i]){
1633       clear_const(current,rt1[i]);
1634       if(opcode2[i]==1) {
1635         alloc_reg64(current,i,rt1[i]); // DMFC1
1636         current->is32&=~(1LL<<rt1[i]);
1637       }else{
1638         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1639         current->is32|=1LL<<rt1[i];
1640       }
1641       dirty_reg(current,rt1[i]);
1642     }
1643     alloc_reg_temp(current,i,-1);
1644   }
1645   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1646   {
1647     if(rs1[i]){
1648       clear_const(current,rs1[i]);
1649       if(opcode2[i]==5)
1650         alloc_reg64(current,i,rs1[i]); // DMTC1
1651       else
1652         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1653       alloc_reg_temp(current,i,-1);
1654     }
1655     else {
1656       current->u&=~1LL;
1657       alloc_reg(current,i,0);
1658       alloc_reg_temp(current,i,-1);
1659     }
1660   }
1661   minimum_free_regs[i]=1;
1662 }
1663 void fconv_alloc(struct regstat *current,int i)
1664 {
1665   alloc_reg(current,i,CSREG); // Load status
1666   alloc_reg_temp(current,i,-1);
1667   minimum_free_regs[i]=1;
1668 }
1669 void float_alloc(struct regstat *current,int i)
1670 {
1671   alloc_reg(current,i,CSREG); // Load status
1672   alloc_reg_temp(current,i,-1);
1673   minimum_free_regs[i]=1;
1674 }
1675 void c2op_alloc(struct regstat *current,int i)
1676 {
1677   alloc_reg_temp(current,i,-1);
1678 }
1679 void fcomp_alloc(struct regstat *current,int i)
1680 {
1681   alloc_reg(current,i,CSREG); // Load status
1682   alloc_reg(current,i,FSREG); // Load flags
1683   dirty_reg(current,FSREG); // Flag will be modified
1684   alloc_reg_temp(current,i,-1);
1685   minimum_free_regs[i]=1;
1686 }
1687
1688 void syscall_alloc(struct regstat *current,int i)
1689 {
1690   alloc_cc(current,i);
1691   dirty_reg(current,CCREG);
1692   alloc_all(current,i);
1693   minimum_free_regs[i]=HOST_REGS;
1694   current->isconst=0;
1695 }
1696
1697 void delayslot_alloc(struct regstat *current,int i)
1698 {
1699   switch(itype[i]) {
1700     case UJUMP:
1701     case CJUMP:
1702     case SJUMP:
1703     case RJUMP:
1704     case FJUMP:
1705     case SYSCALL:
1706     case HLECALL:
1707     case SPAN:
1708       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1709       SysPrintf("Disabled speculative precompilation\n");
1710       stop_after_jal=1;
1711       break;
1712     case IMM16:
1713       imm16_alloc(current,i);
1714       break;
1715     case LOAD:
1716     case LOADLR:
1717       load_alloc(current,i);
1718       break;
1719     case STORE:
1720     case STORELR:
1721       store_alloc(current,i);
1722       break;
1723     case ALU:
1724       alu_alloc(current,i);
1725       break;
1726     case SHIFT:
1727       shift_alloc(current,i);
1728       break;
1729     case MULTDIV:
1730       multdiv_alloc(current,i);
1731       break;
1732     case SHIFTIMM:
1733       shiftimm_alloc(current,i);
1734       break;
1735     case MOV:
1736       mov_alloc(current,i);
1737       break;
1738     case COP0:
1739       cop0_alloc(current,i);
1740       break;
1741     case COP1:
1742     case COP2:
1743       cop1_alloc(current,i);
1744       break;
1745     case C1LS:
1746       c1ls_alloc(current,i);
1747       break;
1748     case C2LS:
1749       c2ls_alloc(current,i);
1750       break;
1751     case FCONV:
1752       fconv_alloc(current,i);
1753       break;
1754     case FLOAT:
1755       float_alloc(current,i);
1756       break;
1757     case FCOMP:
1758       fcomp_alloc(current,i);
1759       break;
1760     case C2OP:
1761       c2op_alloc(current,i);
1762       break;
1763   }
1764 }
1765
1766 // Special case where a branch and delay slot span two pages in virtual memory
1767 static void pagespan_alloc(struct regstat *current,int i)
1768 {
1769   current->isconst=0;
1770   current->wasconst=0;
1771   regs[i].wasconst=0;
1772   minimum_free_regs[i]=HOST_REGS;
1773   alloc_all(current,i);
1774   alloc_cc(current,i);
1775   dirty_reg(current,CCREG);
1776   if(opcode[i]==3) // JAL
1777   {
1778     alloc_reg(current,i,31);
1779     dirty_reg(current,31);
1780   }
1781   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1782   {
1783     alloc_reg(current,i,rs1[i]);
1784     if (rt1[i]!=0) {
1785       alloc_reg(current,i,rt1[i]);
1786       dirty_reg(current,rt1[i]);
1787     }
1788   }
1789   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
1790   {
1791     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1792     if(rs2[i]) alloc_reg(current,i,rs2[i]);
1793     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1794     {
1795       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1796       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
1797     }
1798   }
1799   else
1800   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
1801   {
1802     if(rs1[i]) alloc_reg(current,i,rs1[i]);
1803     if(!((current->is32>>rs1[i])&1))
1804     {
1805       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1806     }
1807   }
1808   else
1809   if(opcode[i]==0x11) // BC1
1810   {
1811     alloc_reg(current,i,FSREG);
1812     alloc_reg(current,i,CSREG);
1813   }
1814   //else ...
1815 }
1816
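// Queue an out-of-line stub for later generation:
// [0]=stub type, [1]=code location to patch, [2]=return address,
// [3..7]=type-specific arguments (a..e).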
1817 static void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
1818 {
1819   stubs[stubcount][0]=type;
1820   stubs[stubcount][1]=addr;
1821   stubs[stubcount][2]=retaddr;
1822   stubs[stubcount][3]=a;
1823   stubs[stubcount][4]=b;
1824   stubs[stubcount][5]=c;
1825   stubs[stubcount][6]=d;
1826   stubs[stubcount][7]=e;
1827   stubcount++;
1828 }
1829
1830 // Write out a single register
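// A 64-bit guest register may be split across two host registers: regmap
// value r holds the low 32 bits and r|64 the high 32 bits, hence the &63
// match and the r|64 store below.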
1831 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
1832 {
1833   int hr;
1834   for(hr=0;hr<HOST_REGS;hr++) {
1835     if(hr!=EXCLUDE_REG) {
1836       if((regmap[hr]&63)==r) {
1837         if((dirty>>hr)&1) {
1838           if(regmap[hr]<64) {
1839             emit_storereg(r,hr);
1840           }else{
1841             emit_storereg(r|64,hr);
1842           }
1843         }
1844       }
1845     }
1846   }
1847 }
1848
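// Debugging helpers: checksum the emulated RAM and the register file for the
// TRACE output in memdebug() below.  (The 8MB loop size appears to be
// inherited from the N64 version of this code.)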
1849 int mchecksum()
1850 {
1851   //if(!tracedebug) return 0;
1852   int i;
1853   int sum=0;
1854   for(i=0;i<2097152;i++) {
1855     unsigned int temp=sum;
1856     sum<<=1;
1857     sum|=(~temp)>>31;
1858     sum^=((u_int *)rdram)[i];
1859   }
1860   return sum;
1861 }
1862 int rchecksum()
1863 {
1864   int i;
1865   int sum=0;
1866   for(i=0;i<64;i++)
1867     sum^=((u_int *)reg)[i];
1868   return sum;
1869 }
1870 void rlist()
1871 {
1872   int i;
1873   printf("TRACE: ");
1874   for(i=0;i<32;i++)
1875     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
1876   printf("\n");
1877 }
1878
1879 void enabletrace()
1880 {
1881   tracedebug=1;
1882 }
1883
1884 void memdebug(int i)
1885 {
1886   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
1887   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
1888   //rlist();
1889   //if(tracedebug) {
1890   //if(Count>=-2084597794) {
1891   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
1892   //if(0) {
1893     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
1894     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
1895     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
1896     rlist();
1897     #ifdef __i386__
1898     printf("TRACE: %x\n",(&i)[-1]);
1899     #endif
1900     #ifdef __arm__
1901     int j;
1902     printf("TRACE: %x \n",(&j)[10]);
1903     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
1904     #endif
1905     //fflush(stdout);
1906   }
1907   //printf("TRACE: %x\n",(&i)[-1]);
1908 }
1909
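// Emit code for R-type ALU ops, dispatched on the function field (opcode2):
// ADD/ADDU/SUB/SUBU, the 64-bit DADD/DSUB forms, SLT/SLTU and AND/OR/XOR/NOR.
// With both operands register-allocated this is typically a single host op,
// e.g. ADDU becomes emit_add(s1,s2,t); missing operands fall back to
// emit_loadreg()/emit_zeroreg().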
1910 void alu_assemble(int i,struct regstat *i_regs)
1911 {
1912   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1913     if(rt1[i]) {
1914       signed char s1,s2,t;
1915       t=get_reg(i_regs->regmap,rt1[i]);
1916       if(t>=0) {
1917         s1=get_reg(i_regs->regmap,rs1[i]);
1918         s2=get_reg(i_regs->regmap,rs2[i]);
1919         if(rs1[i]&&rs2[i]) {
1920           assert(s1>=0);
1921           assert(s2>=0);
1922           if(opcode2[i]&2) emit_sub(s1,s2,t);
1923           else emit_add(s1,s2,t);
1924         }
1925         else if(rs1[i]) {
1926           if(s1>=0) emit_mov(s1,t);
1927           else emit_loadreg(rs1[i],t);
1928         }
1929         else if(rs2[i]) {
1930           if(s2>=0) {
1931             if(opcode2[i]&2) emit_neg(s2,t);
1932             else emit_mov(s2,t);
1933           }
1934           else {
1935             emit_loadreg(rs2[i],t);
1936             if(opcode2[i]&2) emit_neg(t,t);
1937           }
1938         }
1939         else emit_zeroreg(t);
1940       }
1941     }
1942   }
1943   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1944     if(rt1[i]) {
1945       signed char s1l,s2l,s1h,s2h,tl,th;
1946       tl=get_reg(i_regs->regmap,rt1[i]);
1947       th=get_reg(i_regs->regmap,rt1[i]|64);
1948       if(tl>=0) {
1949         s1l=get_reg(i_regs->regmap,rs1[i]);
1950         s2l=get_reg(i_regs->regmap,rs2[i]);
1951         s1h=get_reg(i_regs->regmap,rs1[i]|64);
1952         s2h=get_reg(i_regs->regmap,rs2[i]|64);
1953         if(rs1[i]&&rs2[i]) {
1954           assert(s1l>=0);
1955           assert(s2l>=0);
1956           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
1957           else emit_adds(s1l,s2l,tl);
1958           if(th>=0) {
1959             #ifdef INVERTED_CARRY
1960             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
1961             #else
1962             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
1963             #endif
1964             else emit_add(s1h,s2h,th);
1965           }
1966         }
1967         else if(rs1[i]) {
1968           if(s1l>=0) emit_mov(s1l,tl);
1969           else emit_loadreg(rs1[i],tl);
1970           if(th>=0) {
1971             if(s1h>=0) emit_mov(s1h,th);
1972             else emit_loadreg(rs1[i]|64,th);
1973           }
1974         }
1975         else if(rs2[i]) {
1976           if(s2l>=0) {
1977             if(opcode2[i]&2) emit_negs(s2l,tl);
1978             else emit_mov(s2l,tl);
1979           }
1980           else {
1981             emit_loadreg(rs2[i],tl);
1982             if(opcode2[i]&2) emit_negs(tl,tl);
1983           }
1984           if(th>=0) {
1985             #ifdef INVERTED_CARRY
1986             if(s2h>=0) emit_mov(s2h,th);
1987             else emit_loadreg(rs2[i]|64,th);
1988             if(opcode2[i]&2) {
1989               emit_adcimm(-1,th); // x86 has inverted carry flag
1990               emit_not(th,th);
1991             }
1992             #else
1993             if(opcode2[i]&2) {
1994               if(s2h>=0) emit_rscimm(s2h,0,th);
1995               else {
1996                 emit_loadreg(rs2[i]|64,th);
1997                 emit_rscimm(th,0,th);
1998               }
1999             }else{
2000               if(s2h>=0) emit_mov(s2h,th);
2001               else emit_loadreg(rs2[i]|64,th);
2002             }
2003             #endif
2004           }
2005         }
2006         else {
2007           emit_zeroreg(tl);
2008           if(th>=0) emit_zeroreg(th);
2009         }
2010       }
2011     }
2012   }
2013   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
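    // If either operand was not known to be 32-bit at block entry (was32 bit
    // clear), compare the full 64-bit value using the high/low host register
    // pair; otherwise a plain 32-bit compare is sufficient.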
2014     if(rt1[i]) {
2015       signed char s1l,s1h,s2l,s2h,t;
2016       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2017       {
2018         t=get_reg(i_regs->regmap,rt1[i]);
2019         //assert(t>=0);
2020         if(t>=0) {
2021           s1l=get_reg(i_regs->regmap,rs1[i]);
2022           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2023           s2l=get_reg(i_regs->regmap,rs2[i]);
2024           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2025           if(rs2[i]==0) // rx<r0
2026           {
2027             assert(s1h>=0);
2028             if(opcode2[i]==0x2a) // SLT
2029               emit_shrimm(s1h,31,t);
2030             else // SLTU (unsigned can not be less than zero)
2031               emit_zeroreg(t);
2032           }
2033           else if(rs1[i]==0) // r0<rx
2034           {
2035             assert(s2h>=0);
2036             if(opcode2[i]==0x2a) // SLT
2037               emit_set_gz64_32(s2h,s2l,t);
2038             else // SLTU (set if not zero)
2039               emit_set_nz64_32(s2h,s2l,t);
2040           }
2041           else {
2042             assert(s1l>=0);assert(s1h>=0);
2043             assert(s2l>=0);assert(s2h>=0);
2044             if(opcode2[i]==0x2a) // SLT
2045               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2046             else // SLTU
2047               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2048           }
2049         }
2050       } else {
2051         t=get_reg(i_regs->regmap,rt1[i]);
2052         //assert(t>=0);
2053         if(t>=0) {
2054           s1l=get_reg(i_regs->regmap,rs1[i]);
2055           s2l=get_reg(i_regs->regmap,rs2[i]);
2056           if(rs2[i]==0) // rx<r0
2057           {
2058             assert(s1l>=0);
2059             if(opcode2[i]==0x2a) // SLT
2060               emit_shrimm(s1l,31,t);
2061             else // SLTU (unsigned can not be less than zero)
2062               emit_zeroreg(t);
2063           }
2064           else if(rs1[i]==0) // r0<rx
2065           {
2066             assert(s2l>=0);
2067             if(opcode2[i]==0x2a) // SLT
2068               emit_set_gz32(s2l,t);
2069             else // SLTU (set if not zero)
2070               emit_set_nz32(s2l,t);
2071           }
2072           else{
2073             assert(s1l>=0);assert(s2l>=0);
2074             if(opcode2[i]==0x2a) // SLT
2075               emit_set_if_less32(s1l,s2l,t);
2076             else // SLTU
2077               emit_set_if_carry32(s1l,s2l,t);
2078           }
2079         }
2080       }
2081     }
2082   }
2083   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2084     if(rt1[i]) {
2085       signed char s1l,s1h,s2l,s2h,th,tl;
2086       tl=get_reg(i_regs->regmap,rt1[i]);
2087       th=get_reg(i_regs->regmap,rt1[i]|64);
2088       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2089       {
2090         assert(tl>=0);
2091         if(tl>=0) {
2092           s1l=get_reg(i_regs->regmap,rs1[i]);
2093           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2094           s2l=get_reg(i_regs->regmap,rs2[i]);
2095           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2096           if(rs1[i]&&rs2[i]) {
2097             assert(s1l>=0);assert(s1h>=0);
2098             assert(s2l>=0);assert(s2h>=0);
2099             if(opcode2[i]==0x24) { // AND
2100               emit_and(s1l,s2l,tl);
2101               emit_and(s1h,s2h,th);
2102             } else
2103             if(opcode2[i]==0x25) { // OR
2104               emit_or(s1l,s2l,tl);
2105               emit_or(s1h,s2h,th);
2106             } else
2107             if(opcode2[i]==0x26) { // XOR
2108               emit_xor(s1l,s2l,tl);
2109               emit_xor(s1h,s2h,th);
2110             } else
2111             if(opcode2[i]==0x27) { // NOR
2112               emit_or(s1l,s2l,tl);
2113               emit_or(s1h,s2h,th);
2114               emit_not(tl,tl);
2115               emit_not(th,th);
2116             }
2117           }
2118           else
2119           {
2120             if(opcode2[i]==0x24) { // AND
2121               emit_zeroreg(tl);
2122               emit_zeroreg(th);
2123             } else
2124             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2125               if(rs1[i]){
2126                 if(s1l>=0) emit_mov(s1l,tl);
2127                 else emit_loadreg(rs1[i],tl);
2128                 if(s1h>=0) emit_mov(s1h,th);
2129                 else emit_loadreg(rs1[i]|64,th);
2130               }
2131               else
2132               if(rs2[i]){
2133                 if(s2l>=0) emit_mov(s2l,tl);
2134                 else emit_loadreg(rs2[i],tl);
2135                 if(s2h>=0) emit_mov(s2h,th);
2136                 else emit_loadreg(rs2[i]|64,th);
2137               }
2138               else{
2139                 emit_zeroreg(tl);
2140                 emit_zeroreg(th);
2141               }
2142             } else
2143             if(opcode2[i]==0x27) { // NOR
2144               if(rs1[i]){
2145                 if(s1l>=0) emit_not(s1l,tl);
2146                 else{
2147                   emit_loadreg(rs1[i],tl);
2148                   emit_not(tl,tl);
2149                 }
2150                 if(s1h>=0) emit_not(s1h,th);
2151                 else{
2152                   emit_loadreg(rs1[i]|64,th);
2153                   emit_not(th,th);
2154                 }
2155               }
2156               else
2157               if(rs2[i]){
2158                 if(s2l>=0) emit_not(s2l,tl);
2159                 else{
2160                   emit_loadreg(rs2[i],tl);
2161                   emit_not(tl,tl);
2162                 }
2163                 if(s2h>=0) emit_not(s2h,th);
2164                 else{
2165                   emit_loadreg(rs2[i]|64,th);
2166                   emit_not(th,th);
2167                 }
2168               }
2169               else {
2170                 emit_movimm(-1,tl);
2171                 emit_movimm(-1,th);
2172               }
2173             }
2174           }
2175         }
2176       }
2177       else
2178       {
2179         // 32 bit
2180         if(tl>=0) {
2181           s1l=get_reg(i_regs->regmap,rs1[i]);
2182           s2l=get_reg(i_regs->regmap,rs2[i]);
2183           if(rs1[i]&&rs2[i]) {
2184             assert(s1l>=0);
2185             assert(s2l>=0);
2186             if(opcode2[i]==0x24) { // AND
2187               emit_and(s1l,s2l,tl);
2188             } else
2189             if(opcode2[i]==0x25) { // OR
2190               emit_or(s1l,s2l,tl);
2191             } else
2192             if(opcode2[i]==0x26) { // XOR
2193               emit_xor(s1l,s2l,tl);
2194             } else
2195             if(opcode2[i]==0x27) { // NOR
2196               emit_or(s1l,s2l,tl);
2197               emit_not(tl,tl);
2198             }
2199           }
2200           else
2201           {
2202             if(opcode2[i]==0x24) { // AND
2203               emit_zeroreg(tl);
2204             } else
2205             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2206               if(rs1[i]){
2207                 if(s1l>=0) emit_mov(s1l,tl);
2208                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2209               }
2210               else
2211               if(rs2[i]){
2212                 if(s2l>=0) emit_mov(s2l,tl);
2213                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2214               }
2215               else emit_zeroreg(tl);
2216             } else
2217             if(opcode2[i]==0x27) { // NOR
2218               if(rs1[i]){
2219                 if(s1l>=0) emit_not(s1l,tl);
2220                 else {
2221                   emit_loadreg(rs1[i],tl);
2222                   emit_not(tl,tl);
2223                 }
2224               }
2225               else
2226               if(rs2[i]){
2227                 if(s2l>=0) emit_not(s2l,tl);
2228                 else {
2229                   emit_loadreg(rs2[i],tl);
2230                   emit_not(tl,tl);
2231                 }
2232               }
2233               else emit_movimm(-1,tl);
2234             }
2235           }
2236         }
2237       }
2238     }
2239   }
2240 }
2241
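// Emit code for the I-type immediate ops: LUI, ADDI(U), DADDI(U), SLTI(U)
// and ANDI/ORI/XORI.  Where the source register is a known constant
// (wasconst/constmap), the operation is folded at assembly time, e.g. ANDI
// on a known value becomes a single emit_movimm(constmap[i][sl]&imm[i],tl).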
2242 void imm16_assemble(int i,struct regstat *i_regs)
2243 {
2244   if (opcode[i]==0x0f) { // LUI
2245     if(rt1[i]) {
2246       signed char t;
2247       t=get_reg(i_regs->regmap,rt1[i]);
2248       //assert(t>=0);
2249       if(t>=0) {
2250         if(!((i_regs->isconst>>t)&1))
2251           emit_movimm(imm[i]<<16,t);
2252       }
2253     }
2254   }
2255   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2256     if(rt1[i]) {
2257       signed char s,t;
2258       t=get_reg(i_regs->regmap,rt1[i]);
2259       s=get_reg(i_regs->regmap,rs1[i]);
2260       if(rs1[i]) {
2261         //assert(t>=0);
2262         //assert(s>=0);
2263         if(t>=0) {
2264           if(!((i_regs->isconst>>t)&1)) {
2265             if(s<0) {
2266               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2267               emit_addimm(t,imm[i],t);
2268             }else{
2269               if(!((i_regs->wasconst>>s)&1))
2270                 emit_addimm(s,imm[i],t);
2271               else
2272                 emit_movimm(constmap[i][s]+imm[i],t);
2273             }
2274           }
2275         }
2276       } else {
2277         if(t>=0) {
2278           if(!((i_regs->isconst>>t)&1))
2279             emit_movimm(imm[i],t);
2280         }
2281       }
2282     }
2283   }
2284   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2285     if(rt1[i]) {
2286       signed char sh,sl,th,tl;
2287       th=get_reg(i_regs->regmap,rt1[i]|64);
2288       tl=get_reg(i_regs->regmap,rt1[i]);
2289       sh=get_reg(i_regs->regmap,rs1[i]|64);
2290       sl=get_reg(i_regs->regmap,rs1[i]);
2291       if(tl>=0) {
2292         if(rs1[i]) {
2293           assert(sh>=0);
2294           assert(sl>=0);
2295           if(th>=0) {
2296             emit_addimm64_32(sh,sl,imm[i],th,tl);
2297           }
2298           else {
2299             emit_addimm(sl,imm[i],tl);
2300           }
2301         } else {
2302           emit_movimm(imm[i],tl);
2303           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2304         }
2305       }
2306     }
2307   }
2308   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2309     if(rt1[i]) {
2310       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2311       signed char sh,sl,t;
2312       t=get_reg(i_regs->regmap,rt1[i]);
2313       sh=get_reg(i_regs->regmap,rs1[i]|64);
2314       sl=get_reg(i_regs->regmap,rs1[i]);
2315       //assert(t>=0);
2316       if(t>=0) {
2317         if(rs1[i]>0) {
2318           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2319           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2320             if(opcode[i]==0x0a) { // SLTI
2321               if(sl<0) {
2322                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2323                 emit_slti32(t,imm[i],t);
2324               }else{
2325                 emit_slti32(sl,imm[i],t);
2326               }
2327             }
2328             else { // SLTIU
2329               if(sl<0) {
2330                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2331                 emit_sltiu32(t,imm[i],t);
2332               }else{
2333                 emit_sltiu32(sl,imm[i],t);
2334               }
2335             }
2336           }else{ // 64-bit
2337             assert(sl>=0);
2338             if(opcode[i]==0x0a) // SLTI
2339               emit_slti64_32(sh,sl,imm[i],t);
2340             else // SLTIU
2341               emit_sltiu64_32(sh,sl,imm[i],t);
2342           }
2343         }else{
2344           // SLTI(U) with r0 as the source is pointless,
2345           // but examples of it can be found in real code
2346           if(opcode[i]==0x0a) // SLTI
2347             if(0<imm[i]) emit_movimm(1,t);
2348             else emit_zeroreg(t);
2349           else // SLTIU
2350           {
2351             if(imm[i]) emit_movimm(1,t);
2352             else emit_zeroreg(t);
2353           }
2354         }
2355       }
2356     }
2357   }
2358   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2359     if(rt1[i]) {
2360       signed char sh,sl,th,tl;
2361       th=get_reg(i_regs->regmap,rt1[i]|64);
2362       tl=get_reg(i_regs->regmap,rt1[i]);
2363       sh=get_reg(i_regs->regmap,rs1[i]|64);
2364       sl=get_reg(i_regs->regmap,rs1[i]);
2365       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2366         if(opcode[i]==0x0c) //ANDI
2367         {
2368           if(rs1[i]) {
2369             if(sl<0) {
2370               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2371               emit_andimm(tl,imm[i],tl);
2372             }else{
2373               if(!((i_regs->wasconst>>sl)&1))
2374                 emit_andimm(sl,imm[i],tl);
2375               else
2376                 emit_movimm(constmap[i][sl]&imm[i],tl);
2377             }
2378           }
2379           else
2380             emit_zeroreg(tl);
2381           if(th>=0) emit_zeroreg(th);
2382         }
2383         else
2384         {
2385           if(rs1[i]) {
2386             if(sl<0) {
2387               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2388             }
2389             if(th>=0) {
2390               if(sh<0) {
2391                 emit_loadreg(rs1[i]|64,th);
2392               }else{
2393                 emit_mov(sh,th);
2394               }
2395             }
2396             if(opcode[i]==0x0d) { // ORI
2397               if(sl<0) {
2398                 emit_orimm(tl,imm[i],tl);
2399               }else{
2400                 if(!((i_regs->wasconst>>sl)&1))
2401                   emit_orimm(sl,imm[i],tl);
2402                 else
2403                   emit_movimm(constmap[i][sl]|imm[i],tl);
2404               }
2405             }
2406             if(opcode[i]==0x0e) { // XORI
2407               if(sl<0) {
2408                 emit_xorimm(tl,imm[i],tl);
2409               }else{
2410                 if(!((i_regs->wasconst>>sl)&1))
2411                   emit_xorimm(sl,imm[i],tl);
2412                 else
2413                   emit_movimm(constmap[i][sl]^imm[i],tl);
2414               }
2415             }
2416           }
2417           else {
2418             emit_movimm(imm[i],tl);
2419             if(th>=0) emit_zeroreg(th);
2420           }
2421         }
2422       }
2423     }
2424   }
2425 }
2426
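// Emit code for shift-by-immediate ops: SLL/SRL/SRA and the doubleword
// DSLL/DSRL/DSRA (including the +32 forms).  A shift amount of zero
// degenerates to a plain register move.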
2427 void shiftimm_assemble(int i,struct regstat *i_regs)
2428 {
2429   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2430   {
2431     if(rt1[i]) {
2432       signed char s,t;
2433       t=get_reg(i_regs->regmap,rt1[i]);
2434       s=get_reg(i_regs->regmap,rs1[i]);
2435       //assert(t>=0);
2436       if(t>=0&&!((i_regs->isconst>>t)&1)){
2437         if(rs1[i]==0)
2438         {
2439           emit_zeroreg(t);
2440         }
2441         else
2442         {
2443           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2444           if(imm[i]) {
2445             if(opcode2[i]==0) // SLL
2446             {
2447               emit_shlimm(s<0?t:s,imm[i],t);
2448             }
2449             if(opcode2[i]==2) // SRL
2450             {
2451               emit_shrimm(s<0?t:s,imm[i],t);
2452             }
2453             if(opcode2[i]==3) // SRA
2454             {
2455               emit_sarimm(s<0?t:s,imm[i],t);
2456             }
2457           }else{
2458             // Shift by zero
2459             if(s>=0 && s!=t) emit_mov(s,t);
2460           }
2461         }
2462       }
2463       //emit_storereg(rt1[i],t); //DEBUG
2464     }
2465   }
2466   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2467   {
2468     if(rt1[i]) {
2469       signed char sh,sl,th,tl;
2470       th=get_reg(i_regs->regmap,rt1[i]|64);
2471       tl=get_reg(i_regs->regmap,rt1[i]);
2472       sh=get_reg(i_regs->regmap,rs1[i]|64);
2473       sl=get_reg(i_regs->regmap,rs1[i]);
2474       if(tl>=0) {
2475         if(rs1[i]==0)
2476         {
2477           emit_zeroreg(tl);
2478           if(th>=0) emit_zeroreg(th);
2479         }
2480         else
2481         {
2482           assert(sl>=0);
2483           assert(sh>=0);
2484           if(imm[i]) {
2485             if(opcode2[i]==0x38) // DSLL
2486             {
2487               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2488               emit_shlimm(sl,imm[i],tl);
2489             }
2490             if(opcode2[i]==0x3a) // DSRL
2491             {
2492               emit_shrdimm(sl,sh,imm[i],tl);
2493               if(th>=0) emit_shrimm(sh,imm[i],th);
2494             }
2495             if(opcode2[i]==0x3b) // DSRA
2496             {
2497               emit_shrdimm(sl,sh,imm[i],tl);
2498               if(th>=0) emit_sarimm(sh,imm[i],th);
2499             }
2500           }else{
2501             // Shift by zero
2502             if(sl!=tl) emit_mov(sl,tl);
2503             if(th>=0&&sh!=th) emit_mov(sh,th);
2504           }
2505         }
2506       }
2507     }
2508   }
2509   if(opcode2[i]==0x3c) // DSLL32
2510   {
2511     if(rt1[i]) {
2512       signed char sl,tl,th;
2513       tl=get_reg(i_regs->regmap,rt1[i]);
2514       th=get_reg(i_regs->regmap,rt1[i]|64);
2515       sl=get_reg(i_regs->regmap,rs1[i]);
2516       if(th>=0||tl>=0){
2517         assert(tl>=0);
2518         assert(th>=0);
2519         assert(sl>=0);
2520         emit_mov(sl,th);
2521         emit_zeroreg(tl);
2522         if(imm[i]>32)
2523         {
2524           emit_shlimm(th,imm[i]&31,th);
2525         }
2526       }
2527     }
2528   }
2529   if(opcode2[i]==0x3e) // DSRL32
2530   {
2531     if(rt1[i]) {
2532       signed char sh,tl,th;
2533       tl=get_reg(i_regs->regmap,rt1[i]);
2534       th=get_reg(i_regs->regmap,rt1[i]|64);
2535       sh=get_reg(i_regs->regmap,rs1[i]|64);
2536       if(tl>=0){
2537         assert(sh>=0);
2538         emit_mov(sh,tl);
2539         if(th>=0) emit_zeroreg(th);
2540         if(imm[i]>32)
2541         {
2542           emit_shrimm(tl,imm[i]&31,tl);
2543         }
2544       }
2545     }
2546   }
2547   if(opcode2[i]==0x3f) // DSRA32
2548   {
2549     if(rt1[i]) {
2550       signed char sh,tl;
2551       tl=get_reg(i_regs->regmap,rt1[i]);
2552       sh=get_reg(i_regs->regmap,rs1[i]|64);
2553       if(tl>=0){
2554         assert(sh>=0);
2555         emit_mov(sh,tl);
2556         if(imm[i]>32)
2557         {
2558           emit_sarimm(tl,imm[i]&31,tl);
2559         }
2560       }
2561     }
2562   }
2563 }
2564
2565 #ifndef shift_assemble
2566 void shift_assemble(int i,struct regstat *i_regs)
2567 {
2568   printf("Need shift_assemble for this architecture.\n");
2569   exit(1);
2570 }
2571 #endif
2572
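// Emit code for aligned loads (LB/LH/LW/LBU/LHU/LWU/LD).
// For non-constant addresses a range check is emitted via
// emit_fastpath_cmp_jump() and the slow path is queued as a LOADx_STUB;
// known-constant addresses outside RAM go through inline_readstub().
// Loads to r0 are still performed because the target may be a hardware
// register with read side effects.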
2573 void load_assemble(int i,struct regstat *i_regs)
2574 {
2575   int s,th,tl,addr,map=-1;
2576   int offset;
2577   int jaddr=0;
2578   int memtarget=0,c=0;
2579   int fastload_reg_override=0;
2580   u_int hr,reglist=0;
2581   th=get_reg(i_regs->regmap,rt1[i]|64);
2582   tl=get_reg(i_regs->regmap,rt1[i]);
2583   s=get_reg(i_regs->regmap,rs1[i]);
2584   offset=imm[i];
2585   for(hr=0;hr<HOST_REGS;hr++) {
2586     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2587   }
2588   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2589   if(s>=0) {
2590     c=(i_regs->wasconst>>s)&1;
2591     if (c) {
2592       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2593     }
2594   }
2595   //printf("load_assemble: c=%d\n",c);
2596   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2597   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2598   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2599     ||rt1[i]==0) {
2600       // could be a hardware FIFO, so the read must still be performed
2601       // (or it is a dummy read to r0)
2602       assem_debug("(forced read)\n");
2603       tl=get_reg(i_regs->regmap,-1);
2604       assert(tl>=0);
2605   }
2606   if(offset||s<0||c) addr=tl;
2607   else addr=s;
2608   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2609  if(tl>=0) {
2610   //printf("load_assemble: c=%d\n",c);
2611   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2612   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2613   reglist&=~(1<<tl);
2614   if(th>=0) reglist&=~(1<<th);
2615   if(!c) {
2616     #ifdef RAM_OFFSET
2617     map=get_reg(i_regs->regmap,ROREG);
2618     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2619     #endif
2620     #ifdef R29_HACK
2621     // Strmnnrmn's speed hack
2622     if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2623     #endif
2624     {
2625       jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2626     }
2627   }
2628   else if(ram_offset&&memtarget) {
2629     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2630     fastload_reg_override=HOST_TEMPREG;
2631   }
2632   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2633   if (opcode[i]==0x20) { // LB
2634     if(!c||memtarget) {
2635       if(!dummy) {
2636         #ifdef HOST_IMM_ADDR32
2637         if(c)
2638           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2639         else
2640         #endif
2641         {
2642           //emit_xorimm(addr,3,tl);
2643           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2644           int x=0,a=tl;
2645 #ifdef BIG_ENDIAN_MIPS
2646           if(!c) emit_xorimm(addr,3,tl);
2647           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2648 #else
2649           if(!c) a=addr;
2650 #endif
2651           if(fastload_reg_override) a=fastload_reg_override;
2652
2653           emit_movsbl_indexed_tlb(x,a,map,tl);
2654         }
2655       }
2656       if(jaddr)
2657         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2658     }
2659     else
2660       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2661   }
2662   if (opcode[i]==0x21) { // LH
2663     if(!c||memtarget) {
2664       if(!dummy) {
2665         #ifdef HOST_IMM_ADDR32
2666         if(c)
2667           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2668         else
2669         #endif
2670         {
2671           int x=0,a=tl;
2672 #ifdef BIG_ENDIAN_MIPS
2673           if(!c) emit_xorimm(addr,2,tl);
2674           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2675 #else
2676           if(!c) a=addr;
2677 #endif
2678           if(fastload_reg_override) a=fastload_reg_override;
2679           //#ifdef
2680           //emit_movswl_indexed_tlb(x,tl,map,tl);
2681           //else
2682           if(map>=0) {
2683             emit_movswl_indexed(x,a,tl);
2684           }else{
2685             #if 1 //def RAM_OFFSET
2686             emit_movswl_indexed(x,a,tl);
2687             #else
2688             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2689             #endif
2690           }
2691         }
2692       }
2693       if(jaddr)
2694         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2695     }
2696     else
2697       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2698   }
2699   if (opcode[i]==0x23) { // LW
2700     if(!c||memtarget) {
2701       if(!dummy) {
2702         int a=addr;
2703         if(fastload_reg_override) a=fastload_reg_override;
2704         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2705         #ifdef HOST_IMM_ADDR32
2706         if(c)
2707           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2708         else
2709         #endif
2710         emit_readword_indexed_tlb(0,a,map,tl);
2711       }
2712       if(jaddr)
2713         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2714     }
2715     else
2716       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2717   }
2718   if (opcode[i]==0x24) { // LBU
2719     if(!c||memtarget) {
2720       if(!dummy) {
2721         #ifdef HOST_IMM_ADDR32
2722         if(c)
2723           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2724         else
2725         #endif
2726         {
2727           //emit_xorimm(addr,3,tl);
2728           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2729           int x=0,a=tl;
2730 #ifdef BIG_ENDIAN_MIPS
2731           if(!c) emit_xorimm(addr,3,tl);
2732           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2733 #else
2734           if(!c) a=addr;
2735 #endif
2736           if(fastload_reg_override) a=fastload_reg_override;
2737
2738           emit_movzbl_indexed_tlb(x,a,map,tl);
2739         }
2740       }
2741       if(jaddr)
2742         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2743     }
2744     else
2745       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2746   }
2747   if (opcode[i]==0x25) { // LHU
2748     if(!c||memtarget) {
2749       if(!dummy) {
2750         #ifdef HOST_IMM_ADDR32
2751         if(c)
2752           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
2753         else
2754         #endif
2755         {
2756           int x=0,a=tl;
2757 #ifdef BIG_ENDIAN_MIPS
2758           if(!c) emit_xorimm(addr,2,tl);
2759           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2760 #else
2761           if(!c) a=addr;
2762 #endif
2763           if(fastload_reg_override) a=fastload_reg_override;
2764           //#ifdef
2765           //emit_movzwl_indexed_tlb(x,tl,map,tl);
2766           //#else
2767           if(map>=0) {
2768             emit_movzwl_indexed(x,a,tl);
2769           }else{
2770             #if 1 //def RAM_OFFSET
2771             emit_movzwl_indexed(x,a,tl);
2772             #else
2773             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
2774             #endif
2775           }
2776         }
2777       }
2778       if(jaddr)
2779         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2780     }
2781     else
2782       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2783   }
2784   if (opcode[i]==0x27) { // LWU
2785     assert(th>=0);
2786     if(!c||memtarget) {
2787       if(!dummy) {
2788         int a=addr;
2789         if(fastload_reg_override) a=fastload_reg_override;
2790         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2791         #ifdef HOST_IMM_ADDR32
2792         if(c)
2793           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2794         else
2795         #endif
2796         emit_readword_indexed_tlb(0,a,map,tl);
2797       }
2798       if(jaddr)
2799         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2800     }
2801     else {
2802       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2803     }
2804     emit_zeroreg(th);
2805   }
2806   if (opcode[i]==0x37) { // LD
2807     if(!c||memtarget) {
2808       if(!dummy) {
2809         int a=addr;
2810         if(fastload_reg_override) a=fastload_reg_override;
2811         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
2812         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
2813         #ifdef HOST_IMM_ADDR32
2814         if(c)
2815           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
2816         else
2817         #endif
2818         emit_readdword_indexed_tlb(0,a,map,th,tl);
2819       }
2820       if(jaddr)
2821         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2822     }
2823     else
2824       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2825   }
2826  }
2827   //emit_storereg(rt1[i],tl); // DEBUG
2828   //if(opcode[i]==0x23)
2829   //if(opcode[i]==0x24)
2830   //if(opcode[i]==0x23||opcode[i]==0x24)
2831   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
2832   {
2833     //emit_pusha();
2834     save_regs(0x100f);
2835         emit_readword((int)&last_count,ECX);
2836         #ifdef __i386__
2837         if(get_reg(i_regs->regmap,CCREG)<0)
2838           emit_loadreg(CCREG,HOST_CCREG);
2839         emit_add(HOST_CCREG,ECX,HOST_CCREG);
2840         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
2841         emit_writeword(HOST_CCREG,(int)&Count);
2842         #endif
2843         #ifdef __arm__
2844         if(get_reg(i_regs->regmap,CCREG)<0)
2845           emit_loadreg(CCREG,0);
2846         else
2847           emit_mov(HOST_CCREG,0);
2848         emit_add(0,ECX,0);
2849         emit_addimm(0,2*ccadj[i],0);
2850         emit_writeword(0,(int)&Count);
2851         #endif
2852     emit_call((int)memdebug);
2853     //emit_popa();
2854     restore_regs(0x100f);
2855   }*/
2856 }
2857
2858 #ifndef loadlr_assemble
2859 void loadlr_assemble(int i,struct regstat *i_regs)
2860 {
2861   printf("Need loadlr_assemble for this architecture.\n");
2862   exit(1);
2863 }
2864 #endif
2865
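// Emit code for aligned stores (SB/SH/SW/SD).  As with loads, non-constant
// addresses get a fast-path range check with a STOREx_STUB fallback.
// After the store, invalid_code[] is consulted so that writes into already
// translated code trigger invalidation (skipped with NDHACK_NO_SMC_CHECK),
// and a constant-address write that lands inside the block currently being
// compiled writes back the dirty registers, sets pcaddr and exits through
// do_interrupt.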
2866 void store_assemble(int i,struct regstat *i_regs)
2867 {
2868   int s,th,tl,map=-1;
2869   int addr,temp;
2870   int offset;
2871   int jaddr=0,type;
2872   int memtarget=0,c=0;
2873   int agr=AGEN1+(i&1);
2874   int faststore_reg_override=0;
2875   u_int hr,reglist=0;
2876   th=get_reg(i_regs->regmap,rs2[i]|64);
2877   tl=get_reg(i_regs->regmap,rs2[i]);
2878   s=get_reg(i_regs->regmap,rs1[i]);
2879   temp=get_reg(i_regs->regmap,agr);
2880   if(temp<0) temp=get_reg(i_regs->regmap,-1);
2881   offset=imm[i];
2882   if(s>=0) {
2883     c=(i_regs->wasconst>>s)&1;
2884     if(c) {
2885       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2886     }
2887   }
2888   assert(tl>=0);
2889   assert(temp>=0);
2890   for(hr=0;hr<HOST_REGS;hr++) {
2891     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2892   }
2893   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2894   if(offset||s<0||c) addr=temp;
2895   else addr=s;
2896   if(!c) {
2897     jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
2898   }
2899   else if(ram_offset&&memtarget) {
2900     emit_addimm(addr,ram_offset,HOST_TEMPREG);
2901     faststore_reg_override=HOST_TEMPREG;
2902   }
2903
2904   if (opcode[i]==0x28) { // SB
2905     if(!c||memtarget) {
2906       int x=0,a=temp;
2907 #ifdef BIG_ENDIAN_MIPS
2908       if(!c) emit_xorimm(addr,3,temp);
2909       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2910 #else
2911       if(!c) a=addr;
2912 #endif
2913       if(faststore_reg_override) a=faststore_reg_override;
2914       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
2915       emit_writebyte_indexed_tlb(tl,x,a,map,a);
2916     }
2917     type=STOREB_STUB;
2918   }
2919   if (opcode[i]==0x29) { // SH
2920     if(!c||memtarget) {
2921       int x=0,a=temp;
2922 #ifdef BIG_ENDIAN_MIPS
2923       if(!c) emit_xorimm(addr,2,temp);
2924       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2925 #else
2926       if(!c) a=addr;
2927 #endif
2928       if(faststore_reg_override) a=faststore_reg_override;
2929       //#ifdef
2930       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
2931       //#else
2932       if(map>=0) {
2933         emit_writehword_indexed(tl,x,a);
2934       }else
2935         //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
2936         emit_writehword_indexed(tl,x,a);
2937     }
2938     type=STOREH_STUB;
2939   }
2940   if (opcode[i]==0x2B) { // SW
2941     if(!c||memtarget) {
2942       int a=addr;
2943       if(faststore_reg_override) a=faststore_reg_override;
2944       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
2945       emit_writeword_indexed_tlb(tl,0,a,map,temp);
2946     }
2947     type=STOREW_STUB;
2948   }
2949   if (opcode[i]==0x3F) { // SD
2950     if(!c||memtarget) {
2951       int a=addr;
2952       if(faststore_reg_override) a=faststore_reg_override;
2953       if(rs2[i]) {
2954         assert(th>=0);
2955         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
2956         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
2957         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
2958       }else{
2959         // Store zero
2960         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
2961         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
2962         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
2963       }
2964     }
2965     type=STORED_STUB;
2966   }
2967   if(jaddr) {
2968     // PCSX store handlers don't check invcode again
2969     reglist|=1<<addr;
2970     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2971     jaddr=0;
2972   }
2973   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
2974     if(!c||memtarget) {
2975       #ifdef DESTRUCTIVE_SHIFT
2976       // The x86 shift operation is 'destructive'; it overwrites the
2977       // source register, so we need to make a copy first and use that.
2978       addr=temp;
2979       #endif
2980       #if defined(HOST_IMM8)
2981       int ir=get_reg(i_regs->regmap,INVCP);
2982       assert(ir>=0);
2983       emit_cmpmem_indexedsr12_reg(ir,addr,1);
2984       #else
2985       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
2986       #endif
2987       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
2988       emit_callne(invalidate_addr_reg[addr]);
2989       #else
2990       int jaddr2=(int)out;
2991       emit_jne(0);
2992       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
2993       #endif
2994     }
2995   }
2996   u_int addr_val=(s>=0)?constmap[i][s]+offset:0; // value only used when the address is a known constant
2997   if(jaddr) {
2998     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2999   } else if(c&&!memtarget) {
3000     inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3001   }
3002   // basic detection of constant-address writes that modify the current block;
3003   // addresses behind the current pc are not checked, as that code should already be in the MIPS i-cache
3004   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3005     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3006     assert(i_regs->regmap==regs[i].regmap); // not delay slot
3007     if(i_regs->regmap==regs[i].regmap) {
3008       load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
3009       wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
3010       emit_movimm(start+i*4+4,0);
3011       emit_writeword(0,(int)&pcaddr);
3012       emit_jmp((int)do_interrupt);
3013     }
3014   }
3015   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3016   //if(opcode[i]==0x2B || opcode[i]==0x28)
3017   //if(opcode[i]==0x2B || opcode[i]==0x29)
3018   //if(opcode[i]==0x2B)
3019   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3020   {
3021     #ifdef __i386__
3022     emit_pusha();
3023     #endif
3024     #ifdef __arm__
3025     save_regs(0x100f);
3026     #endif
3027         emit_readword((int)&last_count,ECX);
3028         #ifdef __i386__
3029         if(get_reg(i_regs->regmap,CCREG)<0)
3030           emit_loadreg(CCREG,HOST_CCREG);
3031         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3032         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3033         emit_writeword(HOST_CCREG,(int)&Count);
3034         #endif
3035         #ifdef __arm__
3036         if(get_reg(i_regs->regmap,CCREG)<0)
3037           emit_loadreg(CCREG,0);
3038         else
3039           emit_mov(HOST_CCREG,0);
3040         emit_add(0,ECX,0);
3041         emit_addimm(0,2*ccadj[i],0);
3042         emit_writeword(0,(int)&Count);
3043         #endif
3044     emit_call((int)memdebug);
3045     #ifdef __i386__
3046     emit_popa();
3047     #endif
3048     #ifdef __arm__
3049     restore_regs(0x100f);
3050     #endif
3051   }*/
3052 }
3053
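// Emit code for the unaligned stores SWL/SWR/SDL/SDR.  The low two address
// bits are tested at run time and one of four cases is executed, each
// writing the appropriate byte/halfword/word pieces.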
3054 void storelr_assemble(int i,struct regstat *i_regs)
3055 {
3056   int s,th,tl;
3057   int temp;
3058   int temp2=-1;
3059   int offset;
3060   int jaddr=0;
3061   int case1,case2,case3;
3062   int done0,done1,done2;
3063   int memtarget=0,c=0;
3064   int agr=AGEN1+(i&1);
3065   u_int hr,reglist=0;
3066   th=get_reg(i_regs->regmap,rs2[i]|64);
3067   tl=get_reg(i_regs->regmap,rs2[i]);
3068   s=get_reg(i_regs->regmap,rs1[i]);
3069   temp=get_reg(i_regs->regmap,agr);
3070   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3071   offset=imm[i];
3072   if(s>=0) {
3073     c=(i_regs->isconst>>s)&1;
3074     if(c) {
3075       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3076     }
3077   }
3078   assert(tl>=0);
3079   for(hr=0;hr<HOST_REGS;hr++) {
3080     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3081   }
3082   assert(temp>=0);
3083   if(!c) {
3084     emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3085     if(!offset&&s!=temp) emit_mov(s,temp);
3086     jaddr=(int)out;
3087     emit_jno(0);
3088   }
3089   else
3090   {
3091     if(!memtarget||!rs1[i]) {
3092       jaddr=(int)out;
3093       emit_jmp(0);
3094     }
3095   }
3096   #ifdef RAM_OFFSET
3097   int map=get_reg(i_regs->regmap,ROREG);
3098   if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3099   #else
3100   if((u_int)rdram!=0x80000000)
3101     emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3102   #endif
3103
3104   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3105     temp2=get_reg(i_regs->regmap,FTEMP);
3106     if(!rs2[i]) temp2=th=tl;
3107   }
3108
3109 #ifndef BIG_ENDIAN_MIPS
3110     emit_xorimm(temp,3,temp);
3111 #endif
3112   emit_testimm(temp,2);
3113   case2=(int)out;
3114   emit_jne(0);
3115   emit_testimm(temp,1);
3116   case1=(int)out;
3117   emit_jne(0);
3118   // 0
3119   if (opcode[i]==0x2A) { // SWL
3120     emit_writeword_indexed(tl,0,temp);
3121   }
3122   if (opcode[i]==0x2E) { // SWR
3123     emit_writebyte_indexed(tl,3,temp);
3124   }
3125   if (opcode[i]==0x2C) { // SDL
3126     emit_writeword_indexed(th,0,temp);
3127     if(rs2[i]) emit_mov(tl,temp2);
3128   }
3129   if (opcode[i]==0x2D) { // SDR
3130     emit_writebyte_indexed(tl,3,temp);
3131     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3132   }
3133   done0=(int)out;
3134   emit_jmp(0);
3135   // 1
3136   set_jump_target(case1,(int)out);
3137   if (opcode[i]==0x2A) { // SWL
3138     // Write 3 msb into three least significant bytes
3139     if(rs2[i]) emit_rorimm(tl,8,tl);
3140     emit_writehword_indexed(tl,-1,temp);
3141     if(rs2[i]) emit_rorimm(tl,16,tl);
3142     emit_writebyte_indexed(tl,1,temp);
3143     if(rs2[i]) emit_rorimm(tl,8,tl);
3144   }
3145   if (opcode[i]==0x2E) { // SWR
3146     // Write two lsb into two most significant bytes
3147     emit_writehword_indexed(tl,1,temp);
3148   }
3149   if (opcode[i]==0x2C) { // SDL
3150     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3151     // Write 3 msb into three least significant bytes
3152     if(rs2[i]) emit_rorimm(th,8,th);
3153     emit_writehword_indexed(th,-1,temp);
3154     if(rs2[i]) emit_rorimm(th,16,th);
3155     emit_writebyte_indexed(th,1,temp);
3156     if(rs2[i]) emit_rorimm(th,8,th);
3157   }
3158   if (opcode[i]==0x2D) { // SDR
3159     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3160     // Write two lsb into two most significant bytes
3161     emit_writehword_indexed(tl,1,temp);
3162   }
3163   done1=(int)out;
3164   emit_jmp(0);
3165   // 2
3166   set_jump_target(case2,(int)out);
3167   emit_testimm(temp,1);
3168   case3=(int)out;
3169   emit_jne(0);
3170   if (opcode[i]==0x2A) { // SWL
3171     // Write two msb into two least significant bytes
3172     if(rs2[i]) emit_rorimm(tl,16,tl);
3173     emit_writehword_indexed(tl,-2,temp);
3174     if(rs2[i]) emit_rorimm(tl,16,tl);
3175   }
3176   if (opcode[i]==0x2E) { // SWR
3177     // Write 3 lsb into three most significant bytes
3178     emit_writebyte_indexed(tl,-1,temp);
3179     if(rs2[i]) emit_rorimm(tl,8,tl);
3180     emit_writehword_indexed(tl,0,temp);
3181     if(rs2[i]) emit_rorimm(tl,24,tl);
3182   }
3183   if (opcode[i]==0x2C) { // SDL
3184     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3185     // Write two msb into two least significant bytes
3186     if(rs2[i]) emit_rorimm(th,16,th);
3187     emit_writehword_indexed(th,-2,temp);
3188     if(rs2[i]) emit_rorimm(th,16,th);
3189   }
3190   if (opcode[i]==0x2D) { // SDR
3191     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3192     // Write 3 lsb into three most significant bytes
3193     emit_writebyte_indexed(tl,-1,temp);
3194     if(rs2[i]) emit_rorimm(tl,8,tl);
3195     emit_writehword_indexed(tl,0,temp);
3196     if(rs2[i]) emit_rorimm(tl,24,tl);
3197   }
3198   done2=(int)out;
3199   emit_jmp(0);
3200   // 3
3201   set_jump_target(case3,(int)out);
3202   if (opcode[i]==0x2A) { // SWL
3203     // Write msb into least significant byte
3204     if(rs2[i]) emit_rorimm(tl,24,tl);
3205     emit_writebyte_indexed(tl,-3,temp);
3206     if(rs2[i]) emit_rorimm(tl,8,tl);
3207   }
3208   if (opcode[i]==0x2E) { // SWR
3209     // Write entire word
3210     emit_writeword_indexed(tl,-3,temp);
3211   }
3212   if (opcode[i]==0x2C) { // SDL
3213     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3214     // Write msb into least significant byte
3215     if(rs2[i]) emit_rorimm(th,24,th);
3216     emit_writebyte_indexed(th,-3,temp);
3217     if(rs2[i]) emit_rorimm(th,8,th);
3218   }
3219   if (opcode[i]==0x2D) { // SDR
3220     if(rs2[i]) emit_mov(th,temp2);
3221     // Write entire word
3222     emit_writeword_indexed(tl,-3,temp);
3223   }
3224   set_jump_target(done0,(int)out);
3225   set_jump_target(done1,(int)out);
3226   set_jump_target(done2,(int)out);
3227   if (opcode[i]==0x2C) { // SDL
3228     emit_testimm(temp,4);
3229     done0=(int)out;
3230     emit_jne(0);
3231     emit_andimm(temp,~3,temp);
3232     emit_writeword_indexed(temp2,4,temp);
3233     set_jump_target(done0,(int)out);
3234   }
3235   if (opcode[i]==0x2D) { // SDR
3236     emit_testimm(temp,4);
3237     done0=(int)out;
3238     emit_jeq(0);
3239     emit_andimm(temp,~3,temp);
3240     emit_writeword_indexed(temp2,-4,temp);
3241     set_jump_target(done0,(int)out);
3242   }
3243   if(!c||!memtarget)
3244     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3245   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3246     #ifdef RAM_OFFSET
3247     int map=get_reg(i_regs->regmap,ROREG);
3248     if(map<0) map=HOST_TEMPREG;
3249     gen_orig_addr_w(temp,map);
3250     #else
3251     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3252     #endif
3253     #if defined(HOST_IMM8)
3254     int ir=get_reg(i_regs->regmap,INVCP);
3255     assert(ir>=0);
3256     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3257     #else
3258     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3259     #endif
3260     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3261     emit_callne(invalidate_addr_reg[temp]);
3262     #else
3263     int jaddr2=(int)out;
3264     emit_jne(0);
3265     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3266     #endif
3267   }
3268   /*
3269     emit_pusha();
3270     //save_regs(0x100f);
3271         emit_readword((int)&last_count,ECX);
3272         if(get_reg(i_regs->regmap,CCREG)<0)
3273           emit_loadreg(CCREG,HOST_CCREG);
3274         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3275         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3276         emit_writeword(HOST_CCREG,(int)&Count);
3277     emit_call((int)memdebug);
3278     emit_popa();
3279     //restore_regs(0x100f);
3280   */
3281 }
3282
3283 void c1ls_assemble(int i,struct regstat *i_regs)
3284 {
3285   cop1_unusable(i, i_regs);
3286 }
3287
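// Emit code for LWC2/SWC2: a word is moved between memory and a GTE (COP2)
// data register, staged in the FTEMP host register via
// cop2_get_dreg()/cop2_put_dreg().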
3288 void c2ls_assemble(int i,struct regstat *i_regs)
3289 {
3290   int s,tl;
3291   int ar;
3292   int offset;
3293   int memtarget=0,c=0;
3294   int jaddr2=0,type;
3295   int agr=AGEN1+(i&1);
3296   int fastio_reg_override=0;
3297   u_int hr,reglist=0;
3298   u_int copr=(source[i]>>16)&0x1f;
3299   s=get_reg(i_regs->regmap,rs1[i]);
3300   tl=get_reg(i_regs->regmap,FTEMP);
3301   offset=imm[i];
3302   assert(rs1[i]>0);
3303   assert(tl>=0);
3304
3305   for(hr=0;hr<HOST_REGS;hr++) {
3306     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3307   }
3308   if(i_regs->regmap[HOST_CCREG]==CCREG)
3309     reglist&=~(1<<HOST_CCREG);
3310
3311   // get the address
3312   if (opcode[i]==0x3a) { // SWC2
3313     ar=get_reg(i_regs->regmap,agr);
3314     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3315     reglist|=1<<ar;
3316   } else { // LWC2
3317     ar=tl;
3318   }
3319   if(s>=0) c=(i_regs->wasconst>>s)&1;
3320   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3321   if (!offset&&!c&&s>=0) ar=s;
3322   assert(ar>=0);
3323
3324   if (opcode[i]==0x3a) { // SWC2
3325     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3326     type=STOREW_STUB;
3327   }
3328   else
3329     type=LOADW_STUB;
3330
3331   if(c&&!memtarget) {
3332     jaddr2=(int)out;
3333     emit_jmp(0); // inline_readstub/inline_writestub?
3334   }
3335   else {
3336     if(!c) {
3337       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3338     }
3339     else if(ram_offset&&memtarget) {
3340       emit_addimm(ar,ram_offset,HOST_TEMPREG);
3341       fastio_reg_override=HOST_TEMPREG;
3342     }
3343     if (opcode[i]==0x32) { // LWC2
3344       int a=ar; // declared before the #ifdef so the else branch below stays a valid statement
3345       if(fastio_reg_override) a=fastio_reg_override;
3346       #ifdef HOST_IMM_ADDR32
3347       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3348       else
3349       #endif
3350       emit_readword_indexed(0,a,tl);
3351     }
3352     if (opcode[i]==0x3a) { // SWC2
3353       #ifdef DESTRUCTIVE_SHIFT
3354       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3355       #endif
3356       int a=ar;
3357       if(fastio_reg_override) a=fastio_reg_override;
3358       emit_writeword_indexed(tl,0,a);
3359     }
3360   }
3361   if(jaddr2)
3362     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3363   if(opcode[i]==0x3a) // SWC2
3364   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3365 #if defined(HOST_IMM8)
3366     int ir=get_reg(i_regs->regmap,INVCP);
3367     assert(ir>=0);
3368     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3369 #else
3370     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3371 #endif
3372     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3373     emit_callne(invalidate_addr_reg[ar]);
3374     #else
3375     int jaddr3=(int)out;
3376     emit_jne(0);
3377     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3378     #endif
3379   }
3380   if (opcode[i]==0x32) { // LWC2
3381     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3382   }
3383 }
3384
3385 #ifndef multdiv_assemble
3386 void multdiv_assemble(int i,struct regstat *i_regs)
3387 {
3388   printf("Need multdiv_assemble for this architecture.\n");
3389   exit(1);
3390 }
3391 #endif
3392
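// MFHI/MFLO/MTHI/MTLO, handled as plain register moves; the decoder is
// assumed to have set rs1/rt1 to the HI/LO pseudo-registers as appropriate.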
3393 void mov_assemble(int i,struct regstat *i_regs)
3394 {
3395   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3396   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3397   if(rt1[i]) {
3398     signed char sh,sl,th,tl;
3399     th=get_reg(i_regs->regmap,rt1[i]|64);
3400     tl=get_reg(i_regs->regmap,rt1[i]);
3401     //assert(tl>=0);
3402     if(tl>=0) {
3403       sh=get_reg(i_regs->regmap,rs1[i]|64);
3404       sl=get_reg(i_regs->regmap,rs1[i]);
3405       if(sl>=0) emit_mov(sl,tl);
3406       else emit_loadreg(rs1[i],tl);
3407       if(th>=0) {
3408         if(sh>=0) emit_mov(sh,th);
3409         else emit_loadreg(rs1[i]|64,th);
3410       }
3411     }
3412   }
3413 }
3414
3415 #ifndef fconv_assemble
3416 void fconv_assemble(int i,struct regstat *i_regs)
3417 {
3418   printf("Need fconv_assemble for this architecture.\n");
3419   exit(1);
3420 }
3421 #endif
3422
3423 #if 0
3424 void float_assemble(int i,struct regstat *i_regs)
3425 {
3426   printf("Need float_assemble for this architecture.\n");
3427   exit(1);
3428 }
3429 #endif
3430
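// SYSCALL: pass the PC of the SYSCALL instruction, add the accumulated
// cycles and tail-call the HLE syscall handler.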
3431 void syscall_assemble(int i,struct regstat *i_regs)
3432 {
3433   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3434   assert(ccreg==HOST_CCREG);
3435   assert(!is_delayslot);
3436   (void)ccreg;
3437   emit_movimm(start+i*4,EAX); // Get PC
3438   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3439   emit_jmp((int)jump_syscall_hle); // XXX
3440 }
3441
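// HLECALL: the low 26 bits of the opcode select an HLE BIOS handler from
// psxHLEt (out-of-range indices fall back to psxNULL).  The PC after the
// call and the handler address are passed in host registers 0 and 1.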
3442 void hlecall_assemble(int i,struct regstat *i_regs)
3443 {
3444   extern void psxNULL();
3445   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3446   assert(ccreg==HOST_CCREG);
3447   assert(!is_delayslot);
3448   (void)ccreg;
3449   emit_movimm(start+i*4+4,0); // Get PC
3450   uint32_t hleCode = source[i] & 0x03ffffff;
3451   if (hleCode >= (sizeof(psxHLEt) / sizeof(psxHLEt[0])))
3452     emit_movimm((int)psxNULL,1);
3453   else
3454     emit_movimm((int)psxHLEt[hleCode],1);
3455   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
3456   emit_jmp((int)jump_hlecall);
3457 }
3458
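// INTCALL: save the PC and branch to jump_intcall, which presumably runs
// this instruction through the interpreter instead of compiled code.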
3459 void intcall_assemble(int i,struct regstat *i_regs)
3460 {
3461   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3462   assert(ccreg==HOST_CCREG);
3463   assert(!is_delayslot);
3464   (void)ccreg;
3465   emit_movimm(start+i*4,0); // Get PC
3466   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3467   emit_jmp((int)jump_intcall);
3468 }
3469
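// Assemble the instruction in a branch delay slot.  is_delayslot is set for
// the duration so the per-type assemblers can adjust their behaviour; a
// branch found in the delay slot is flagged as a probable bug.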
3470 void ds_assemble(int i,struct regstat *i_regs)
3471 {
3472   speculate_register_values(i);
3473   is_delayslot=1;
3474   switch(itype[i]) {
3475     case ALU:
3476       alu_assemble(i,i_regs);break;
3477     case IMM16:
3478       imm16_assemble(i,i_regs);break;
3479     case SHIFT:
3480       shift_assemble(i,i_regs);break;
3481     case SHIFTIMM:
3482       shiftimm_assemble(i,i_regs);break;
3483     case LOAD:
3484       load_assemble(i,i_regs);break;
3485     case LOADLR:
3486       loadlr_assemble(i,i_regs);break;
3487     case STORE:
3488       store_assemble(i,i_regs);break;
3489     case STORELR:
3490       storelr_assemble(i,i_regs);break;
3491     case COP0:
3492       cop0_assemble(i,i_regs);break;
3493     case COP1:
3494       cop1_assemble(i,i_regs);break;
3495     case C1LS:
3496       c1ls_assemble(i,i_regs);break;
3497     case COP2:
3498       cop2_assemble(i,i_regs);break;
3499     case C2LS:
3500       c2ls_assemble(i,i_regs);break;
3501     case C2OP:
3502       c2op_assemble(i,i_regs);break;
3503     case FCONV:
3504       fconv_assemble(i,i_regs);break;
3505     case FLOAT:
3506       float_assemble(i,i_regs);break;
3507     case FCOMP:
3508       fcomp_assemble(i,i_regs);break;
3509     case MULTDIV:
3510       multdiv_assemble(i,i_regs);break;
3511     case MOV:
3512       mov_assemble(i,i_regs);break;
3513     case SYSCALL:
3514     case HLECALL:
3515     case INTCALL:
3516     case SPAN:
3517     case UJUMP:
3518     case RJUMP:
3519     case CJUMP:
3520     case SJUMP:
3521     case FJUMP:
3522       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
3523   }
3524   is_delayslot=0;
3525 }
3526
3527 // Is the branch target a valid internal jump?
3528 int internal_branch(uint64_t i_is32,int addr)
3529 {
3530   if(addr&1) return 0; // Indirect (register) jump
3531   if(addr>=start && addr<start+slen*4-4)
3532   {
3533     //int t=(addr-start)>>2;
3534     // Delay slots are not valid branch targets
3535     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
3536     // 64 -> 32 bit transition requires a recompile
3537     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
3538     {
3539       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
3540       else printf("optimizable: yes\n");
3541     }*/
3542     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
3543     return 1;
3544   }
3545   return 0;
3546 }
3547
3548 #ifndef wb_invalidate
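// Writeback for a register map change: values that are dirty in 'pre' but
// have no host register in 'entry' are stored back (unless unneeded), and
// values that merely moved to a different host register are copied without
// a writeback.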
3549 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
3550   uint64_t u,uint64_t uu)
3551 {
3552   int hr;
3553   for(hr=0;hr<HOST_REGS;hr++) {
3554     if(hr!=EXCLUDE_REG) {
3555       if(pre[hr]!=entry[hr]) {
3556         if(pre[hr]>=0) {
3557           if((dirty>>hr)&1) {
3558             if(get_reg(entry,pre[hr])<0) {
3559               if(pre[hr]<64) {
3560                 if(!((u>>pre[hr])&1)) {
3561                   emit_storereg(pre[hr],hr);
3562                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
3563                     emit_sarimm(hr,31,hr);
3564                     emit_storereg(pre[hr]|64,hr);
3565                   }
3566                 }
3567               }else{
3568                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
3569                   emit_storereg(pre[hr],hr);
3570                 }
3571               }
3572             }
3573           }
3574         }
3575       }
3576     }
3577   }
3578   // Move from one register to another (no writeback)
3579   for(hr=0;hr<HOST_REGS;hr++) {
3580     if(hr!=EXCLUDE_REG) {
3581       if(pre[hr]!=entry[hr]) {
3582         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
3583           int nr;
3584           if((nr=get_reg(entry,pre[hr]))>=0) {
3585             emit_mov(hr,nr);
3586           }
3587         }
3588       }
3589     }
3590   }
3591 }
3592 #endif
3593
3594 // Load the specified registers
3595 // This only loads the registers given as arguments because
3596 // we don't want to load things that will be overwritten
3597 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
3598 {
3599   int hr;
3600   // Load 32-bit regs
3601   for(hr=0;hr<HOST_REGS;hr++) {
3602     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3603       if(entry[hr]!=regmap[hr]) {
3604         if(regmap[hr]==rs1||regmap[hr]==rs2)
3605         {
3606           if(regmap[hr]==0) {
3607             emit_zeroreg(hr);
3608           }
3609           else
3610           {
3611             emit_loadreg(regmap[hr],hr);
3612           }
3613         }
3614       }
3615     }
3616   }
3617   // Load 64-bit regs

3618   for(hr=0;hr<HOST_REGS;hr++) {
3619     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3620       if(entry[hr]!=regmap[hr]) {
3621         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
3622         {
3623           assert(regmap[hr]!=64);
3624           if((is32>>(regmap[hr]&63))&1) {
3625             int lr=get_reg(regmap,regmap[hr]-64);
3626             if(lr>=0)
3627               emit_sarimm(lr,31,hr);
3628             else
3629               emit_loadreg(regmap[hr],hr);
3630           }
3631           else
3632           {
3633             emit_loadreg(regmap[hr],hr);
3634           }
3635         }
3636       }
3637     }
3638   }
3639 }
3640
3641 // Load registers prior to the start of a loop
3642 // so that they are not loaded within the loop
3643 static void loop_preload(signed char pre[],signed char entry[])
3644 {
3645   int hr;
3646   for(hr=0;hr<HOST_REGS;hr++) {
3647     if(hr!=EXCLUDE_REG) {
3648       if(pre[hr]!=entry[hr]) {
3649         if(entry[hr]>=0) {
3650           if(get_reg(pre,entry[hr])<0) {
3651             assem_debug("loop preload:\n");
3652             //printf("loop preload: %d\n",hr);
3653             if(entry[hr]==0) {
3654               emit_zeroreg(hr);
3655             }
3656             else if(entry[hr]<TEMPREG)
3657             {
3658               emit_loadreg(entry[hr],hr);
3659             }
3660             else if(entry[hr]-64<TEMPREG)
3661             {
3662               emit_loadreg(entry[hr],hr);
3663             }
3664           }
3665         }
3666       }
3667     }
3668   }
3669 }
3670
3671 // Generate address for load/store instruction
3672 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
3673 void address_generation(int i,struct regstat *i_regs,signed char entry[])
3674 {
3675   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
3676     int ra=-1;
3677     int agr=AGEN1+(i&1);
3678     if(itype[i]==LOAD) {
3679       ra=get_reg(i_regs->regmap,rt1[i]);
3680       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3681       assert(ra>=0);
3682     }
3683     if(itype[i]==LOADLR) {
3684       ra=get_reg(i_regs->regmap,FTEMP);
3685     }
3686     if(itype[i]==STORE||itype[i]==STORELR) {
3687       ra=get_reg(i_regs->regmap,agr);
3688       if(ra<0) ra=get_reg(i_regs->regmap,-1);
3689     }
3690     if(itype[i]==C1LS||itype[i]==C2LS) {
3691       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
3692         ra=get_reg(i_regs->regmap,FTEMP);
3693       else { // SWC1/SDC1/SWC2/SDC2
3694         ra=get_reg(i_regs->regmap,agr);
3695         if(ra<0) ra=get_reg(i_regs->regmap,-1);
3696       }
3697     }
3698     int rs=get_reg(i_regs->regmap,rs1[i]);
3699     if(ra>=0) {
3700       int offset=imm[i];
3701       int c=(i_regs->wasconst>>rs)&1;
3702       if(rs1[i]==0) {
3703         // Using r0 as a base address
3704         if(!entry||entry[ra]!=agr) {
3705           if (opcode[i]==0x22||opcode[i]==0x26) {
3706             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3707           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3708             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3709           }else{
3710             emit_movimm(offset,ra);
3711           }
3712         } // else did it in the previous cycle
3713       }
3714       else if(rs<0) {
3715         if(!entry||entry[ra]!=rs1[i])
3716           emit_loadreg(rs1[i],ra);
3717         //if(!entry||entry[ra]!=rs1[i])
3718         //  printf("poor load scheduling!\n");
3719       }
3720       else if(c) {
3721         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
3722           if(!entry||entry[ra]!=agr) {
3723             if (opcode[i]==0x22||opcode[i]==0x26) {
3724               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3725             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
3726               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3727             }else{
3728               #ifdef HOST_IMM_ADDR32
3729               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3730               #endif
3731               emit_movimm(constmap[i][rs]+offset,ra);
3732               regs[i].loadedconst|=1<<ra;
3733             }
3734           } // else did it in the previous cycle
3735         } // else load_consts already did it
3736       }
3737       if(offset&&!c&&rs1[i]) {
3738         if(rs>=0) {
3739           emit_addimm(rs,offset,ra);
3740         }else{
3741           emit_addimm(ra,offset,ra);
3742         }
3743       }
3744     }
3745   }
3746   // Preload constants for next instruction
3747   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
3748     int agr,ra;
3749     // Actual address
3750     agr=AGEN1+((i+1)&1);
3751     ra=get_reg(i_regs->regmap,agr);
3752     if(ra>=0) {
3753       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
3754       int offset=imm[i+1];
3755       int c=(regs[i+1].wasconst>>rs)&1;
3756       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
3757         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3758           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
3759         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3760           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
3761         }else{
3762           #ifdef HOST_IMM_ADDR32
3763           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32)) // LWC1/LDC1/LWC2/LDC2
3764           #endif
3765           emit_movimm(constmap[i+1][rs]+offset,ra);
3766           regs[i+1].loadedconst|=1<<ra;
3767         }
3768       }
3769       else if(rs1[i+1]==0) {
3770         // Using r0 as a base address
3771         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
3772           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
3773         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
3774           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
3775         }else{
3776           emit_movimm(offset,ra);
3777         }
3778       }
3779     }
3780   }
3781 }
3782
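// Follow a constant forward for as long as it stays in host register hr and
// return the value the register should finally hold.  If the constant only
// feeds a load that overwrites its own base register (e.g. "lui a0,0x8000;
// lw a0,0x1234(a0)"), the load address 0x80001234 is produced instead so it
// can be generated directly.  Returns 0 when the value turns out unneeded.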
3783 static int get_final_value(int hr, int i, int *value)
3784 {
3785   int reg=regs[i].regmap[hr];
3786   while(i<slen-1) {
3787     if(regs[i+1].regmap[hr]!=reg) break;
3788     if(!((regs[i+1].isconst>>hr)&1)) break;
3789     if(bt[i+1]) break;
3790     i++;
3791   }
3792   if(i<slen-1) {
3793     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
3794       *value=constmap[i][hr];
3795       return 1;
3796     }
3797     if(!bt[i+1]) {
3798       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
3799         // Load in delay slot, out-of-order execution
3800         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
3801         {
3802           // Precompute load address
3803           *value=constmap[i][hr]+imm[i+2];
3804           return 1;
3805         }
3806       }
3807       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
3808       {
3809         // Precompute load address
3810         *value=constmap[i][hr]+imm[i+1];
3811         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
3812         return 1;
3813       }
3814     }
3815   }
3816   *value=constmap[i][hr];
3817   //printf("c=%x\n",(int)constmap[i][hr]);
3818   if(i==slen-1) return 1;
3819   if(reg<64) {
3820     return !((unneeded_reg[i+1]>>reg)&1);
3821   }else{
3822     return !((unneeded_reg_upper[i+1]>>reg)&1);
3823   }
3824 }
3825
3826 // Load registers with known constants
3827 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
3828 {
3829   int hr,hr2;
3830   // propagate loaded constant flags
3831   if(i==0||bt[i])
3832     regs[i].loadedconst=0;
3833   else {
3834     for(hr=0;hr<HOST_REGS;hr++) {
3835       if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
3836          &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
3837       {
3838         regs[i].loadedconst|=1<<hr;
3839       }
3840     }
3841   }
3842   // Load 32-bit regs
3843   for(hr=0;hr<HOST_REGS;hr++) {
3844     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3845       //if(entry[hr]!=regmap[hr]) {
3846       if(!((regs[i].loadedconst>>hr)&1)) {
3847         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3848           int value,similar=0;
3849           if(get_final_value(hr,i,&value)) {
3850             // see if some other register has similar value
3851             for(hr2=0;hr2<HOST_REGS;hr2++) {
3852               if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
3853                 if(is_similar_value(value,constmap[i][hr2])) {
3854                   similar=1;
3855                   break;
3856                 }
3857               }
3858             }
3859             if(similar) {
3860               int value2;
3861               if(get_final_value(hr2,i,&value2)) // is this needed?
3862                 emit_movimm_from(value2,hr2,value,hr);
3863               else
3864                 emit_movimm(value,hr);
3865             }
3866             else if(value==0) {
3867               emit_zeroreg(hr);
3868             }
3869             else {
3870               emit_movimm(value,hr);
3871             }
3872           }
3873           regs[i].loadedconst|=1<<hr;
3874         }
3875       }
3876     }
3877   }
3878   // Load 64-bit regs
3879   for(hr=0;hr<HOST_REGS;hr++) {
3880     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
3881       //if(entry[hr]!=regmap[hr]) {
3882       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
3883         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3884           if((is32>>(regmap[hr]&63))&1) {
3885             int lr=get_reg(regmap,regmap[hr]-64);
3886             assert(lr>=0);
3887             emit_sarimm(lr,31,hr);
3888           }
3889           else
3890           {
3891             int value;
3892             if(get_final_value(hr,i,&value)) {
3893               if(value==0) {
3894                 emit_zeroreg(hr);
3895               }
3896               else {
3897                 emit_movimm(value,hr);
3898               }
3899             }
3900           }
3901         }
3902       }
3903     }
3904   }
3905 }
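// Materialize known constants into every dirty mapped register
// (no lookahead; uses the constant recorded for this instruction).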
3906 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
3907 {
3908   int hr;
3909   // Load 32-bit regs
3910   for(hr=0;hr<HOST_REGS;hr++) {
3911     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3912       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
3913         int value=constmap[i][hr];
3914         if(value==0) {
3915           emit_zeroreg(hr);
3916         }
3917         else {
3918           emit_movimm(value,hr);
3919         }
3920       }
3921     }
3922   }
3923   // Load 64-bit regs
3924   for(hr=0;hr<HOST_REGS;hr++) {
3925     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
3926       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
3927         if((is32>>(regmap[hr]&63))&1) {
3928           int lr=get_reg(regmap,regmap[hr]-64);
3929           assert(lr>=0);
3930           emit_sarimm(lr,31,hr);
3931         }
3932         else
3933         {
3934           int value=constmap[i][hr];
3935           if(value==0) {
3936             emit_zeroreg(hr);
3937           }
3938           else {
3939             emit_movimm(value,hr);
3940           }
3941         }
3942       }
3943     }
3944   }
3945 }
3946
3947 // Write out all dirty registers (except cycle count)
3948 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
3949 {
3950   int hr;
3951   for(hr=0;hr<HOST_REGS;hr++) {
3952     if(hr!=EXCLUDE_REG) {
3953       if(i_regmap[hr]>0) {
3954         if(i_regmap[hr]!=CCREG) {
3955           if((i_dirty>>hr)&1) {
3956             if(i_regmap[hr]<64) {
3957               emit_storereg(i_regmap[hr],hr);
3958             }else{
3959               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3960                 emit_storereg(i_regmap[hr],hr);
3961               }
3962             }
3963           }
3964         }
3965       }
3966     }
3967   }
3968 }
3969 // Write out dirty registers that we need to reload (pair with load_needed_regs)
3970 // This writes the registers not written by store_regs_bt
3971 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
3972 {
3973   int hr;
3974   int t=(addr-start)>>2;
3975   for(hr=0;hr<HOST_REGS;hr++) {
3976     if(hr!=EXCLUDE_REG) {
3977       if(i_regmap[hr]>0) {
3978         if(i_regmap[hr]!=CCREG) {
3979           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
3980             if((i_dirty>>hr)&1) {
3981               if(i_regmap[hr]<64) {
3982                 emit_storereg(i_regmap[hr],hr);
3983               }else{
3984                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
3985                   emit_storereg(i_regmap[hr],hr);
3986                 }
3987               }
3988             }
3989           }
3990         }
3991       }
3992     }
3993   }
3994 }
3995
3996 // Load all registers (except cycle count)
3997 void load_all_regs(signed char i_regmap[])
3998 {
3999   int hr;
4000   for(hr=0;hr<HOST_REGS;hr++) {
4001     if(hr!=EXCLUDE_REG) {
4002       if(i_regmap[hr]==0) {
4003         emit_zeroreg(hr);
4004       }
4005       else
4006       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4007       {
4008         emit_loadreg(i_regmap[hr],hr);
4009       }
4010     }
4011   }
4012 }
4013
4014 // Load all current registers also needed by next instruction
4015 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4016 {
4017   int hr;
4018   for(hr=0;hr<HOST_REGS;hr++) {
4019     if(hr!=EXCLUDE_REG) {
4020       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4021         if(i_regmap[hr]==0) {
4022           emit_zeroreg(hr);
4023         }
4024         else
4025         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4026         {
4027           emit_loadreg(i_regmap[hr],hr);
4028         }
4029       }
4030     }
4031   }
4032 }
4033
4034 // Load all regs, storing cycle count if necessary
4035 void load_regs_entry(int t)
4036 {
4037   int hr;
4038   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4039   else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
4040   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4041     emit_storereg(CCREG,HOST_CCREG);
4042   }
4043   // Load 32-bit regs
4044   for(hr=0;hr<HOST_REGS;hr++) {
4045     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4046       if(regs[t].regmap_entry[hr]==0) {
4047         emit_zeroreg(hr);
4048       }
4049       else if(regs[t].regmap_entry[hr]!=CCREG)
4050       {
4051         emit_loadreg(regs[t].regmap_entry[hr],hr);
4052       }
4053     }
4054   }
4055   // Load 64-bit regs
4056   for(hr=0;hr<HOST_REGS;hr++) {
4057     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4058       assert(regs[t].regmap_entry[hr]!=64);
4059       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4060         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4061         if(lr<0) {
4062           emit_loadreg(regs[t].regmap_entry[hr],hr);
4063         }
4064         else
4065         {
4066           emit_sarimm(lr,31,hr);
4067         }
4068       }
4069       else
4070       {
4071         emit_loadreg(regs[t].regmap_entry[hr],hr);
4072       }
4073     }
4074   }
4075 }
4076
4077 // Store dirty registers prior to branch
4078 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4079 {
4080   if(internal_branch(i_is32,addr))
4081   {
4082     int t=(addr-start)>>2;
4083     int hr;
4084     for(hr=0;hr<HOST_REGS;hr++) {
4085       if(hr!=EXCLUDE_REG) {
4086         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4087           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4088             if((i_dirty>>hr)&1) {
4089               if(i_regmap[hr]<64) {
4090                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4091                   emit_storereg(i_regmap[hr],hr);
4092                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4093                     #ifdef DESTRUCTIVE_WRITEBACK
4094                     emit_sarimm(hr,31,hr);
4095                     emit_storereg(i_regmap[hr]|64,hr);
4096                     #else
4097                     emit_sarimm(hr,31,HOST_TEMPREG);
4098                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4099                     #endif
4100                   }
4101                 }
4102               }else{
4103                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4104                   emit_storereg(i_regmap[hr],hr);
4105                 }
4106               }
4107             }
4108           }
4109         }
4110       }
4111     }
4112   }
4113   else
4114   {
4115     // Branch out of this block, write out all dirty regs
4116     wb_dirtys(i_regmap,i_is32,i_dirty);
4117   }
4118 }
4119
4120 // Load all needed registers for branch target
4121 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4122 {
4123   //if(addr>=start && addr<(start+slen*4))
4124   if(internal_branch(i_is32,addr))
4125   {
4126     int t=(addr-start)>>2;
4127     int hr;
4128     // Store the cycle count before loading something else
4129     if(i_regmap[HOST_CCREG]!=CCREG) {
4130       assert(i_regmap[HOST_CCREG]==-1);
4131     }
4132     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4133       emit_storereg(CCREG,HOST_CCREG);
4134     }
4135     // Load 32-bit regs
4136     for(hr=0;hr<HOST_REGS;hr++) {
4137       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4138         #ifdef DESTRUCTIVE_WRITEBACK
4139         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4140         #else
4141         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4142         #endif
4143           if(regs[t].regmap_entry[hr]==0) {
4144             emit_zeroreg(hr);
4145           }
4146           else if(regs[t].regmap_entry[hr]!=CCREG)
4147           {
4148             emit_loadreg(regs[t].regmap_entry[hr],hr);
4149           }
4150         }
4151       }
4152     }
4153     // Load 64-bit regs
4154     for(hr=0;hr<HOST_REGS;hr++) {
4155       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4156         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4157           assert(regs[t].regmap_entry[hr]!=64);
4158           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4159             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4160             if(lr<0) {
4161               emit_loadreg(regs[t].regmap_entry[hr],hr);
4162             }
4163             else
4164             {
4165               emit_sarimm(lr,31,hr);
4166             }
4167           }
4168           else
4169           {
4170             emit_loadreg(regs[t].regmap_entry[hr],hr);
4171           }
4172         }
4173         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4174           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4175           assert(lr>=0);
4176           emit_sarimm(lr,31,hr);
4177         }
4178       }
4179     }
4180   }
4181 }
4182
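// Can execution jump straight to the compiled code at 'addr' with the
// current register state?  Returns 1 when every mapped register (and its
// dirty/32-bit status) is compatible with the recorded entry state of the
// target; for targets outside this block only the cycle count may be dirty.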
4183 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4184 {
4185   if(addr>=start && addr<start+slen*4-4)
4186   {
4187     int t=(addr-start)>>2;
4188     int hr;
4189     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4190     for(hr=0;hr<HOST_REGS;hr++)
4191     {
4192       if(hr!=EXCLUDE_REG)
4193       {
4194         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4195         {
4196           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4197           {
4198             return 0;
4199           }
4200           else
4201           if((i_dirty>>hr)&1)
4202           {
4203             if(i_regmap[hr]<TEMPREG)
4204             {
4205               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4206                 return 0;
4207             }
4208             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4209             {
4210               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4211                 return 0;
4212             }
4213           }
4214         }
4215         else // Same register but is it 32-bit or dirty?
4216         if(i_regmap[hr]>=0)
4217         {
4218           if(!((regs[t].dirty>>hr)&1))
4219           {
4220             if((i_dirty>>hr)&1)
4221             {
4222               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4223               {
4224                 //printf("%x: dirty no match\n",addr);
4225                 return 0;
4226               }
4227             }
4228           }
4229           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4230           {
4231             //printf("%x: is32 no match\n",addr);
4232             return 0;
4233           }
4234         }
4235       }
4236     }
4237     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4238     // Delay slots are not valid branch targets
4239     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4240     // Delay slots require additional processing, so do not match
4241     if(is_ds[t]) return 0;
4242   }
4243   else
4244   {
4245     int hr;
4246     for(hr=0;hr<HOST_REGS;hr++)
4247     {
4248       if(hr!=EXCLUDE_REG)
4249       {
4250         if(i_regmap[hr]>=0)
4251         {
4252           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4253           {
4254             if((i_dirty>>hr)&1)
4255             {
4256               return 0;
4257             }
4258           }
4259         }
4260       }
4261     }
4262   }
4263   return 1;
4264 }
4265
4266 // Used when a branch jumps into the delay slot of another branch
4267 void ds_assemble_entry(int i)
4268 {
4269   int t=(ba[i]-start)>>2;
4270   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4271   assem_debug("Assemble delay slot at %x\n",ba[i]);
4272   assem_debug("<->\n");
4273   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4274     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4275   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4276   address_generation(t,&regs[t],regs[t].regmap_entry);
4277   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4278     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4279   cop1_usable=0;
4280   is_delayslot=0;
4281   switch(itype[t]) {
4282     case ALU:
4283       alu_assemble(t,&regs[t]);break;
4284     case IMM16:
4285       imm16_assemble(t,&regs[t]);break;
4286     case SHIFT:
4287       shift_assemble(t,&regs[t]);break;
4288     case SHIFTIMM:
4289       shiftimm_assemble(t,&regs[t]);break;
4290     case LOAD:
4291       load_assemble(t,&regs[t]);break;
4292     case LOADLR:
4293       loadlr_assemble(t,&regs[t]);break;
4294     case STORE:
4295       store_assemble(t,&regs[t]);break;
4296     case STORELR:
4297       storelr_assemble(t,&regs[t]);break;
4298     case COP0:
4299       cop0_assemble(t,&regs[t]);break;
4300     case COP1:
4301       cop1_assemble(t,&regs[t]);break;
4302     case C1LS:
4303       c1ls_assemble(t,&regs[t]);break;
4304     case COP2:
4305       cop2_assemble(t,&regs[t]);break;
4306     case C2LS:
4307       c2ls_assemble(t,&regs[t]);break;
4308     case C2OP:
4309       c2op_assemble(t,&regs[t]);break;
4310     case FCONV:
4311       fconv_assemble(t,&regs[t]);break;
4312     case FLOAT:
4313       float_assemble(t,&regs[t]);break;
4314     case FCOMP:
4315       fcomp_assemble(t,&regs[t]);break;
4316     case MULTDIV:
4317       multdiv_assemble(t,&regs[t]);break;
4318     case MOV:
4319       mov_assemble(t,&regs[t]);break;
4320     case SYSCALL:
4321     case HLECALL:
4322     case INTCALL:
4323     case SPAN:
4324     case UJUMP:
4325     case RJUMP:
4326     case CJUMP:
4327     case SJUMP:
4328     case FJUMP:
4329       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4330   }
4331   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4332   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4333   if(internal_branch(regs[t].is32,ba[i]+4))
4334     assem_debug("branch: internal\n");
4335   else
4336     assem_debug("branch: external\n");
4337   assert(internal_branch(regs[t].is32,ba[i]+4));
4338   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4339   emit_jmp(0);
4340 }
4341
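// Emit the cycle counter check for a branch.  The counter runs negative and
// counts up towards zero; once it becomes non-negative a CC_STUB is taken so
// cc_interrupt can run scheduled events.  Simple idle loops (a branch to
// itself with a nop in the delay slot) are detected and handled specially so
// the idle time can be skipped.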
4342 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4343 {
4344   int count;
4345   int jaddr;
4346   int idle=0;
4347   int t=0;
4348   if(itype[i]==RJUMP)
4349   {
4350     *adj=0;
4351   }
4352   //if(ba[i]>=start && ba[i]<(start+slen*4))
4353   if(internal_branch(branch_regs[i].is32,ba[i]))
4354   {
4355     t=(ba[i]-start)>>2;
4356     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4357     else *adj=ccadj[t];
4358   }
4359   else
4360   {
4361     *adj=0;
4362   }
4363   count=ccadj[i];
4364   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4365     // Idle loop
4366     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4367     idle=(int)out;
4368     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4369     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4370     jaddr=(int)out;
4371     emit_jmp(0);
4372   }
4373   else if(*adj==0||invert) {
4374     int cycles=CLOCK_ADJUST(count+2);
4375     // faster loop HACK
4376     if (t&&*adj) {
4377       int rel=t-i;
4378       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4379         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4380     }
4381     emit_addimm_and_set_flags(cycles,HOST_CCREG);
4382     jaddr=(int)out;
4383     emit_jns(0);
4384   }
4385   else
4386   {
4387     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4388     jaddr=(int)out;
4389     emit_jns(0);
4390   }
4391   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4392 }
4393
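// Out-of-line code for the cycle counter check: write back whatever is
// dirty, store the PC to resume at (for conditional branches this
// re-evaluates the branch condition), call cc_interrupt, then reload
// registers and jump back to the compiled code.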
4394 void do_ccstub(int n)
4395 {
4396   literal_pool(256);
4397   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4398   set_jump_target(stubs[n][1],(int)out);
4399   int i=stubs[n][4];
4400   if(stubs[n][6]==NULLDS) {
4401     // Delay slot instruction is nullified ("likely" branch)
4402     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4403   }
4404   else if(stubs[n][6]!=TAKEN) {
4405     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4406   }
4407   else {
4408     if(internal_branch(branch_regs[i].is32,ba[i]))
4409       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4410   }
4411   if(stubs[n][5]!=-1)
4412   {
4413     // Save PC as return address
4414     emit_movimm(stubs[n][5],EAX);
4415     emit_writeword(EAX,(int)&pcaddr);
4416   }
4417   else
4418   {
4419     // Return address depends on which way the branch goes
4420     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4421     {
4422       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4423       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4424       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4425       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4426       if(rs1[i]==0)
4427       {
4428         s1l=s2l;s1h=s2h;
4429         s2l=s2h=-1;
4430       }
4431       else if(rs2[i]==0)
4432       {
4433         s2l=s2h=-1;
4434       }
4435       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4436         s1h=s2h=-1;
4437       }
4438       assert(s1l>=0);
4439       #ifdef DESTRUCTIVE_WRITEBACK
4440       if(rs1[i]) {
4441         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4442           emit_loadreg(rs1[i],s1l);
4443       }
4444       else {
4445         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4446           emit_loadreg(rs2[i],s1l);
4447       }
4448       if(s2l>=0)
4449         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4450           emit_loadreg(rs2[i],s2l);
4451       #endif
4452       int hr=0;
4453       int addr=-1,alt=-1,ntaddr=-1;
4454       while(hr<HOST_REGS)
4455       {
4456         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4457            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4458            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4459         {
4460           addr=hr++;break;
4461         }
4462         hr++;
4463       }
4464       while(hr<HOST_REGS)
4465       {
4466         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4467            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4468            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4469         {
4470           alt=hr++;break;
4471         }
4472         hr++;
4473       }
4474       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
4475       {
4476         while(hr<HOST_REGS)
4477         {
4478           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4479              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4480              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4481           {
4482             ntaddr=hr;break;
4483           }
4484           hr++;
4485         }
4486         assert(hr<HOST_REGS);
4487       }
4488       if((opcode[i]&0x2f)==4) // BEQ
4489       {
4490         #ifdef HAVE_CMOV_IMM
4491         if(s1h<0) {
4492           if(s2l>=0) emit_cmp(s1l,s2l);
4493           else emit_test(s1l,s1l);
4494           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
4495         }
4496         else
4497         #endif
4498         {
4499           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4500           if(s1h>=0) {
4501             if(s2h>=0) emit_cmp(s1h,s2h);
4502             else emit_test(s1h,s1h);
4503             emit_cmovne_reg(alt,addr);
4504           }
4505           if(s2l>=0) emit_cmp(s1l,s2l);
4506           else emit_test(s1l,s1l);
4507           emit_cmovne_reg(alt,addr);
4508         }
4509       }
4510       if((opcode[i]&0x2f)==5) // BNE
4511       {
4512         #ifdef HAVE_CMOV_IMM
4513         if(s1h<0) {
4514           if(s2l>=0) emit_cmp(s1l,s2l);
4515           else emit_test(s1l,s1l);
4516           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
4517         }
4518         else
4519         #endif
4520         {
4521           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
4522           if(s1h>=0) {
4523             if(s2h>=0) emit_cmp(s1h,s2h);
4524             else emit_test(s1h,s1h);
4525             emit_cmovne_reg(alt,addr);
4526           }
4527           if(s2l>=0) emit_cmp(s1l,s2l);
4528           else emit_test(s1l,s1l);
4529           emit_cmovne_reg(alt,addr);
4530         }
4531       }
4532       if((opcode[i]&0x2f)==6) // BLEZ
4533       {
4534         //emit_movimm(ba[i],alt);
4535         //emit_movimm(start+i*4+8,addr);
4536         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4537         emit_cmpimm(s1l,1);
4538         if(s1h>=0) emit_mov(addr,ntaddr);
4539         emit_cmovl_reg(alt,addr);
4540         if(s1h>=0) {
4541           emit_test(s1h,s1h);
4542           emit_cmovne_reg(ntaddr,addr);
4543           emit_cmovs_reg(alt,addr);
4544         }
4545       }
4546       if((opcode[i]&0x2f)==7) // BGTZ
4547       {
4548         //emit_movimm(ba[i],addr);
4549         //emit_movimm(start+i*4+8,ntaddr);
4550         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
4551         emit_cmpimm(s1l,1);
4552         if(s1h>=0) emit_mov(addr,alt);
4553         emit_cmovl_reg(ntaddr,addr);
4554         if(s1h>=0) {
4555           emit_test(s1h,s1h);
4556           emit_cmovne_reg(alt,addr);
4557           emit_cmovs_reg(ntaddr,addr);
4558         }
4559       }
4560       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
4561       {
4562         //emit_movimm(ba[i],alt);
4563         //emit_movimm(start+i*4+8,addr);
4564         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4565         if(s1h>=0) emit_test(s1h,s1h);
4566         else emit_test(s1l,s1l);
4567         emit_cmovs_reg(alt,addr);
4568       }
4569       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
4570       {
4571         //emit_movimm(ba[i],addr);
4572         //emit_movimm(start+i*4+8,alt);
4573         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4574         if(s1h>=0) emit_test(s1h,s1h);
4575         else emit_test(s1l,s1l);
4576         emit_cmovs_reg(alt,addr);
4577       }
4578       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
4579         if(source[i]&0x10000) // BC1T
4580         {
4581           //emit_movimm(ba[i],alt);
4582           //emit_movimm(start+i*4+8,addr);
4583           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
4584           emit_testimm(s1l,0x800000);
4585           emit_cmovne_reg(alt,addr);
4586         }
4587         else // BC1F
4588         {
4589           //emit_movimm(ba[i],addr);
4590           //emit_movimm(start+i*4+8,alt);
4591           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
4592           emit_testimm(s1l,0x800000);
4593           emit_cmovne_reg(alt,addr);
4594         }
4595       }
4596       emit_writeword(addr,(int)&pcaddr);
4597     }
4598     else
4599     if(itype[i]==RJUMP)
4600     {
4601       int r=get_reg(branch_regs[i].regmap,rs1[i]);
4602       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4603         r=get_reg(branch_regs[i].regmap,RTEMP);
4604       }
4605       emit_writeword(r,(int)&pcaddr);
4606     }
4607     else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
4608   }
4609   // Update cycle count
4610   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
4611   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4612   emit_call((int)cc_interrupt);
4613   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
4614   if(stubs[n][6]==TAKEN) {
4615     if(internal_branch(branch_regs[i].is32,ba[i]))
4616       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
4617     else if(itype[i]==RJUMP) {
4618       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
4619         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
4620       else
4621         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
4622     }
4623   }else if(stubs[n][6]==NOTTAKEN) {
4624     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
4625     else load_all_regs(branch_regs[i].regmap);
4626   }else if(stubs[n][6]==NULLDS) {
4627     // Delay slot instruction is nullified ("likely" branch)
4628     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
4629     else load_all_regs(regs[i].regmap);
4630   }else{
4631     load_all_regs(branch_regs[i].regmap);
4632   }
4633   emit_jmp(stubs[n][2]); // return address
4634
4635   /* This works but uses a lot of memory...
4636   emit_readword((int)&last_count,ECX);
4637   emit_add(HOST_CCREG,ECX,EAX);
4638   emit_writeword(EAX,(int)&Count);
4639   emit_call((int)gen_interupt);
4640   emit_readword((int)&Count,HOST_CCREG);
4641   emit_readword((int)&next_interupt,EAX);
4642   emit_readword((int)&pending_exception,EBX);
4643   emit_writeword(EAX,(int)&last_count);
4644   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
4645   emit_test(EBX,EBX);
4646   int jne_instr=(int)out;
4647   emit_jne(0);
4648   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
4649   load_all_regs(branch_regs[i].regmap);
4650   emit_jmp(stubs[n][2]); // return address
4651   set_jump_target(jne_instr,(int)out);
4652   emit_readword((int)&pcaddr,EAX);
4653   // Call get_addr_ht instead of doing the hash table here.
4654   // This code is executed infrequently and takes up a lot of space
4655   // so smaller is better.
4656   emit_storereg(CCREG,HOST_CCREG);
4657   emit_pushreg(EAX);
4658   emit_call((int)get_addr_ht);
4659   emit_loadreg(CCREG,HOST_CCREG);
4660   emit_addimm(ESP,4,ESP);
4661   emit_jmpreg(EAX);*/
4662 }
4663
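// Record a jump that still needs to be patched: 'addr' is the location of
// the emitted branch, 'target' the virtual address it should eventually
// reach.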
4664 static void add_to_linker(int addr,int target,int ext)
4665 {
4666   link_addr[linkcount][0]=addr;
4667   link_addr[linkcount][1]=target;
4668   link_addr[linkcount][2]=ext;
4669   linkcount++;
4670 }
4671
4672 static void ujump_assemble_write_ra(int i)
4673 {
4674   int rt;
4675   unsigned int return_address;
4676   rt=get_reg(branch_regs[i].regmap,31);
4677   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4678   //assert(rt>=0);
4679   return_address=start+i*4+8;
4680   if(rt>=0) {
4681     #ifdef USE_MINI_HT
4682     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
4683       int temp=-1; // note: must be ds-safe
4684       #ifdef HOST_TEMPREG
4685       temp=HOST_TEMPREG;
4686       #endif
4687       if(temp>=0) do_miniht_insert(return_address,rt,temp);
4688       else emit_movimm(return_address,rt);
4689     }
4690     else
4691     #endif
4692     {
4693       #ifdef REG_PREFETCH
4694       if(temp>=0)
4695       {
4696         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4697       }
4698       #endif
4699       emit_movimm(return_address,rt); // PC into link register
4700       #ifdef IMM_PREFETCH
4701       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4702       #endif
4703     }
4704   }
4705 }
4706
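// J/JAL: assemble the delay slot, write $ra for JAL (before the slot if the
// slot reads it), update the cycle count and either fall through into an
// internal target or emit a jump to be linked later.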
4707 void ujump_assemble(int i,struct regstat *i_regs)
4708 {
4709   int ra_done=0;
4710   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4711   address_generation(i+1,i_regs,regs[i].regmap_entry);
4712   #ifdef REG_PREFETCH
4713   int temp=get_reg(branch_regs[i].regmap,PTEMP);
4714   if(rt1[i]==31&&temp>=0)
4715   {
4716     signed char *i_regmap=i_regs->regmap;
4717     int return_address=start+i*4+8;
4718     if(get_reg(branch_regs[i].regmap,31)>0)
4719     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4720   }
4721   #endif
4722   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4723     ujump_assemble_write_ra(i); // writeback ra for DS
4724     ra_done=1;
4725   }
4726   ds_assemble(i+1,i_regs);
4727   uint64_t bc_unneeded=branch_regs[i].u;
4728   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4729   bc_unneeded|=1|(1LL<<rt1[i]);
4730   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4731   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4732                 bc_unneeded,bc_unneeded_upper);
4733   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4734   if(!ra_done&&rt1[i]==31)
4735     ujump_assemble_write_ra(i);
4736   int cc,adj;
4737   cc=get_reg(branch_regs[i].regmap,CCREG);
4738   assert(cc==HOST_CCREG);
4739   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4740   #ifdef REG_PREFETCH
4741   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4742   #endif
4743   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4744   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4745   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4746   if(internal_branch(branch_regs[i].is32,ba[i]))
4747     assem_debug("branch: internal\n");
4748   else
4749     assem_debug("branch: external\n");
4750   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
4751     ds_assemble_entry(i);
4752   }
4753   else {
4754     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
4755     emit_jmp(0);
4756   }
4757 }
4758
4759 static void rjump_assemble_write_ra(int i)
4760 {
4761   int rt,return_address;
4762   assert(rt1[i+1]!=rt1[i]);
4763   assert(rt2[i+1]!=rt1[i]);
4764   rt=get_reg(branch_regs[i].regmap,rt1[i]);
4765   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
4766   assert(rt>=0);
4767   return_address=start+i*4+8;
4768   #ifdef REG_PREFETCH
4769   if(temp>=0)
4770   {
4771     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4772   }
4773   #endif
4774   emit_movimm(return_address,rt); // PC into link register
4775   #ifdef IMM_PREFETCH
4776   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
4777   #endif
4778 }
4779
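// JR/JALR: the target register is copied to RTEMP if the delay slot would
// clobber it; after the slot the cycle count is checked and the jump goes
// through jump_vaddr_reg[] (or the mini hash table for "jr $ra" when
// USE_MINI_HT is enabled).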
4780 void rjump_assemble(int i,struct regstat *i_regs)
4781 {
4782   int temp;
4783   int rs,cc;
4784   int ra_done=0;
4785   rs=get_reg(branch_regs[i].regmap,rs1[i]);
4786   assert(rs>=0);
4787   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
4788     // Delay slot abuse, make a copy of the branch address register
4789     temp=get_reg(branch_regs[i].regmap,RTEMP);
4790     assert(temp>=0);
4791     assert(regs[i].regmap[temp]==RTEMP);
4792     emit_mov(rs,temp);
4793     rs=temp;
4794   }
4795   address_generation(i+1,i_regs,regs[i].regmap_entry);
4796   #ifdef REG_PREFETCH
4797   if(rt1[i]==31)
4798   {
4799     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
4800       signed char *i_regmap=i_regs->regmap;
4801       int return_address=start+i*4+8;
4802       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
4803     }
4804   }
4805   #endif
4806   #ifdef USE_MINI_HT
4807   if(rs1[i]==31) {
4808     int rh=get_reg(regs[i].regmap,RHASH);
4809     if(rh>=0) do_preload_rhash(rh);
4810   }
4811   #endif
4812   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
4813     rjump_assemble_write_ra(i);
4814     ra_done=1;
4815   }
4816   ds_assemble(i+1,i_regs);
4817   uint64_t bc_unneeded=branch_regs[i].u;
4818   uint64_t bc_unneeded_upper=branch_regs[i].uu;
4819   bc_unneeded|=1|(1LL<<rt1[i]);
4820   bc_unneeded_upper|=1|(1LL<<rt1[i]);
4821   bc_unneeded&=~(1LL<<rs1[i]);
4822   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4823                 bc_unneeded,bc_unneeded_upper);
4824   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
4825   if(!ra_done&&rt1[i]!=0)
4826     rjump_assemble_write_ra(i);
4827   cc=get_reg(branch_regs[i].regmap,CCREG);
4828   assert(cc==HOST_CCREG);
4829   (void)cc;
4830   #ifdef USE_MINI_HT
4831   int rh=get_reg(branch_regs[i].regmap,RHASH);
4832   int ht=get_reg(branch_regs[i].regmap,RHTBL);
4833   if(rs1[i]==31) {
4834     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
4835     do_preload_rhtbl(ht);
4836     do_rhash(rs,rh);
4837   }
4838   #endif
4839   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4840   #ifdef DESTRUCTIVE_WRITEBACK
4841   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
4842     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
4843       emit_loadreg(rs1[i],rs);
4844     }
4845   }
4846   #endif
4847   #ifdef REG_PREFETCH
4848   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
4849   #endif
4850   #ifdef USE_MINI_HT
4851   if(rs1[i]==31) {
4852     do_miniht_load(ht,rh);
4853   }
4854   #endif
4855   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
4856   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
4857   //assert(adj==0);
4858   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
4859   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
4860   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
4861     // special case for RFE
4862     emit_jmp(0);
4863   else
4864     emit_jns(0);
4865   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
4866   #ifdef USE_MINI_HT
4867   if(rs1[i]==31) {
4868     do_miniht_jump(rs,rh,ht);
4869   }
4870   else
4871   #endif
4872   {
4873     //if(rs!=EAX) emit_mov(rs,EAX);
4874     //emit_jmp((int)jump_vaddr_eax);
4875     emit_jmp(jump_vaddr_reg[rs]);
4876   }
4877   /* Check hash table
4878   temp=!rs;
4879   emit_mov(rs,temp);
4880   emit_shrimm(rs,16,rs);
4881   emit_xor(temp,rs,rs);
4882   emit_movzwl_reg(rs,rs);
4883   emit_shlimm(rs,4,rs);
4884   emit_cmpmem_indexed((int)hash_table,rs,temp);
4885   emit_jne((int)out+14);
4886   emit_readword_indexed((int)hash_table+4,rs,rs);
4887   emit_jmpreg(rs);
4888   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
4889   emit_addimm_no_flags(8,rs);
4890   emit_jeq((int)out-17);
4891   // No hit on hash table, call compiler
4892   emit_pushreg(temp);
4893 //DEBUG >
4894 #ifdef DEBUG_CYCLE_COUNT
4895   emit_readword((int)&last_count,ECX);
4896   emit_add(HOST_CCREG,ECX,HOST_CCREG);
4897   emit_readword((int)&next_interupt,ECX);
4898   emit_writeword(HOST_CCREG,(int)&Count);
4899   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
4900   emit_writeword(ECX,(int)&last_count);
4901 #endif
4902 //DEBUG <
4903   emit_storereg(CCREG,HOST_CCREG);
4904   emit_call((int)get_addr);
4905   emit_loadreg(CCREG,HOST_CCREG);
4906   emit_addimm(ESP,4,ESP);
4907   emit_jmpreg(EAX);*/
4908   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4909   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
4910   #endif
4911 }
4912
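// BEQ/BNE/BLEZ/BGTZ: depending on whether the delay slot can be executed
// out of order, the slot is assembled before or after the compare, and the
// branch sense is inverted when the target's register mapping does not
// match the current one.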
4913 void cjump_assemble(int i,struct regstat *i_regs)
4914 {
4915   signed char *i_regmap=i_regs->regmap;
4916   int cc;
4917   int match;
4918   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4919   assem_debug("match=%d\n",match);
4920   int s1h,s1l,s2h,s2l;
4921   int prev_cop1_usable=cop1_usable;
4922   int unconditional=0,nop=0;
4923   int only32=0;
4924   int invert=0;
4925   int internal=internal_branch(branch_regs[i].is32,ba[i]);
4926   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
4927   if(!match) invert=1;
4928   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
4929   if(i>(ba[i]-start)>>2) invert=1;
4930   #endif
4931
4932   if(ooo[i]) {
4933     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4934     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4935     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4936     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4937   }
4938   else {
4939     s1l=get_reg(i_regmap,rs1[i]);
4940     s1h=get_reg(i_regmap,rs1[i]|64);
4941     s2l=get_reg(i_regmap,rs2[i]);
4942     s2h=get_reg(i_regmap,rs2[i]|64);
4943   }
4944   if(rs1[i]==0&&rs2[i]==0)
4945   {
4946     if(opcode[i]&1) nop=1;
4947     else unconditional=1;
4948     //assert(opcode[i]!=5);
4949     //assert(opcode[i]!=7);
4950     //assert(opcode[i]!=0x15);
4951     //assert(opcode[i]!=0x17);
4952   }
4953   else if(rs1[i]==0)
4954   {
4955     s1l=s2l;s1h=s2h;
4956     s2l=s2h=-1;
4957     only32=(regs[i].was32>>rs2[i])&1;
4958   }
4959   else if(rs2[i]==0)
4960   {
4961     s2l=s2h=-1;
4962     only32=(regs[i].was32>>rs1[i])&1;
4963   }
4964   else {
4965     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
4966   }
4967
4968   if(ooo[i]) {
4969     // Out of order execution (delay slot first)
4970     //printf("OOOE\n");
4971     address_generation(i+1,i_regs,regs[i].regmap_entry);
4972     ds_assemble(i+1,i_regs);
4973     int adj;
4974     uint64_t bc_unneeded=branch_regs[i].u;
4975     uint64_t bc_unneeded_upper=branch_regs[i].uu;
4976     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
4977     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
4978     bc_unneeded|=1;
4979     bc_unneeded_upper|=1;
4980     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
4981                   bc_unneeded,bc_unneeded_upper);
4982     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
4983     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
4984     cc=get_reg(branch_regs[i].regmap,CCREG);
4985     assert(cc==HOST_CCREG);
4986     if(unconditional)
4987       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4988     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
4989     //assem_debug("cycle count (adj)\n");
4990     if(unconditional) {
4991       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
4992       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
4993         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
4994         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4995         if(internal)
4996           assem_debug("branch: internal\n");
4997         else
4998           assem_debug("branch: external\n");
4999         if(internal&&is_ds[(ba[i]-start)>>2]) {
5000           ds_assemble_entry(i);
5001         }
5002         else {
5003           add_to_linker((int)out,ba[i],internal);
5004           emit_jmp(0);
5005         }
5006         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5007         if(((u_int)out)&7) emit_addnop(0);
5008         #endif
5009       }
5010     }
5011     else if(nop) {
5012       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5013       int jaddr=(int)out;
5014       emit_jns(0);
5015       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5016     }
5017     else {
5018       int taken=0,nottaken=0,nottaken1=0;
5019       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5020       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5021       if(!only32)
5022       {
5023         assert(s1h>=0);
5024         if(opcode[i]==4) // BEQ
5025         {
5026           if(s2h>=0) emit_cmp(s1h,s2h);
5027           else emit_test(s1h,s1h);
5028           nottaken1=(int)out;
5029           emit_jne(1);
5030         }
5031         if(opcode[i]==5) // BNE
5032         {
5033           if(s2h>=0) emit_cmp(s1h,s2h);
5034           else emit_test(s1h,s1h);
5035           if(invert) taken=(int)out;
5036           else add_to_linker((int)out,ba[i],internal);
5037           emit_jne(0);
5038         }
5039         if(opcode[i]==6) // BLEZ
5040         {
5041           emit_test(s1h,s1h);
5042           if(invert) taken=(int)out;
5043           else add_to_linker((int)out,ba[i],internal);
5044           emit_js(0);
5045           nottaken1=(int)out;
5046           emit_jne(1);
5047         }
5048         if(opcode[i]==7) // BGTZ
5049         {
5050           emit_test(s1h,s1h);
5051           nottaken1=(int)out;
5052           emit_js(1);
5053           if(invert) taken=(int)out;
5054           else add_to_linker((int)out,ba[i],internal);
5055           emit_jne(0);
5056         }
5057       } // if(!only32)
5058
5059       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5060       assert(s1l>=0);
5061       if(opcode[i]==4) // BEQ
5062       {
5063         if(s2l>=0) emit_cmp(s1l,s2l);
5064         else emit_test(s1l,s1l);
5065         if(invert){
5066           nottaken=(int)out;
5067           emit_jne(1);
5068         }else{
5069           add_to_linker((int)out,ba[i],internal);
5070           emit_jeq(0);
5071         }
5072       }
5073       if(opcode[i]==5) // BNE
5074       {
5075         if(s2l>=0) emit_cmp(s1l,s2l);
5076         else emit_test(s1l,s1l);
5077         if(invert){
5078           nottaken=(int)out;
5079           emit_jeq(1);
5080         }else{
5081           add_to_linker((int)out,ba[i],internal);
5082           emit_jne(0);
5083         }
5084       }
5085       if(opcode[i]==6) // BLEZ
5086       {
5087         emit_cmpimm(s1l,1);
5088         if(invert){
5089           nottaken=(int)out;
5090           emit_jge(1);
5091         }else{
5092           add_to_linker((int)out,ba[i],internal);
5093           emit_jl(0);
5094         }
5095       }
5096       if(opcode[i]==7) // BGTZ
5097       {
5098         emit_cmpimm(s1l,1);
5099         if(invert){
5100           nottaken=(int)out;
5101           emit_jl(1);
5102         }else{
5103           add_to_linker((int)out,ba[i],internal);
5104           emit_jge(0);
5105         }
5106       }
5107       if(invert) {
5108         if(taken) set_jump_target(taken,(int)out);
5109         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5110         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5111           if(adj) {
5112             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5113             add_to_linker((int)out,ba[i],internal);
5114           }else{
5115             emit_addnop(13);
5116             add_to_linker((int)out,ba[i],internal*2);
5117           }
5118           emit_jmp(0);
5119         }else
5120         #endif
5121         {
5122           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5123           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5124           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5125           if(internal)
5126             assem_debug("branch: internal\n");
5127           else
5128             assem_debug("branch: external\n");
5129           if(internal&&is_ds[(ba[i]-start)>>2]) {
5130             ds_assemble_entry(i);
5131           }
5132           else {
5133             add_to_linker((int)out,ba[i],internal);
5134             emit_jmp(0);
5135           }
5136         }
5137         set_jump_target(nottaken,(int)out);
5138       }
5139
5140       if(nottaken1) set_jump_target(nottaken1,(int)out);
5141       if(adj) {
5142         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5143       }
5144     } // (!unconditional)
5145   } // if(ooo)
5146   else
5147   {
5148     // In-order execution (branch first)
5149     //if(likely[i]) printf("IOL\n");
5150     //else
5151     //printf("IOE\n");
5152     int taken=0,nottaken=0,nottaken1=0;
5153     if(!unconditional&&!nop) {
5154       if(!only32)
5155       {
5156         assert(s1h>=0);
5157         if((opcode[i]&0x2f)==4) // BEQ/BEQL
5158         {
5159           if(s2h>=0) emit_cmp(s1h,s2h);
5160           else emit_test(s1h,s1h);
5161           nottaken1=(int)out;
5162           emit_jne(2);
5163         }
5164         if((opcode[i]&0x2f)==5) // BNE/BNEL
5165         {
5166           if(s2h>=0) emit_cmp(s1h,s2h);
5167           else emit_test(s1h,s1h);
5168           taken=(int)out;
5169           emit_jne(1);
5170         }
5171         if((opcode[i]&0x2f)==6) // BLEZ/BLEZL
5172         {
5173           emit_test(s1h,s1h);
5174           taken=(int)out;
5175           emit_js(1);
5176           nottaken1=(int)out;
5177           emit_jne(2);
5178         }
5179         if((opcode[i]&0x2f)==7) // BGTZ/BGTZL
5180         {
5181           emit_test(s1h,s1h);
5182           nottaken1=(int)out;
5183           emit_js(2);
5184           taken=(int)out;
5185           emit_jne(1);
5186         }
5187       } // if(!only32)
5188
5189       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5190       assert(s1l>=0);
5191       if((opcode[i]&0x2f)==4) // BEQ/BEQL
5192       {
5193         if(s2l>=0) emit_cmp(s1l,s2l);
5194         else emit_test(s1l,s1l);
5195         nottaken=(int)out;
5196         emit_jne(2);
5197       }
5198       if((opcode[i]&0x2f)==5) // BNE/BNEL
5199       {
5200         if(s2l>=0) emit_cmp(s1l,s2l);
5201         else emit_test(s1l,s1l);
5202         nottaken=(int)out;
5203         emit_jeq(2);
5204       }
5205       if((opcode[i]&0x2f)==6) // BLEZ/BLEZL
5206       {
5207         emit_cmpimm(s1l,1);
5208         nottaken=(int)out;
5209         emit_jge(2);
5210       }
5211       if((opcode[i]&0x2f)==7) // BGTZ/BGTZL
5212       {
5213         emit_cmpimm(s1l,1);
5214         nottaken=(int)out;
5215         emit_jl(2);
5216       }
5217     } // if(!unconditional)
5218     int adj;
5219     uint64_t ds_unneeded=branch_regs[i].u;
5220     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5221     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5222     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5223     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5224     ds_unneeded|=1;
5225     ds_unneeded_upper|=1;
5226     // branch taken
5227     if(!nop) {
5228       if(taken) set_jump_target(taken,(int)out);
5229       assem_debug("1:\n");
5230       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5231                     ds_unneeded,ds_unneeded_upper);
5232       // load regs
5233       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5234       address_generation(i+1,&branch_regs[i],0);
5235       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5236       ds_assemble(i+1,&branch_regs[i]);
5237       cc=get_reg(branch_regs[i].regmap,CCREG);
5238       if(cc==-1) {
5239         emit_loadreg(CCREG,cc=HOST_CCREG);
5240         // CHECK: Is the following instruction (fall thru) allocated ok?
5241       }
5242       assert(cc==HOST_CCREG);
5243       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5244       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5245       assem_debug("cycle count (adj)\n");
5246       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5247       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5248       if(internal)
5249         assem_debug("branch: internal\n");
5250       else
5251         assem_debug("branch: external\n");
5252       if(internal&&is_ds[(ba[i]-start)>>2]) {
5253         ds_assemble_entry(i);
5254       }
5255       else {
5256         add_to_linker((int)out,ba[i],internal);
5257         emit_jmp(0);
5258       }
5259     }
5260     // branch not taken
5261     cop1_usable=prev_cop1_usable;
5262     if(!unconditional) {
5263       if(nottaken1) set_jump_target(nottaken1,(int)out);
5264       set_jump_target(nottaken,(int)out);
5265       assem_debug("2:\n");
5266       if(!likely[i]) {
5267         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5268                       ds_unneeded,ds_unneeded_upper);
5269         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5270         address_generation(i+1,&branch_regs[i],0);
5271         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5272         ds_assemble(i+1,&branch_regs[i]);
5273       }
5274       cc=get_reg(branch_regs[i].regmap,CCREG);
5275       if(cc==-1&&!likely[i]) {
5276         // Cycle count isn't in a register, temporarily load it then write it out
5277         emit_loadreg(CCREG,HOST_CCREG);
5278         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5279         int jaddr=(int)out;
5280         emit_jns(0);
5281         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5282         emit_storereg(CCREG,HOST_CCREG);
5283       }
5284       else{
5285         cc=get_reg(i_regmap,CCREG);
5286         assert(cc==HOST_CCREG);
5287         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5288         int jaddr=(int)out;
5289         emit_jns(0);
5290         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5291       }
5292     }
5293   }
5294 }
5295
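// sjump_assemble: emit code for the REGIMM branches (BLTZ/BGEZ and their
// AL/likely forms), which test rs1 against zero.  The structure mirrors
// cjump_assemble above; the BxxZAL variants additionally write the return
// address (start+i*4+8) into r31 whether or not the branch is taken.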
5296 void sjump_assemble(int i,struct regstat *i_regs)
5297 {
5298   signed char *i_regmap=i_regs->regmap;
5299   int cc;
5300   int match;
5301   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5302   assem_debug("smatch=%d\n",match);
5303   int s1h,s1l;
5304   int prev_cop1_usable=cop1_usable;
5305   int unconditional=0,nevertaken=0;
5306   int only32=0;
5307   int invert=0;
5308   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5309   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5310   if(!match) invert=1;
5311   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5312   if(i>(ba[i]-start)>>2) invert=1;
5313   #endif
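  // invert: emit the test with the opposite sense and fall through to the
  // taken path (which may need to shuffle registers), jumping over it when
  // the branch is not taken; used when the register mapping at the target
  // does not already match (or, with the Cortex-A8 hack, for backward
  // branches).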
5314
5315   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5316   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5317
5318   if(ooo[i]) {
5319     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5320     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5321   }
5322   else {
5323     s1l=get_reg(i_regmap,rs1[i]);
5324     s1h=get_reg(i_regmap,rs1[i]|64);
5325   }
5326   if(rs1[i]==0)
5327   {
5328     if(opcode2[i]&1) unconditional=1;
5329     else nevertaken=1;
5330     // These are never taken (r0 is never less than zero)
5331     //assert(opcode2[i]!=0);
5332     //assert(opcode2[i]!=2);
5333     //assert(opcode2[i]!=0x10);
5334     //assert(opcode2[i]!=0x12);
5335   }
5336   else {
5337     only32=(regs[i].was32>>rs1[i])&1;
5338   }
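  // only32: rs1 is known to hold a 32-bit (sign-extended) value, so the
  // sign test can be done on the low word alone and the upper half (s1h)
  // is not examined.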
5339
5340   if(ooo[i]) {
5341     // Out of order execution (delay slot first)
5342     //printf("OOOE\n");
5343     address_generation(i+1,i_regs,regs[i].regmap_entry);
5344     ds_assemble(i+1,i_regs);
5345     int adj;
5346     uint64_t bc_unneeded=branch_regs[i].u;
5347     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5348     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5349     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5350     bc_unneeded|=1;
5351     bc_unneeded_upper|=1;
5352     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5353                   bc_unneeded,bc_unneeded_upper);
5354     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5355     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5356     if(rt1[i]==31) {
5357       int rt,return_address;
5358       rt=get_reg(branch_regs[i].regmap,31);
5359       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5360       if(rt>=0) {
5361         // Save the PC even if the branch is not taken
5362         return_address=start+i*4+8;
5363         emit_movimm(return_address,rt); // PC into link register
5364         #ifdef IMM_PREFETCH
5365         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5366         #endif
5367       }
5368     }
5369     cc=get_reg(branch_regs[i].regmap,CCREG);
5370     assert(cc==HOST_CCREG);
5371     if(unconditional)
5372       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5373     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5374     assem_debug("cycle count (adj)\n");
5375     if(unconditional) {
5376       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5377       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5378         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5379         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5380         if(internal)
5381           assem_debug("branch: internal\n");
5382         else
5383           assem_debug("branch: external\n");
5384         if(internal&&is_ds[(ba[i]-start)>>2]) {
5385           ds_assemble_entry(i);
5386         }
5387         else {
5388           add_to_linker((int)out,ba[i],internal);
5389           emit_jmp(0);
5390         }
5391         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5392         if(((u_int)out)&7) emit_addnop(0);
5393         #endif
5394       }
5395     }
5396     else if(nevertaken) {
5397       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5398       int jaddr=(int)out;
5399       emit_jns(0);
5400       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5401     }
5402     else {
5403       int nottaken=0;
5404       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5405       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5406       if(!only32)
5407       {
5408         assert(s1h>=0);
5409         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5410         {
5411           emit_test(s1h,s1h);
5412           if(invert){
5413             nottaken=(int)out;
5414             emit_jns(1);
5415           }else{
5416             add_to_linker((int)out,ba[i],internal);
5417             emit_js(0);
5418           }
5419         }
5420         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5421         {
5422           emit_test(s1h,s1h);
5423           if(invert){
5424             nottaken=(int)out;
5425             emit_js(1);
5426           }else{
5427             add_to_linker((int)out,ba[i],internal);
5428             emit_jns(0);
5429           }
5430         }
5431       } // if(!only32)
5432       else
5433       {
5434         assert(s1l>=0);
5435         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5436         {
5437           emit_test(s1l,s1l);
5438           if(invert){
5439             nottaken=(int)out;
5440             emit_jns(1);
5441           }else{
5442             add_to_linker((int)out,ba[i],internal);
5443             emit_js(0);
5444           }
5445         }
5446         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5447         {
5448           emit_test(s1l,s1l);
5449           if(invert){
5450             nottaken=(int)out;
5451             emit_js(1);
5452           }else{
5453             add_to_linker((int)out,ba[i],internal);
5454             emit_jns(0);
5455           }
5456         }
5457       } // if(!only32)
5458
5459       if(invert) {
5460         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5461         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5462           if(adj) {
5463             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5464             add_to_linker((int)out,ba[i],internal);
5465           }else{
5466             emit_addnop(13);
5467             add_to_linker((int)out,ba[i],internal*2);
5468           }
5469           emit_jmp(0);
5470         }else
5471         #endif
5472         {
5473           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5474           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5475           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5476           if(internal)
5477             assem_debug("branch: internal\n");
5478           else
5479             assem_debug("branch: external\n");
5480           if(internal&&is_ds[(ba[i]-start)>>2]) {
5481             ds_assemble_entry(i);
5482           }
5483           else {
5484             add_to_linker((int)out,ba[i],internal);
5485             emit_jmp(0);
5486           }
5487         }
5488         set_jump_target(nottaken,(int)out);
5489       }
5490
5491       if(adj) {
5492         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5493       }
5494     } // (!unconditional)
5495   } // if(ooo)
5496   else
5497   {
5498     // In-order execution (branch first)
5499     //printf("IOE\n");
5500     int nottaken=0;
5501     if(rt1[i]==31) {
5502       int rt,return_address;
5503       rt=get_reg(branch_regs[i].regmap,31);
5504       if(rt>=0) {
5505         // Save the PC even if the branch is not taken
5506         return_address=start+i*4+8;
5507         emit_movimm(return_address,rt); // PC into link register
5508         #ifdef IMM_PREFETCH
5509         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5510         #endif
5511       }
5512     }
5513     if(!unconditional) {
5514       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5515       if(!only32)
5516       {
5517         assert(s1h>=0);
5518         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5519         {
5520           emit_test(s1h,s1h);
5521           nottaken=(int)out;
5522           emit_jns(1);
5523         }
5524         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5525         {
5526           emit_test(s1h,s1h);
5527           nottaken=(int)out;
5528           emit_js(1);
5529         }
5530       } // if(!only32)
5531       else
5532       {
5533         assert(s1l>=0);
5534         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
5535         {
5536           emit_test(s1l,s1l);
5537           nottaken=(int)out;
5538           emit_jns(1);
5539         }
5540         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
5541         {
5542           emit_test(s1l,s1l);
5543           nottaken=(int)out;
5544           emit_js(1);
5545         }
5546       }
5547     } // if(!unconditional)
5548     int adj;
5549     uint64_t ds_unneeded=branch_regs[i].u;
5550     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5551     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5552     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5553     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5554     ds_unneeded|=1;
5555     ds_unneeded_upper|=1;
5556     // branch taken
5557     if(!nevertaken) {
5558       //assem_debug("1:\n");
5559       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5560                     ds_unneeded,ds_unneeded_upper);
5561       // load regs
5562       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5563       address_generation(i+1,&branch_regs[i],0);
5564       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5565       ds_assemble(i+1,&branch_regs[i]);
5566       cc=get_reg(branch_regs[i].regmap,CCREG);
5567       if(cc==-1) {
5568         emit_loadreg(CCREG,cc=HOST_CCREG);
5569         // CHECK: Is the following instruction (fall thru) allocated ok?
5570       }
5571       assert(cc==HOST_CCREG);
5572       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5573       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5574       assem_debug("cycle count (adj)\n");
5575       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5576       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5577       if(internal)
5578         assem_debug("branch: internal\n");
5579       else
5580         assem_debug("branch: external\n");
5581       if(internal&&is_ds[(ba[i]-start)>>2]) {
5582         ds_assemble_entry(i);
5583       }
5584       else {
5585         add_to_linker((int)out,ba[i],internal);
5586         emit_jmp(0);
5587       }
5588     }
5589     // branch not taken
5590     cop1_usable=prev_cop1_usable;
5591     if(!unconditional) {
5592       set_jump_target(nottaken,(int)out);
5593       assem_debug("1:\n");
5594       if(!likely[i]) {
5595         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5596                       ds_unneeded,ds_unneeded_upper);
5597         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5598         address_generation(i+1,&branch_regs[i],0);
5599         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5600         ds_assemble(i+1,&branch_regs[i]);
5601       }
5602       cc=get_reg(branch_regs[i].regmap,CCREG);
5603       if(cc==-1&&!likely[i]) {
5604         // Cycle count isn't in a register, temporarily load it then write it out
5605         emit_loadreg(CCREG,HOST_CCREG);
5606         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5607         int jaddr=(int)out;
5608         emit_jns(0);
5609         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5610         emit_storereg(CCREG,HOST_CCREG);
5611       }
5612       else{
5613         cc=get_reg(i_regmap,CCREG);
5614         assert(cc==HOST_CCREG);
5615         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5616         int jaddr=(int)out;
5617         emit_jns(0);
5618         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5619       }
5620     }
5621   }
5622 }
5623
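// fjump_assemble: emit code for the COP1 condition branches BC1F/BC1T
// (and their likely forms).  The FP condition flag (bit 23 of the status
// word held in FSREG) is tested directly; if the cop1-usable bit of SR has
// not yet been checked in this block, an FP_STUB exception check is
// emitted first.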
5624 void fjump_assemble(int i,struct regstat *i_regs)
5625 {
5626   signed char *i_regmap=i_regs->regmap;
5627   int cc;
5628   int match;
5629   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5630   assem_debug("fmatch=%d\n",match);
5631   int fs,cs;
5632   int eaddr;
5633   int invert=0;
5634   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5635   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5636   if(!match) invert=1;
5637   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5638   if(i>(ba[i]-start)>>2) invert=1;
5639   #endif
5640
5641   if(ooo[i]) {
5642     fs=get_reg(branch_regs[i].regmap,FSREG);
5643     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
5644   }
5645   else {
5646     fs=get_reg(i_regmap,FSREG);
5647   }
5648
5649   // Check cop1 unusable
5650   if(!cop1_usable) {
5651     cs=get_reg(i_regmap,CSREG);
5652     assert(cs>=0);
5653     emit_testimm(cs,0x20000000);
5654     eaddr=(int)out;
5655     emit_jeq(0);
5656     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
5657     cop1_usable=1;
5658   }
5659
5660   if(ooo[i]) {
5661     // Out of order execution (delay slot first)
5662     //printf("OOOE\n");
5663     ds_assemble(i+1,i_regs);
5664     int adj;
5665     uint64_t bc_unneeded=branch_regs[i].u;
5666     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5667     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5668     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5669     bc_unneeded|=1;
5670     bc_unneeded_upper|=1;
5671     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5672                   bc_unneeded,bc_unneeded_upper);
5673     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5674     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5675     cc=get_reg(branch_regs[i].regmap,CCREG);
5676     assert(cc==HOST_CCREG);
5677     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5678     assem_debug("cycle count (adj)\n");
5679     if(1) {
5680       int nottaken=0;
5681       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5682       if(1) {
5683         assert(fs>=0);
5684         emit_testimm(fs,0x800000);
5685         if(source[i]&0x10000) // BC1T
5686         {
5687           if(invert){
5688             nottaken=(int)out;
5689             emit_jeq(1);
5690           }else{
5691             add_to_linker((int)out,ba[i],internal);
5692             emit_jne(0);
5693           }
5694         }
5695         else // BC1F
5696         {
5697           if(invert){
5698             nottaken=(int)out;
5699             emit_jne(1);
5700           }else{
5701             add_to_linker((int)out,ba[i],internal);
5702             emit_jeq(0);
5703           }
5704         }
5705       } // if(!only32)
5706
5707       if(invert) {
5708         if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5709         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5710         else if(match) emit_addnop(13);
5711         #endif
5712         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5713         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5714         if(internal)
5715           assem_debug("branch: internal\n");
5716         else
5717           assem_debug("branch: external\n");
5718         if(internal&&is_ds[(ba[i]-start)>>2]) {
5719           ds_assemble_entry(i);
5720         }
5721         else {
5722           add_to_linker((int)out,ba[i],internal);
5723           emit_jmp(0);
5724         }
5725         set_jump_target(nottaken,(int)out);
5726       }
5727
5728       if(adj) {
5729         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5730       }
5731     } // (!unconditional)
5732   } // if(ooo)
5733   else
5734   {
5735     // In-order execution (branch first)
5736     //printf("IOE\n");
5737     int nottaken=0;
5738     if(1) {
5739       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5740       if(1) {
5741         assert(fs>=0);
5742         emit_testimm(fs,0x800000);
5743         if(source[i]&0x10000) // BC1T
5744         {
5745           nottaken=(int)out;
5746           emit_jeq(1);
5747         }
5748         else // BC1F
5749         {
5750           nottaken=(int)out;
5751           emit_jne(1);
5752         }
5753       }
5754     } // if(!unconditional)
5755     int adj;
5756     uint64_t ds_unneeded=branch_regs[i].u;
5757     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5758     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5759     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5760     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5761     ds_unneeded|=1;
5762     ds_unneeded_upper|=1;
5763     // branch taken
5764     //assem_debug("1:\n");
5765     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5766                   ds_unneeded,ds_unneeded_upper);
5767     // load regs
5768     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5769     address_generation(i+1,&branch_regs[i],0);
5770     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5771     ds_assemble(i+1,&branch_regs[i]);
5772     cc=get_reg(branch_regs[i].regmap,CCREG);
5773     if(cc==-1) {
5774       emit_loadreg(CCREG,cc=HOST_CCREG);
5775       // CHECK: Is the following instruction (fall thru) allocated ok?
5776     }
5777     assert(cc==HOST_CCREG);
5778     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5779     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5780     assem_debug("cycle count (adj)\n");
5781     if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5782     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5783     if(internal)
5784       assem_debug("branch: internal\n");
5785     else
5786       assem_debug("branch: external\n");
5787     if(internal&&is_ds[(ba[i]-start)>>2]) {
5788       ds_assemble_entry(i);
5789     }
5790     else {
5791       add_to_linker((int)out,ba[i],internal);
5792       emit_jmp(0);
5793     }
5794
5795     // branch not taken
5796     if(1) { // <- FIXME (don't need this)
5797       set_jump_target(nottaken,(int)out);
5798       assem_debug("1:\n");
5799       if(!likely[i]) {
5800         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5801                       ds_unneeded,ds_unneeded_upper);
5802         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5803         address_generation(i+1,&branch_regs[i],0);
5804         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5805         ds_assemble(i+1,&branch_regs[i]);
5806       }
5807       cc=get_reg(branch_regs[i].regmap,CCREG);
5808       if(cc==-1&&!likely[i]) {
5809         // Cycle count isn't in a register, temporarily load it then write it out
5810         emit_loadreg(CCREG,HOST_CCREG);
5811         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5812         int jaddr=(int)out;
5813         emit_jns(0);
5814         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5815         emit_storereg(CCREG,HOST_CCREG);
5816       }
5817       else{
5818         cc=get_reg(i_regmap,CCREG);
5819         assert(cc==HOST_CCREG);
5820         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5821         int jaddr=(int)out;
5822         emit_jns(0);
5823         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5824       }
5825     }
5826   }
5827 }
5828
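// pagespan_assemble: handle a branch that is the last instruction of a
// virtual page, so its delay slot lives in the next page.  The branch
// outcome is resolved into HOST_BTREG with conditional moves, dirty
// registers are written back, and control leaves through an external-jump
// stub; the delay slot itself is compiled separately (see pagespan_ds
// below).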
5829 static void pagespan_assemble(int i,struct regstat *i_regs)
5830 {
5831   int s1l=get_reg(i_regs->regmap,rs1[i]);
5832   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
5833   int s2l=get_reg(i_regs->regmap,rs2[i]);
5834   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
5835   int taken=0;
5836   int nottaken=0;
5837   int unconditional=0;
5838   if(rs1[i]==0)
5839   {
5840     s1l=s2l;s1h=s2h;
5841     s2l=s2h=-1;
5842   }
5843   else if(rs2[i]==0)
5844   {
5845     s2l=s2h=-1;
5846   }
5847   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
5848     s1h=s2h=-1;
5849   }
5850   int hr=0;
5851   int addr=-1,alt=-1,ntaddr=-1;
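  // Pick scratch host registers (addr, alt and, for BLEZ/BGTZ, ntaddr)
  // that do not hold the branch operands or the cycle counter; the target
  // address is then selected into 'addr' with conditional moves below.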
5852   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
5853   else {
5854     while(hr<HOST_REGS)
5855     {
5856       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5857          (i_regs->regmap[hr]&63)!=rs1[i] &&
5858          (i_regs->regmap[hr]&63)!=rs2[i] )
5859       {
5860         addr=hr++;break;
5861       }
5862       hr++;
5863     }
5864   }
5865   while(hr<HOST_REGS)
5866   {
5867     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5868        (i_regs->regmap[hr]&63)!=rs1[i] &&
5869        (i_regs->regmap[hr]&63)!=rs2[i] )
5870     {
5871       alt=hr++;break;
5872     }
5873     hr++;
5874   }
5875   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5876   {
5877     while(hr<HOST_REGS)
5878     {
5879       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
5880          (i_regs->regmap[hr]&63)!=rs1[i] &&
5881          (i_regs->regmap[hr]&63)!=rs2[i] )
5882       {
5883         ntaddr=hr;break;
5884       }
5885       hr++;
5886     }
5887   }
5888   assert(hr<HOST_REGS);
5889   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
5890     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
5891   }
5892   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5893   if(opcode[i]==2) // J
5894   {
5895     unconditional=1;
5896   }
5897   if(opcode[i]==3) // JAL
5898   {
5899     // TODO: mini_ht
5900     int rt=get_reg(i_regs->regmap,31);
5901     emit_movimm(start+i*4+8,rt);
5902     unconditional=1;
5903   }
5904   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
5905   {
5906     emit_mov(s1l,addr);
5907     if(opcode2[i]==9) // JALR
5908     {
5909       int rt=get_reg(i_regs->regmap,rt1[i]);
5910       emit_movimm(start+i*4+8,rt);
5911     }
5912   }
5913   if((opcode[i]&0x3f)==4) // BEQ
5914   {
5915     if(rs1[i]==rs2[i])
5916     {
5917       unconditional=1;
5918     }
5919     else
5920     #ifdef HAVE_CMOV_IMM
5921     if(s1h<0) {
5922       if(s2l>=0) emit_cmp(s1l,s2l);
5923       else emit_test(s1l,s1l);
5924       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5925     }
5926     else
5927     #endif
5928     {
5929       assert(s1l>=0);
5930       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5931       if(s1h>=0) {
5932         if(s2h>=0) emit_cmp(s1h,s2h);
5933         else emit_test(s1h,s1h);
5934         emit_cmovne_reg(alt,addr);
5935       }
5936       if(s2l>=0) emit_cmp(s1l,s2l);
5937       else emit_test(s1l,s1l);
5938       emit_cmovne_reg(alt,addr);
5939     }
5940   }
5941   if((opcode[i]&0x3f)==5) // BNE
5942   {
5943     #ifdef HAVE_CMOV_IMM
5944     if(s1h<0) {
5945       if(s2l>=0) emit_cmp(s1l,s2l);
5946       else emit_test(s1l,s1l);
5947       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5948     }
5949     else
5950     #endif
5951     {
5952       assert(s1l>=0);
5953       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5954       if(s1h>=0) {
5955         if(s2h>=0) emit_cmp(s1h,s2h);
5956         else emit_test(s1h,s1h);
5957         emit_cmovne_reg(alt,addr);
5958       }
5959       if(s2l>=0) emit_cmp(s1l,s2l);
5960       else emit_test(s1l,s1l);
5961       emit_cmovne_reg(alt,addr);
5962     }
5963   }
5964   if((opcode[i]&0x3f)==0x14) // BEQL
5965   {
5966     if(s1h>=0) {
5967       if(s2h>=0) emit_cmp(s1h,s2h);
5968       else emit_test(s1h,s1h);
5969       nottaken=(int)out;
5970       emit_jne(0);
5971     }
5972     if(s2l>=0) emit_cmp(s1l,s2l);
5973     else emit_test(s1l,s1l);
5974     if(nottaken) set_jump_target(nottaken,(int)out);
5975     nottaken=(int)out;
5976     emit_jne(0);
5977   }
5978   if((opcode[i]&0x3f)==0x15) // BNEL
5979   {
5980     if(s1h>=0) {
5981       if(s2h>=0) emit_cmp(s1h,s2h);
5982       else emit_test(s1h,s1h);
5983       taken=(int)out;
5984       emit_jne(0);
5985     }
5986     if(s2l>=0) emit_cmp(s1l,s2l);
5987     else emit_test(s1l,s1l);
5988     nottaken=(int)out;
5989     emit_jeq(0);
5990     if(taken) set_jump_target(taken,(int)out);
5991   }
5992   if((opcode[i]&0x3f)==6) // BLEZ
5993   {
5994     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5995     emit_cmpimm(s1l,1);
5996     if(s1h>=0) emit_mov(addr,ntaddr);
5997     emit_cmovl_reg(alt,addr);
5998     if(s1h>=0) {
5999       emit_test(s1h,s1h);
6000       emit_cmovne_reg(ntaddr,addr);
6001       emit_cmovs_reg(alt,addr);
6002     }
6003   }
6004   if((opcode[i]&0x3f)==7) // BGTZ
6005   {
6006     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6007     emit_cmpimm(s1l,1);
6008     if(s1h>=0) emit_mov(addr,alt);
6009     emit_cmovl_reg(ntaddr,addr);
6010     if(s1h>=0) {
6011       emit_test(s1h,s1h);
6012       emit_cmovne_reg(alt,addr);
6013       emit_cmovs_reg(ntaddr,addr);
6014     }
6015   }
6016   if((opcode[i]&0x3f)==0x16) // BLEZL
6017   {
6018     assert((opcode[i]&0x3f)!=0x16);
6019   }
6020   if((opcode[i]&0x3f)==0x17) // BGTZL
6021   {
6022     assert((opcode[i]&0x3f)!=0x17);
6023   }
6024   assert(opcode[i]!=1); // BLTZ/BGEZ
6025
6026   //FIXME: Check CSREG
6027   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6028     if((source[i]&0x30000)==0) // BC1F
6029     {
6030       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6031       emit_testimm(s1l,0x800000);
6032       emit_cmovne_reg(alt,addr);
6033     }
6034     if((source[i]&0x30000)==0x10000) // BC1T
6035     {
6036       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6037       emit_testimm(s1l,0x800000);
6038       emit_cmovne_reg(alt,addr);
6039     }
6040     if((source[i]&0x30000)==0x20000) // BC1FL
6041     {
6042       emit_testimm(s1l,0x800000);
6043       nottaken=(int)out;
6044       emit_jne(0);
6045     }
6046     if((source[i]&0x30000)==0x30000) // BC1TL
6047     {
6048       emit_testimm(s1l,0x800000);
6049       nottaken=(int)out;
6050       emit_jeq(0);
6051     }
6052   }
6053
6054   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6055   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6056   if(likely[i]||unconditional)
6057   {
6058     emit_movimm(ba[i],HOST_BTREG);
6059   }
6060   else if(addr!=HOST_BTREG)
6061   {
6062     emit_mov(addr,HOST_BTREG);
6063   }
6064   void *branch_addr=out;
6065   emit_jmp(0);
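  // The stub target is (start+i*4+4)|1: the delay slot's address with the
  // low bit set, matching the start+1 entry that pagespan_ds() registers
  // for the following block.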
6066   int target_addr=start+i*4+5;
6067   void *stub=out;
6068   void *compiled_target_addr=check_addr(target_addr);
6069   emit_extjump_ds((int)branch_addr,target_addr);
6070   if(compiled_target_addr) {
6071     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6072     add_link(target_addr,stub);
6073   }
6074   else set_jump_target((int)branch_addr,(int)stub);
6075   if(likely[i]) {
6076     // Not-taken path
6077     set_jump_target((int)nottaken,(int)out);
6078     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6079     void *branch_addr=out;
6080     emit_jmp(0);
6081     int target_addr=start+i*4+8;
6082     void *stub=out;
6083     void *compiled_target_addr=check_addr(target_addr);
6084     emit_extjump_ds((int)branch_addr,target_addr);
6085     if(compiled_target_addr) {
6086       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6087       add_link(target_addr,stub);
6088     }
6089     else set_jump_target((int)branch_addr,(int)stub);
6090   }
6091 }
6092
6093 // Assemble the delay slot for the above
6094 static void pagespan_ds()
6095 {
6096   assem_debug("initial delay slot:\n");
6097   u_int vaddr=start+1;
6098   u_int page=get_page(vaddr);
6099   u_int vpage=get_vpage(vaddr);
6100   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6101   do_dirty_stub_ds();
6102   ll_add(jump_in+page,vaddr,(void *)out);
6103   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6104   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6105     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6106   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6107     emit_writeword(HOST_BTREG,(int)&branch_target);
6108   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6109   address_generation(0,&regs[0],regs[0].regmap_entry);
6110   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6111     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6112   cop1_usable=0;
6113   is_delayslot=0;
6114   switch(itype[0]) {
6115     case ALU:
6116       alu_assemble(0,&regs[0]);break;
6117     case IMM16:
6118       imm16_assemble(0,&regs[0]);break;
6119     case SHIFT:
6120       shift_assemble(0,&regs[0]);break;
6121     case SHIFTIMM:
6122       shiftimm_assemble(0,&regs[0]);break;
6123     case LOAD:
6124       load_assemble(0,&regs[0]);break;
6125     case LOADLR:
6126       loadlr_assemble(0,&regs[0]);break;
6127     case STORE:
6128       store_assemble(0,&regs[0]);break;
6129     case STORELR:
6130       storelr_assemble(0,&regs[0]);break;
6131     case COP0:
6132       cop0_assemble(0,&regs[0]);break;
6133     case COP1:
6134       cop1_assemble(0,&regs[0]);break;
6135     case C1LS:
6136       c1ls_assemble(0,&regs[0]);break;
6137     case COP2:
6138       cop2_assemble(0,&regs[0]);break;
6139     case C2LS:
6140       c2ls_assemble(0,&regs[0]);break;
6141     case C2OP:
6142       c2op_assemble(0,&regs[0]);break;
6143     case FCONV:
6144       fconv_assemble(0,&regs[0]);break;
6145     case FLOAT:
6146       float_assemble(0,&regs[0]);break;
6147     case FCOMP:
6148       fcomp_assemble(0,&regs[0]);break;
6149     case MULTDIV:
6150       multdiv_assemble(0,&regs[0]);break;
6151     case MOV:
6152       mov_assemble(0,&regs[0]);break;
6153     case SYSCALL:
6154     case HLECALL:
6155     case INTCALL:
6156     case SPAN:
6157     case UJUMP:
6158     case RJUMP:
6159     case CJUMP:
6160     case SJUMP:
6161     case FJUMP:
6162       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
6163   }
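  // Dispatch: if the branch target saved in BTREG is just the next
  // instruction (start+4), fall through into this block's normal code;
  // otherwise jump through jump_vaddr to look up (or compile) the target.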
6164   int btaddr=get_reg(regs[0].regmap,BTREG);
6165   if(btaddr<0) {
6166     btaddr=get_reg(regs[0].regmap,-1);
6167     emit_readword((int)&branch_target,btaddr);
6168   }
6169   assert(btaddr!=HOST_CCREG);
6170   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6171 #ifdef HOST_IMM8
6172   emit_movimm(start+4,HOST_TEMPREG);
6173   emit_cmp(btaddr,HOST_TEMPREG);
6174 #else
6175   emit_cmpimm(btaddr,start+4);
6176 #endif
6177   int branch=(int)out;
6178   emit_jeq(0);
6179   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6180   emit_jmp(jump_vaddr_reg[btaddr]);
6181   set_jump_target(branch,(int)out);
6182   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6183   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6184 }
6185
6186 // Basic liveness analysis for MIPS registers
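// unneeded_reg[i] / unneeded_reg_upper[i] are bitmasks over MIPS registers:
// bit r set means the (lower / upper 32 bits of the) value in register r is
// dead at instruction i and need not be preserved.  gte_unneeded[] tracks
// the same property for GTE (COP2) registers.  The scan runs backwards from
// iend to istart; r bounds the recursion depth used for backward branches.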
6187 void unneeded_registers(int istart,int iend,int r)
6188 {
6189   int i;
6190   uint64_t u,uu,gte_u,b,bu,gte_bu;
6191   uint64_t temp_u,temp_uu,temp_gte_u=0;
6192   uint64_t tdep;
6193   uint64_t gte_u_unknown=0;
6194   if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
6195     gte_u_unknown=~0ll;
6196   if(iend==slen-1) {
6197     u=1;uu=1;
6198     gte_u=gte_u_unknown;
6199   }else{
6200     u=unneeded_reg[iend+1];
6201     uu=unneeded_reg_upper[iend+1];
6202     u=1;uu=1;
6203     gte_u=gte_unneeded[iend+1];
6204   }
6205
6206   for (i=iend;i>=istart;i--)
6207   {
6208     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6209     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6210     {
6211       // If subroutine call, flag return address as a possible branch target
6212       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6213
6214       if(ba[i]<start || ba[i]>=(start+slen*4))
6215       {
6216         // Branch out of this block, flush all regs
6217         u=1;
6218         uu=1;
6219         gte_u=gte_u_unknown;
6220         /* Hexagon hack
6221         if(itype[i]==UJUMP&&rt1[i]==31)
6222         {
6223           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6224         }
6225         if(itype[i]==RJUMP&&rs1[i]==31)
6226         {
6227           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6228         }
6229         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6230           if(itype[i]==UJUMP&&rt1[i]==31)
6231           {
6232             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6233             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6234           }
6235           if(itype[i]==RJUMP&&rs1[i]==31)
6236           {
6237             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6238             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6239           }
6240         }*/
6241         branch_unneeded_reg[i]=u;
6242         branch_unneeded_reg_upper[i]=uu;
6243         // Merge in delay slot
6244         tdep=(~uu>>rt1[i+1])&1;
6245         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6246         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6247         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6248         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6249         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6250         u|=1;uu|=1;
6251         gte_u|=gte_rt[i+1];
6252         gte_u&=~gte_rs[i+1];
6253         // If branch is "likely" (and conditional)
6254         // then we skip the delay slot on the fall-thru path
6255         if(likely[i]) {
6256           if(i<slen-1) {
6257             u&=unneeded_reg[i+2];
6258             uu&=unneeded_reg_upper[i+2];
6259             gte_u&=gte_unneeded[i+2];
6260           }
6261           else
6262           {
6263             u=1;
6264             uu=1;
6265             gte_u=gte_u_unknown;
6266           }
6267         }
6268       }
6269       else
6270       {
6271         // Internal branch, flag target
6272         bt[(ba[i]-start)>>2]=1;
6273         if(ba[i]<=start+i*4) {
6274           // Backward branch
6275           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6276           {
6277             // Unconditional branch
6278             temp_u=1;temp_uu=1;
6279             temp_gte_u=0;
6280           } else {
6281             // Conditional branch (not taken case)
6282             temp_u=unneeded_reg[i+2];
6283             temp_uu=unneeded_reg_upper[i+2];
6284             temp_gte_u&=gte_unneeded[i+2];
6285           }
6286           // Merge in delay slot
6287           tdep=(~temp_uu>>rt1[i+1])&1;
6288           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6289           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6290           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6291           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6292           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6293           temp_u|=1;temp_uu|=1;
6294           temp_gte_u|=gte_rt[i+1];
6295           temp_gte_u&=~gte_rs[i+1];
6296           // If branch is "likely" (and conditional)
6297           // then we skip the delay slot on the fall-thru path
6298           if(likely[i]) {
6299             if(i<slen-1) {
6300               temp_u&=unneeded_reg[i+2];
6301               temp_uu&=unneeded_reg_upper[i+2];
6302               temp_gte_u&=gte_unneeded[i+2];
6303             }
6304             else
6305             {
6306               temp_u=1;
6307               temp_uu=1;
6308               temp_gte_u=gte_u_unknown;
6309             }
6310           }
6311           tdep=(~temp_uu>>rt1[i])&1;
6312           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6313           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6314           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6315           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6316           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6317           temp_u|=1;temp_uu|=1;
6318           temp_gte_u|=gte_rt[i];
6319           temp_gte_u&=~gte_rs[i];
6320           unneeded_reg[i]=temp_u;
6321           unneeded_reg_upper[i]=temp_uu;
6322           gte_unneeded[i]=temp_gte_u;
6323           // Only go three levels deep.  This recursion can take an
6324           // excessive amount of time if there are a lot of nested loops.
6325           if(r<2) {
6326             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6327           }else{
6328             unneeded_reg[(ba[i]-start)>>2]=1;
6329             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6330             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
6331           }
6332         } /*else*/ if(1) {
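          // Liveness at the branch target is merged in for every internal
          // branch, not only forward ones (hence the commented-out else).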
6333           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6334           {
6335             // Unconditional branch
6336             u=unneeded_reg[(ba[i]-start)>>2];
6337             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6338             gte_u=gte_unneeded[(ba[i]-start)>>2];
6339             branch_unneeded_reg[i]=u;
6340             branch_unneeded_reg_upper[i]=uu;
6341         //u=1;
6342         //uu=1;
6343         //branch_unneeded_reg[i]=u;
6344         //branch_unneeded_reg_upper[i]=uu;
6345             // Merge in delay slot
6346             tdep=(~uu>>rt1[i+1])&1;
6347             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6348             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6349             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6350             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6351             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6352             u|=1;uu|=1;
6353             gte_u|=gte_rt[i+1];
6354             gte_u&=~gte_rs[i+1];
6355           } else {
6356             // Conditional branch
6357             b=unneeded_reg[(ba[i]-start)>>2];
6358             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6359             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6360             branch_unneeded_reg[i]=b;
6361             branch_unneeded_reg_upper[i]=bu;
6362         //b=1;
6363         //bu=1;
6364         //branch_unneeded_reg[i]=b;
6365         //branch_unneeded_reg_upper[i]=bu;
6366             // Branch delay slot
6367             tdep=(~uu>>rt1[i+1])&1;
6368             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6369             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6370             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6371             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6372             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6373             b|=1;bu|=1;
6374             gte_bu|=gte_rt[i+1];
6375             gte_bu&=~gte_rs[i+1];
6376             // If branch is "likely" then we skip the
6377             // delay slot on the fall-thru path
6378             if(likely[i]) {
6379               u=b;
6380               uu=bu;
6381               gte_u=gte_bu;
6382               if(i<slen-1) {
6383                 u&=unneeded_reg[i+2];
6384                 uu&=unneeded_reg_upper[i+2];
6385                 gte_u&=gte_unneeded[i+2];
6386         //u=1;
6387         //uu=1;
6388               }
6389             } else {
6390               u&=b;
6391               uu&=bu;
6392               gte_u&=gte_bu;
6393         //u=1;
6394         //uu=1;
6395             }
6396             if(i<slen-1) {
6397               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6398               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6399         //branch_unneeded_reg[i]=1;
6400         //branch_unneeded_reg_upper[i]=1;
6401             } else {
6402               branch_unneeded_reg[i]=1;
6403               branch_unneeded_reg_upper[i]=1;
6404             }
6405           }
6406         }
6407       }
6408     }
6409     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6410     {
6411       // SYSCALL instruction (software interrupt)
6412       u=1;
6413       uu=1;
6414     }
6415     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6416     {
6417       // ERET instruction (return from interrupt)
6418       u=1;
6419       uu=1;
6420     }
6421     //u=uu=1; // DEBUG
6422     tdep=(~uu>>rt1[i])&1;
6423     // Written registers are unneeded
6424     u|=1LL<<rt1[i];
6425     u|=1LL<<rt2[i];
6426     uu|=1LL<<rt1[i];
6427     uu|=1LL<<rt2[i];
6428     gte_u|=gte_rt[i];
6429     // Accessed registers are needed
6430     u&=~(1LL<<rs1[i]);
6431     u&=~(1LL<<rs2[i]);
6432     uu&=~(1LL<<us1[i]);
6433     uu&=~(1LL<<us2[i]);
6434     gte_u&=~gte_rs[i];
6435     if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
6436       gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
6437     // Source-target dependencies
6438     uu&=~(tdep<<dep1[i]);
6439     uu&=~(tdep<<dep2[i]);
6440     // R0 is always unneeded
6441     u|=1;uu|=1;
6442     // Save it
6443     unneeded_reg[i]=u;
6444     unneeded_reg_upper[i]=uu;
6445     gte_unneeded[i]=gte_u;
6446     /*
6447     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6448     printf("U:");
6449     int r;
6450     for(r=1;r<=CCREG;r++) {
6451       if((unneeded_reg[i]>>r)&1) {
6452         if(r==HIREG) printf(" HI");
6453         else if(r==LOREG) printf(" LO");
6454         else printf(" r%d",r);
6455       }
6456     }
6457     printf(" UU:");
6458     for(r=1;r<=CCREG;r++) {
6459       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6460         if(r==HIREG) printf(" HI");
6461         else if(r==LOREG) printf(" LO");
6462         else printf(" r%d",r);
6463       }
6464     }
6465     printf("\n");*/
6466   }
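  // The R3000A has only 32-bit registers, so the upper-half masks are
  // forced to "all unneeded" for the whole range.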
6467   for (i=iend;i>=istart;i--)
6468   {
6469     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
6470   }
6471 }
6472
6473 // Write back dirty registers as soon as we will no longer modify them,
6474 // so that we don't end up with lots of writes at the branches.
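// will_dirty / wont_dirty are per-host-register bitmasks propagated
// backwards through the block, merged across branches and delay slots, to
// decide where dirty values can safely be written back; when wr is nonzero
// the computed masks are folded back into regs[]/branch_regs[].dirty.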
6475 void clean_registers(int istart,int iend,int wr)
6476 {
6477   int i;
6478   int r;
6479   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
6480   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
6481   if(iend==slen-1) {
6482     will_dirty_i=will_dirty_next=0;
6483     wont_dirty_i=wont_dirty_next=0;
6484   }else{
6485     will_dirty_i=will_dirty_next=will_dirty[iend+1];
6486     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
6487   }
6488   for (i=iend;i>=istart;i--)
6489   {
6490     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6491     {
6492       if(ba[i]<start || ba[i]>=(start+slen*4))
6493       {
6494         // Branch out of this block, flush all regs
6495         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6496         {
6497           // Unconditional branch
6498           will_dirty_i=0;
6499           wont_dirty_i=0;
6500           // Merge in delay slot (will dirty)
6501           for(r=0;r<HOST_REGS;r++) {
6502             if(r!=EXCLUDE_REG) {
6503               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6504               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6505               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6506               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6507               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6508               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6509               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6510               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6511               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6512               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6513               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6514               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6515               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6516               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6517             }
6518           }
6519         }
6520         else
6521         {
6522           // Conditional branch
6523           will_dirty_i=0;
6524           wont_dirty_i=wont_dirty_next;
6525           // Merge in delay slot (will dirty)
6526           for(r=0;r<HOST_REGS;r++) {
6527             if(r!=EXCLUDE_REG) {
6528               if(!likely[i]) {
6529                 // Might not dirty if likely branch is not taken
6530                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6531                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6532                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6533                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6534                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6535                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
6536                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6537                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6538                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6539                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6540                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6541                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6542                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6543                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6544               }
6545             }
6546           }
6547         }
6548         // Merge in delay slot (wont dirty)
6549         for(r=0;r<HOST_REGS;r++) {
6550           if(r!=EXCLUDE_REG) {
6551             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6552             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6553             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6554             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6555             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6556             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6557             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6558             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6559             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6560             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6561           }
6562         }
6563         if(wr) {
6564           #ifndef DESTRUCTIVE_WRITEBACK
6565           branch_regs[i].dirty&=wont_dirty_i;
6566           #endif
6567           branch_regs[i].dirty|=will_dirty_i;
6568         }
6569       }
6570       else
6571       {
6572         // Internal branch
6573         if(ba[i]<=start+i*4) {
6574           // Backward branch
6575           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6576           {
6577             // Unconditional branch
6578             temp_will_dirty=0;
6579             temp_wont_dirty=0;
6580             // Merge in delay slot (will dirty)
6581             for(r=0;r<HOST_REGS;r++) {
6582               if(r!=EXCLUDE_REG) {
6583                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6584                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6585                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6586                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6587                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6588                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6589                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6590                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6591                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6592                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6593                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6594                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6595                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6596                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6597               }
6598             }
6599           } else {
6600             // Conditional branch (not taken case)
6601             temp_will_dirty=will_dirty_next;
6602             temp_wont_dirty=wont_dirty_next;
6603             // Merge in delay slot (will dirty)
6604             for(r=0;r<HOST_REGS;r++) {
6605               if(r!=EXCLUDE_REG) {
6606                 if(!likely[i]) {
6607                   // Will not dirty if likely branch is not taken
6608                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6609                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6610                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6611                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6612                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6613                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
6614                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6615                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
6616                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
6617                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
6618                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
6619                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
6620                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
6621                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
6622                 }
6623               }
6624             }
6625           }
6626           // Merge in delay slot (won't dirty)
6627           for(r=0;r<HOST_REGS;r++) {
6628             if(r!=EXCLUDE_REG) {
6629               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6630               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6631               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6632               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6633               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6634               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
6635               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
6636               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
6637               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
6638               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
6639             }
6640           }
6641           // Deal with changed mappings
6642           if(i<iend) {
6643             for(r=0;r<HOST_REGS;r++) {
6644               if(r!=EXCLUDE_REG) {
6645                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
6646                   temp_will_dirty&=~(1<<r);
6647                   temp_wont_dirty&=~(1<<r);
6648                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6649                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6650                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6651                   } else {
6652                     temp_will_dirty|=1<<r;
6653                     temp_wont_dirty|=1<<r;
6654                   }
6655                 }
6656               }
6657             }
6658           }
6659           if(wr) {
6660             will_dirty[i]=temp_will_dirty;
6661             wont_dirty[i]=temp_wont_dirty;
6662             clean_registers((ba[i]-start)>>2,i-1,0);
6663           }else{
6664             // Limit recursion.  It can take an excessive amount
6665             // of time if there are a lot of nested loops.
6666             will_dirty[(ba[i]-start)>>2]=0;
6667             wont_dirty[(ba[i]-start)>>2]=-1;
6668           }
6669         }
6670         /*else*/ if(1)
6671         {
6672           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6673           {
6674             // Unconditional branch
6675             will_dirty_i=0;
6676             wont_dirty_i=0;
6677           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6678             for(r=0;r<HOST_REGS;r++) {
6679               if(r!=EXCLUDE_REG) {
6680                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6681                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
6682                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6683                 }
6684                 if(branch_regs[i].regmap[r]>=0) {
6685                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6686                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
6687                 }
6688               }
6689             }
6690           //}
6691             // Merge in delay slot
6692             for(r=0;r<HOST_REGS;r++) {
6693               if(r!=EXCLUDE_REG) {
6694                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6695                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6696                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6697                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6698                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6699                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6700                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6701                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6702                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6703                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6704                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6705                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6706                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6707                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6708               }
6709             }
6710           } else {
6711             // Conditional branch
6712             will_dirty_i=will_dirty_next;
6713             wont_dirty_i=wont_dirty_next;
6714           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
6715             for(r=0;r<HOST_REGS;r++) {
6716               if(r!=EXCLUDE_REG) {
6717                 signed char target_reg=branch_regs[i].regmap[r];
6718                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6719                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6720                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6721                 }
6722                 else if(target_reg>=0) {
6723                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6724                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
6725                 }
6726                 // Treat delay slot as part of branch too
6727                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
6728                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
6729                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
6730                 }
6731                 else
6732                 {
6733                   will_dirty[i+1]&=~(1<<r);
6734                 }*/
6735               }
6736             }
6737           //}
6738             // Merge in delay slot
6739             for(r=0;r<HOST_REGS;r++) {
6740               if(r!=EXCLUDE_REG) {
6741                 if(!likely[i]) {
6742                   // Might not dirty if likely branch is not taken
6743                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6744                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6745                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6746                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6747                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6748                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6749                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6750                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6751                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6752                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
6753                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
6754                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6755                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6756                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6757                 }
6758               }
6759             }
6760           }
6761           // Merge in delay slot (won't dirty)
6762           for(r=0;r<HOST_REGS;r++) {
6763             if(r!=EXCLUDE_REG) {
6764               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6765               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6766               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6767               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6768               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6769               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6770               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6771               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
6772               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
6773               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6774             }
6775           }
6776           if(wr) {
6777             #ifndef DESTRUCTIVE_WRITEBACK
6778             branch_regs[i].dirty&=wont_dirty_i;
6779             #endif
6780             branch_regs[i].dirty|=will_dirty_i;
6781           }
6782         }
6783       }
6784     }
6785     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6786     {
6787       // SYSCALL instruction (software interrupt)
6788       will_dirty_i=0;
6789       wont_dirty_i=0;
6790     }
6791     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6792     {
6793       // ERET instruction (return from interrupt)
6794       will_dirty_i=0;
6795       wont_dirty_i=0;
6796     }
6797     will_dirty_next=will_dirty_i;
6798     wont_dirty_next=wont_dirty_i;
6799     for(r=0;r<HOST_REGS;r++) {
6800       if(r!=EXCLUDE_REG) {
6801         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
6802         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
6803         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
6804         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
6805         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
6806         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
6807         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
6808         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
6809         if(i>istart) {
6810           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
6811           {
6812             // Don't store a register immediately after writing it,
6813             // as that may prevent dual-issue.
6814             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
6815             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
6816           }
6817         }
6818       }
6819     }
6820     // Save it
6821     will_dirty[i]=will_dirty_i;
6822     wont_dirty[i]=wont_dirty_i;
6823     // Mark registers that won't be dirtied as not dirty
6824     if(wr) {
6825       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
6826       for(r=0;r<HOST_REGS;r++) {
6827         if((will_dirty_i>>r)&1) {
6828           printf(" r%d",r);
6829         }
6830       }
6831       printf("\n");*/
6832
6833       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
6834         regs[i].dirty|=will_dirty_i;
6835         #ifndef DESTRUCTIVE_WRITEBACK
6836         regs[i].dirty&=wont_dirty_i;
6837         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6838         {
6839           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
6840             for(r=0;r<HOST_REGS;r++) {
6841               if(r!=EXCLUDE_REG) {
6842                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
6843                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
6844                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6845               }
6846             }
6847           }
6848         }
6849         else
6850         {
6851           if(i<iend) {
6852             for(r=0;r<HOST_REGS;r++) {
6853               if(r!=EXCLUDE_REG) {
6854                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
6855                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
6856                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);assert(!((wont_dirty_i>>r)&1));*/}
6857               }
6858             }
6859           }
6860         }
6861         #endif
6862       //}
6863     }
6864     // Deal with changed mappings
6865     temp_will_dirty=will_dirty_i;
6866     temp_wont_dirty=wont_dirty_i;
6867     for(r=0;r<HOST_REGS;r++) {
6868       if(r!=EXCLUDE_REG) {
6869         int nr;
6870         if(regs[i].regmap[r]==regmap_pre[i][r]) {
6871           if(wr) {
6872             #ifndef DESTRUCTIVE_WRITEBACK
6873             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6874             #endif
6875             regs[i].wasdirty|=will_dirty_i&(1<<r);
6876           }
6877         }
6878         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
6879           // Register moved to a different register
6880           will_dirty_i&=~(1<<r);
6881           wont_dirty_i&=~(1<<r);
6882           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
6883           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
6884           if(wr) {
6885             #ifndef DESTRUCTIVE_WRITEBACK
6886             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
6887             #endif
6888             regs[i].wasdirty|=will_dirty_i&(1<<r);
6889           }
6890         }
6891         else {
6892           will_dirty_i&=~(1<<r);
6893           wont_dirty_i&=~(1<<r);
6894           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
6895             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6896             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
6897           } else {
6898             wont_dirty_i|=1<<r;
6899             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);assert(!((will_dirty>>r)&1));*/
6900           }
6901         }
6902       }
6903     }
6904   }
6905 }
6906
6907 #ifdef DISASM
6908   /* disassembly */
6909 void disassemble_inst(int i)
6910 {
6911     if (bt[i]) printf("*"); else printf(" ");
6912     switch(itype[i]) {
6913       case UJUMP:
6914         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6915       case CJUMP:
6916         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
6917       case SJUMP:
6918         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
6919       case FJUMP:
6920         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
6921       case RJUMP:
6922         if (opcode2[i]==0x9&&rt1[i]!=31)
6923           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
6924         else
6925           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6926         break;
6927       case SPAN:
6928         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
6929       case IMM16:
6930         if(opcode[i]==0xf) //LUI
6931           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
6932         else
6933           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6934         break;
6935       case LOAD:
6936       case LOADLR:
6937         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6938         break;
6939       case STORE:
6940       case STORELR:
6941         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
6942         break;
6943       case ALU:
6944       case SHIFT:
6945         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
6946         break;
6947       case MULTDIV:
6948         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
6949         break;
6950       case SHIFTIMM:
6951         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
6952         break;
6953       case MOV:
6954         if((opcode2[i]&0x1d)==0x10)
6955           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
6956         else if((opcode2[i]&0x1d)==0x11)
6957           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
6958         else
6959           printf (" %x: %s\n",start+i*4,insn[i]);
6960         break;
6961       case COP0:
6962         if(opcode2[i]==0)
6963           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
6964         else if(opcode2[i]==4)
6965           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
6966         else printf (" %x: %s\n",start+i*4,insn[i]);
6967         break;
6968       case COP1:
6969         if(opcode2[i]<3)
6970           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
6971         else if(opcode2[i]>3)
6972           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
6973         else printf (" %x: %s\n",start+i*4,insn[i]);
6974         break;
6975       case COP2:
6976         if(opcode2[i]<3)
6977           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
6978         else if(opcode2[i]>3)
6979           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
6980         else printf (" %x: %s\n",start+i*4,insn[i]);
6981         break;
6982       case C1LS:
6983         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6984         break;
6985       case C2LS:
6986         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
6987         break;
6988       case INTCALL:
6989         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
6990         break;
6991       default:
6992         //printf (" %s %8x\n",insn[i],source[i]);
6993         printf (" %x: %s\n",start+i*4,insn[i]);
6994     }
6995 }
6996 #else
6997 static void disassemble_inst(int i) {}
6998 #endif // DISASM
6999
7000 #define DRC_TEST_VAL 0x74657374
7001
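// Self-test: emit a stub that loads DRC_TEST_VAL (ASCII "test") into the
// return register and jumps back through reg 14, then run it to verify that
// the translation cache is actually executable.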
7002 static int new_dynarec_test(void)
7003 {
7004   int (*testfunc)(void) = (void *)out;
7005   void *beginning;
7006   int ret;
7007
7008   beginning = start_block();
7009   emit_movimm(DRC_TEST_VAL,0); // test
7010   emit_jmpreg(14);
7011   literal_pool(0);
7012   end_block(beginning);
7013   SysPrintf("testing if we can run recompiled code..\n");
7014   ret = testfunc();
7015   if (ret == DRC_TEST_VAL)
7016     SysPrintf("test passed.\n");
7017   else
7018     SysPrintf("test failed: %08x\n", ret);
7019   out=(u_char *)BASE_ADDR;
7020   return ret == DRC_TEST_VAL;
7021 }
7022
7023 // clear the state completely, instead of just marking
7024 // things invalid like invalidate_all_pages() does
7025 void new_dynarec_clear_full()
7026 {
7027   int n;
7028   out=(u_char *)BASE_ADDR;
7029   memset(invalid_code,1,sizeof(invalid_code));
7030   memset(hash_table,0xff,sizeof(hash_table));
7031   memset(mini_ht,-1,sizeof(mini_ht));
7032   memset(restore_candidate,0,sizeof(restore_candidate));
7033   memset(shadow,0,sizeof(shadow));
7034   copy=shadow;
7035   expirep=16384; // Expiry pointer, +2 blocks
7036   pending_exception=0;
7037   literalcount=0;
7038   stop_after_jal=0;
7039   inv_code_start=inv_code_end=~0;
7040   // Clear block lookup lists (jump_in/jump_out/jump_dirty)
7041   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7042   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7043   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7044 }
7045
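// One-time setup: map (or mprotect) the translation cache as read/write/exec,
// reset all recompiler state and run the executability self-test above.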
7046 void new_dynarec_init()
7047 {
7048   SysPrintf("Init new dynarec\n");
7049
7050   // allocate/prepare a buffer for translation cache
7051   // see assem_arm.h for some explanation
7052 #if   defined(BASE_ADDR_FIXED)
7053   if (mmap (translation_cache, 1 << TARGET_SIZE_2,
7054             PROT_READ | PROT_WRITE | PROT_EXEC,
7055             MAP_PRIVATE | MAP_ANONYMOUS,
7056             -1, 0) != translation_cache) {
7057     SysPrintf("mmap() failed: %s\n", strerror(errno));
7058     SysPrintf("disable BASE_ADDR_FIXED and recompile\n");
7059     abort();
7060   }
7061 #elif defined(BASE_ADDR_DYNAMIC)
7062   #ifdef VITA
7063   sceBlock = sceKernelAllocMemBlockForVM("code", 1 << TARGET_SIZE_2);
7064   if (sceBlock < 0)
7065     SysPrintf("sceKernelAllocMemBlockForVM failed\n");
7066   int ret = sceKernelGetMemBlockBase(sceBlock, (void **)&translation_cache);
7067   if (ret < 0)
7068     SysPrintf("sceKernelGetMemBlockBase failed\n");
7069   #else
7070   translation_cache = mmap (NULL, 1 << TARGET_SIZE_2,
7071             PROT_READ | PROT_WRITE | PROT_EXEC,
7072             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
7073   if (translation_cache == MAP_FAILED) {
7074     SysPrintf("mmap() failed: %s\n", strerror(errno));
7075     abort();
7076   }
7077   #endif
7078 #else
7079   #ifndef NO_WRITE_EXEC
7080   // not all systems allow execute in data segment by default
7081   if (mprotect((void *)BASE_ADDR, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
7082     SysPrintf("mprotect() failed: %s\n", strerror(errno));
7083   #endif
7084 #endif
7085   out=(u_char *)BASE_ADDR;
7086   cycle_multiplier=200;
7087   new_dynarec_clear_full();
7088 #ifdef HOST_IMM8
7089   // Copy this into local area so we don't have to put it in every literal pool
7090   invc_ptr=invalid_code;
7091 #endif
7092   arch_init();
7093   new_dynarec_test();
7094 #ifndef RAM_FIXED
7095   ram_offset=(u_int)rdram-0x80000000;
7096 #endif
7097   if (ram_offset!=0)
7098     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
7099 }
7100
7101 void new_dynarec_cleanup()
7102 {
7103   int n;
7104 #if defined(BASE_ADDR_FIXED) || defined(BASE_ADDR_DYNAMIC)
7105   #ifdef VITA
7106   sceKernelFreeMemBlock(sceBlock);
7107   sceBlock = -1;
7108   #else
7109   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0)
7110     SysPrintf("munmap() failed\n");
7111   #endif
7112 #endif
7113   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7114   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7115   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7116   #ifdef ROM_COPY
7117   if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
7118   #endif
7119 }
7120
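// Translate a PSX code address into a host pointer into RAM or BIOS ROM.
// *limit receives the end of the contiguous region; returns NULL if the
// address is not backed by translatable memory.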
7121 static u_int *get_source_start(u_int addr, u_int *limit)
7122 {
7123   if (addr < 0x00200000 ||
7124     (0xa0000000 <= addr && addr < 0xa0200000)) {
7125     // used for BIOS calls mostly?
7126     *limit = (addr&0xa0000000)|0x00200000;
7127     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7128   }
7129   else if (!Config.HLE && (
7130     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
7131     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
7132     // BIOS
7133     *limit = (addr & 0xfff00000) | 0x80000;
7134     return (u_int *)((u_int)psxR + (addr&0x7ffff));
7135   }
7136   else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
7137     *limit = (addr & 0x80600000) + 0x00200000;
7138     return (u_int *)((u_int)rdram + (addr&0x1fffff));
7139   }
7140   return NULL;
7141 }
7142
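// Scan forward (at most 0x1000 bytes) for a "jr $ra" and return the address
// just past its delay slot; used to estimate where a block ends.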
7143 static u_int scan_for_ret(u_int addr)
7144 {
7145   u_int limit = 0;
7146   u_int *mem;
7147
7148   mem = get_source_start(addr, &limit);
7149   if (mem == NULL)
7150     return addr;
7151
7152   if (limit > addr + 0x1000)
7153     limit = addr + 0x1000;
7154   for (; addr < limit; addr += 4, mem++) {
7155     if (*mem == 0x03e00008) // jr $ra
7156       return addr + 8;
7157   }
7158   return addr;
7159 }
7160
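// Per-block record stored in savestates: block start address plus a bitmask
// of GPRs that looked like scratchpad pointers, used to re-seed address
// speculation on load.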
7161 struct savestate_block {
7162   uint32_t addr;
7163   uint32_t regflags;
7164 };
7165
7166 static int addr_cmp(const void *p1_, const void *p2_)
7167 {
7168   const struct savestate_block *p1 = p1_, *p2 = p2_;
7169   return p1->addr - p2->addr;
7170 }
7171
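// Collect the currently compiled blocks (from the jump_in tables) into 'save',
// sorted and de-duplicated per page; returns the number of bytes written.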
7172 int new_dynarec_save_blocks(void *save, int size)
7173 {
7174   struct savestate_block *blocks = save;
7175   int maxcount = size / sizeof(blocks[0]);
7176   struct savestate_block tmp_blocks[1024];
7177   struct ll_entry *head;
7178   int p, s, d, o, bcnt;
7179   u_int addr;
7180
7181   o = 0;
7182   for (p = 0; p < sizeof(jump_in) / sizeof(jump_in[0]); p++) {
7183     bcnt = 0;
7184     for (head = jump_in[p]; head != NULL; head = head->next) {
7185       tmp_blocks[bcnt].addr = head->vaddr;
7186       tmp_blocks[bcnt].regflags = head->reg_sv_flags;
7187       bcnt++;
7188     }
7189     if (bcnt < 1)
7190       continue;
7191     qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
7192
7193     addr = tmp_blocks[0].addr;
7194     for (s = d = 0; s < bcnt; s++) {
7195       if (tmp_blocks[s].addr < addr)
7196         continue;
7197       if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
7198         tmp_blocks[d++] = tmp_blocks[s];
7199       addr = scan_for_ret(tmp_blocks[s].addr);
7200     }
7201
7202     if (o + d > maxcount)
7203       d = maxcount - o;
7204     memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
7205     o += d;
7206   }
7207
7208   return o * sizeof(blocks[0]);
7209 }
7210
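// After a savestate load, precompile the saved blocks.  GPRs are temporarily
// set so that address speculation sees the same scratchpad-pointer pattern
// that was recorded at save time, and are restored afterwards.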
7211 void new_dynarec_load_blocks(const void *save, int size)
7212 {
7213   const struct savestate_block *blocks = save;
7214   int count = size / sizeof(blocks[0]);
7215   u_int regs_save[32];
7216   uint32_t f;
7217   int i, b;
7218
7219   get_addr(psxRegs.pc);
7220
7221   // change GPRs for speculation to at least partially work..
7222   memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
7223   for (i = 1; i < 32; i++)
7224     psxRegs.GPR.r[i] = 0x80000000;
7225
7226   for (b = 0; b < count; b++) {
7227     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7228       if (f & 1)
7229         psxRegs.GPR.r[i] = 0x1f800000;
7230     }
7231
7232     get_addr(blocks[b].addr);
7233
7234     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
7235       if (f & 1)
7236         psxRegs.GPR.r[i] = 0x80000000;
7237     }
7238   }
7239
7240   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
7241 }
7242
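// Main compile entry point: translate one block of MIPS code starting at
// 'addr' into the translation cache, following the passes listed below.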
7243 int new_recompile_block(int addr)
7244 {
7245   u_int pagelimit = 0;
7246   u_int state_rflags = 0;
7247   int i;
7248
7249   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7250   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
7251   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
7252   //if(debug)
7253   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
7254   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
7255   /*if(Count>=312978186) {
7256     rlist();
7257   }*/
7258   //rlist();
7259
7260   // this is just for speculation
7261   for (i = 1; i < 32; i++) {
7262     if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
7263       state_rflags |= 1 << i;
7264   }
7265
7266   start = (u_int)addr&~3;
7267   //assert(((u_int)addr&1)==0);
7268   new_dynarec_did_compile=1;
7269   if (Config.HLE && start == 0x80001000) // hlecall
7270   {
7271     // XXX: is this enough? Maybe check hleSoftCall?
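    // Instead of translating BIOS code, emit a tiny stub that records the PC
    // in pcaddr and exits through new_dyna_leave so the HLE call can be
    // handled outside the recompiled code.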
7272     void *beginning=start_block();
7273     u_int page=get_page(start);
7274
7275     invalid_code[start>>12]=0;
7276     emit_movimm(start,0);
7277     emit_writeword(0,(int)&pcaddr);
7278     emit_jmp((int)new_dyna_leave);
7279     literal_pool(0);
7280     end_block(beginning);
7281     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
7282     return 0;
7283   }
7284
7285   source = get_source_start(start, &pagelimit);
7286   if (source == NULL) {
7287     SysPrintf("Compile at bogus memory address: %08x\n", addr);
7288     exit(1);
7289   }
7290
7291   /* Pass 1: disassemble */
7292   /* Pass 2: register dependencies, branch targets */
7293   /* Pass 3: register allocation */
7294   /* Pass 4: branch dependencies */
7295   /* Pass 5: pre-alloc */
7296   /* Pass 6: optimize clean/dirty state */
7297   /* Pass 7: flag 32-bit registers */
7298   /* Pass 8: assembly */
7299   /* Pass 9: linker */
7300   /* Pass 10: garbage collection / free memory */
7301
7302   int j;
7303   int done=0;
7304   unsigned int type,op,op2;
7305
7306   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
7307
7308   /* Pass 1 disassembly */
7309
7310   for(i=0;!done;i++) {
7311     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
7312     minimum_free_regs[i]=0;
7313     opcode[i]=op=source[i]>>26;
7314     switch(op)
7315     {
7316       case 0x00: strcpy(insn[i],"special"); type=NI;
7317         op2=source[i]&0x3f;
7318         switch(op2)
7319         {
7320           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
7321           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
7322           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
7323           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
7324           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
7325           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
7326           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
7327           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
7328           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
7329           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
7330           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
7331           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
7332           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
7333           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
7334           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
7335           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
7336           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
7337           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
7338           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
7339           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
7340           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
7341           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
7342           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
7343           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
7344           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
7345           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
7346           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
7347           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
7348           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
7349           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
7350           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
7351           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
7352           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
7353           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
7354           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
7355 #if 0
7356           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
7357           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
7358           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
7359           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
7360           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
7361           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
7362           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
7363           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
7364           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
7365           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
7366           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
7367           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
7368           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
7369           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
7370           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
7371           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
7372           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
7373 #endif
7374         }
7375         break;
7376       case 0x01: strcpy(insn[i],"regimm"); type=NI;
7377         op2=(source[i]>>16)&0x1f;
7378         switch(op2)
7379         {
7380           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
7381           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
7382           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
7383           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
7384           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
7385           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
7386           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
7387           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
7388           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
7389           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
7390           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
7391           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
7392           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
7393           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
7394         }
7395         break;
7396       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
7397       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
7398       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
7399       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
7400       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
7401       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
7402       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
7403       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
7404       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
7405       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
7406       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
7407       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
7408       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
7409       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
7410       case 0x10: strcpy(insn[i],"cop0"); type=NI;
7411         op2=(source[i]>>21)&0x1f;
7412         switch(op2)
7413         {
7414           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
7415           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
7416           case 0x10: strcpy(insn[i],"tlb"); type=NI;
7417           switch(source[i]&0x3f)
7418           {
7419             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
7420             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
7421             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
7422             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
7423             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
7424             //case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
7425           }
7426         }
7427         break;
7428       case 0x11: strcpy(insn[i],"cop1"); type=NI;
7429         op2=(source[i]>>21)&0x1f;
7430         switch(op2)
7431         {
7432           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
7433           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
7434           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
7435           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
7436           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
7437           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
7438           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
7439           switch((source[i]>>16)&0x3)
7440           {
7441             case 0x00: strcpy(insn[i],"BC1F"); break;
7442             case 0x01: strcpy(insn[i],"BC1T"); break;
7443             case 0x02: strcpy(insn[i],"BC1FL"); break;
7444             case 0x03: strcpy(insn[i],"BC1TL"); break;
7445           }
7446           break;
7447           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
7448           switch(source[i]&0x3f)
7449           {
7450             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
7451             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
7452             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
7453             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
7454             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
7455             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
7456             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
7457             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
7458             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
7459             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
7460             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
7461             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
7462             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
7463             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
7464             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
7465             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
7466             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
7467             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
7468             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
7469             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
7470             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
7471             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
7472             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
7473             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
7474             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
7475             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
7476             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
7477             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
7478             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
7479             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
7480             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
7481             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
7482             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
7483             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
7484             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
7485           }
7486           break;
7487           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
7488           switch(source[i]&0x3f)
7489           {
7490             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
7491             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
7492             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
7493             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
7494             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
7495             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
7496             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
7497             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
7498             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
7499             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
7500             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
7501             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
7502             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
7503             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
7504             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
7505             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
7506             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
7507             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
7508             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
7509             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
7510             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
7511             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
7512             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
7513             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
7514             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
7515             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
7516             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
7517             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
7518             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
7519             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
7520             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
7521             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
7522             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
7523             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
7524             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
7525           }
7526           break;
7527           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
7528           switch(source[i]&0x3f)
7529           {
7530             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
7531             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
7532           }
7533           break;
7534           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
7535           switch(source[i]&0x3f)
7536           {
7537             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
7538             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
7539           }
7540           break;
7541         }
7542         break;
7543 #if 0
7544       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
7545       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
7546       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
7547       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
7548       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
7549       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
7550       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
7551       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
7552 #endif
7553       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
7554       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
7555       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
7556       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
7557       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
7558       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
7559       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
7560 #if 0
7561       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
7562 #endif
7563       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
7564       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
7565       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
7566       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
7567 #if 0
7568       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
7569       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
7570 #endif
7571       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
7572       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
7573       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
7574       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
7575 #if 0
7576       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
7577       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
7578       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
7579 #endif
7580       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
7581       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
7582 #if 0
7583       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
7584       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
7585       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
7586 #endif
7587       case 0x12: strcpy(insn[i],"COP2"); type=NI;
7588         op2=(source[i]>>21)&0x1f;
7589         //if (op2 & 0x10) {
7590         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
7591           if (gte_handlers[source[i]&0x3f]!=NULL) {
7592             if (gte_regnames[source[i]&0x3f]!=NULL)
7593               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
7594             else
7595               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
7596             type=C2OP;
7597           }
7598         }
7599         else switch(op2)
7600         {
7601           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
7602           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
7603           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
7604           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
7605         }
7606         break;
7607       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
7608       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
7609       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
7610       default: strcpy(insn[i],"???"); type=NI;
7611         SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
7612         break;
7613     }
7614     itype[i]=type;
7615     opcode2[i]=op2;
7616     /* Get registers/immediates */
7617     lt1[i]=0;
7618     us1[i]=0;
7619     us2[i]=0;
7620     dep1[i]=0;
7621     dep2[i]=0;
7622     gte_rs[i]=gte_rt[i]=0;
7623     switch(type) {
7624       case LOAD:
7625         rs1[i]=(source[i]>>21)&0x1f;
7626         rs2[i]=0;
7627         rt1[i]=(source[i]>>16)&0x1f;
7628         rt2[i]=0;
7629         imm[i]=(short)source[i];
7630         break;
7631       case STORE:
7632       case STORELR:
7633         rs1[i]=(source[i]>>21)&0x1f;
7634         rs2[i]=(source[i]>>16)&0x1f;
7635         rt1[i]=0;
7636         rt2[i]=0;
7637         imm[i]=(short)source[i];
7638         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
7639         break;
7640       case LOADLR:
7641         // LWL/LWR only load part of the register,
7642         // therefore the target register must be treated as a source too
7643         rs1[i]=(source[i]>>21)&0x1f;
7644         rs2[i]=(source[i]>>16)&0x1f;
7645         rt1[i]=(source[i]>>16)&0x1f;
7646         rt2[i]=0;
7647         imm[i]=(short)source[i];
7648         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
7649         if(op==0x26) dep1[i]=rt1[i]; // LWR
7650         break;
7651       case IMM16:
7652         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
7653         else rs1[i]=(source[i]>>21)&0x1f;
7654         rs2[i]=0;
7655         rt1[i]=(source[i]>>16)&0x1f;
7656         rt2[i]=0;
7657         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
7658           imm[i]=(unsigned short)source[i];
7659         }else{
7660           imm[i]=(short)source[i];
7661         }
7662         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
7663         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
7664         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
7665         break;
7666       case UJUMP:
7667         rs1[i]=0;
7668         rs2[i]=0;
7669         rt1[i]=0;
7670         rt2[i]=0;
7671         // The JAL instruction writes to r31.
7672         if (op&1) {
7673           rt1[i]=31;
7674         }
7675         rs2[i]=CCREG;
7676         break;
7677       case RJUMP:
7678         rs1[i]=(source[i]>>21)&0x1f;
7679         rs2[i]=0;
7680         rt1[i]=0;
7681         rt2[i]=0;
7682         // The JALR instruction writes to rd.
7683         if (op2&1) {
7684           rt1[i]=(source[i]>>11)&0x1f;
7685         }
7686         rs2[i]=CCREG;
7687         break;
7688       case CJUMP:
7689         rs1[i]=(source[i]>>21)&0x1f;
7690         rs2[i]=(source[i]>>16)&0x1f;
7691         rt1[i]=0;
7692         rt2[i]=0;
7693         if(op&2) { // BGTZ/BLEZ
7694           rs2[i]=0;
7695         }
7696         us1[i]=rs1[i];
7697         us2[i]=rs2[i];
7698         likely[i]=op>>4;
7699         break;
7700       case SJUMP:
7701         rs1[i]=(source[i]>>21)&0x1f;
7702         rs2[i]=CCREG;
7703         rt1[i]=0;
7704         rt2[i]=0;
7705         us1[i]=rs1[i];
7706         if(op2&0x10) { // BxxAL
7707           rt1[i]=31;
7708           // NOTE: If the branch is not taken, r31 is still overwritten
7709         }
7710         likely[i]=(op2&2)>>1;
7711         break;
7712       case FJUMP:
7713         rs1[i]=FSREG;
7714         rs2[i]=CSREG;
7715         rt1[i]=0;
7716         rt2[i]=0;
7717         likely[i]=((source[i])>>17)&1;
7718         break;
7719       case ALU:
7720         rs1[i]=(source[i]>>21)&0x1f; // source
7721         rs2[i]=(source[i]>>16)&0x1f; // second operand
7722         rt1[i]=(source[i]>>11)&0x1f; // destination
7723         rt2[i]=0;
7724         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7725           us1[i]=rs1[i];us2[i]=rs2[i];
7726         }
7727         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7728           dep1[i]=rs1[i];dep2[i]=rs2[i];
7729         }
7730         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
7731           dep1[i]=rs1[i];dep2[i]=rs2[i];
7732         }
7733         break;
7734       case MULTDIV:
7735         rs1[i]=(source[i]>>21)&0x1f; // source
7736         rs2[i]=(source[i]>>16)&0x1f; // divisor
7737         rt1[i]=HIREG;
7738         rt2[i]=LOREG;
7739         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7740           us1[i]=rs1[i];us2[i]=rs2[i];
7741         }
7742         break;
7743       case MOV:
7744         rs1[i]=0;
7745         rs2[i]=0;
7746         rt1[i]=0;
7747         rt2[i]=0;
7748         if(op2==0x10) rs1[i]=HIREG; // MFHI
7749         if(op2==0x11) rt1[i]=HIREG; // MTHI
7750         if(op2==0x12) rs1[i]=LOREG; // MFLO
7751         if(op2==0x13) rt1[i]=LOREG; // MTLO
7752         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
7753         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
7754         dep1[i]=rs1[i];
7755         break;
7756       case SHIFT:
7757         rs1[i]=(source[i]>>16)&0x1f; // target of shift
7758         rs2[i]=(source[i]>>21)&0x1f; // shift amount
7759         rt1[i]=(source[i]>>11)&0x1f; // destination
7760         rt2[i]=0;
7761         // DSLLV/DSRLV/DSRAV are 64-bit
7762         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
7763         break;
7764       case SHIFTIMM:
7765         rs1[i]=(source[i]>>16)&0x1f;
7766         rs2[i]=0;
7767         rt1[i]=(source[i]>>11)&0x1f;
7768         rt2[i]=0;
7769         imm[i]=(source[i]>>6)&0x1f;
7770         // DSxx32 instructions
7771         if(op2>=0x3c) imm[i]|=0x20;
7772         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
7773         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
7774         break;
7775       case COP0:
7776         rs1[i]=0;
7777         rs2[i]=0;
7778         rt1[i]=0;
7779         rt2[i]=0;
7780         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
7781         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
7782         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
7783         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
7784         break;
7785       case COP1:
7786         rs1[i]=0;
7787         rs2[i]=0;
7788         rt1[i]=0;
7789         rt2[i]=0;
7790         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
7791         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
7792         if(op2==5) us1[i]=rs1[i]; // DMTC1
7793         rs2[i]=CSREG;
7794         break;
7795       case COP2:
7796         rs1[i]=0;
7797         rs2[i]=0;
7798         rt1[i]=0;
7799         rt2[i]=0;
7800         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
7801         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
7802         rs2[i]=CSREG;
7803         int gr=(source[i]>>11)&0x1F;
7804         switch(op2)
7805         {
7806           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
7807           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
7808           case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
7809           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
7810         }
7811         break;
7812       case C1LS:
7813         rs1[i]=(source[i]>>21)&0x1F;
7814         rs2[i]=CSREG;
7815         rt1[i]=0;
7816         rt2[i]=0;
7817         imm[i]=(short)source[i];
7818         break;
7819       case C2LS:
7820         rs1[i]=(source[i]>>21)&0x1F;
7821         rs2[i]=0;
7822         rt1[i]=0;
7823         rt2[i]=0;
7824         imm[i]=(short)source[i];
7825         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
7826         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
7827         break;
7828       case C2OP:
7829         rs1[i]=0;
7830         rs2[i]=0;
7831         rt1[i]=0;
7832         rt2[i]=0;
7833         gte_rs[i]=gte_reg_reads[source[i]&0x3f];
7834         gte_rt[i]=gte_reg_writes[source[i]&0x3f];
7835         gte_rt[i]|=1ll<<63; // every op changes flags
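        // MVMVA reads only one source vector, selected by bits 15-16 of the
        // opcode; drop the blanket V0-V2/IR1-IR3 reads and keep just that one.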
7836         if((source[i]&0x3f)==GTE_MVMVA) {
7837           int v = (source[i] >> 15) & 3;
7838           gte_rs[i]&=~0xe3fll;
7839           if(v==3) gte_rs[i]|=0xe00ll;
7840           else gte_rs[i]|=3ll<<(v*2);
7841         }
7842         break;
7843       case FLOAT:
7844       case FCONV:
7845         rs1[i]=0;
7846         rs2[i]=CSREG;
7847         rt1[i]=0;
7848         rt2[i]=0;
7849         break;
7850       case FCOMP:
7851         rs1[i]=FSREG;
7852         rs2[i]=CSREG;
7853         rt1[i]=FSREG;
7854         rt2[i]=0;
7855         break;
7856       case SYSCALL:
7857       case HLECALL:
7858       case INTCALL:
7859         rs1[i]=CCREG;
7860         rs2[i]=0;
7861         rt1[i]=0;
7862         rt2[i]=0;
7863         break;
7864       default:
7865         rs1[i]=0;
7866         rs2[i]=0;
7867         rt1[i]=0;
7868         rt2[i]=0;
7869     }
7870     /* Calculate branch target addresses */
7871     if(type==UJUMP)
7872       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
7873     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
7874       ba[i]=start+i*4+8; // Ignore never taken branch
7875     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
7876       ba[i]=start+i*4+8; // Ignore never taken branch
7877     else if(type==CJUMP||type==SJUMP||type==FJUMP)
7878       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
7879     else ba[i]=-1;
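    // J/JAL targets keep the 256MB region of the delay-slot PC and replace
    // the low 28 bits with the 26-bit immediate shifted left by 2; the <<6
    // followed by >>4 performs the masking and the shift in one go.  For
    // example, a J with imm26=0x4000 at 0x80010008 yields
    // (0x8001000c & 0xF0000000) | (0x4000<<2) = 0x80010000.
    // Conditional branches add the sign-extended 16-bit offset times 4 to
    // the delay-slot address; (source<<16)>>14 sign-extends and scales in a
    // single arithmetic shift.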
7880     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
7881       int do_in_intrp=0;
7882       // branch in delay slot?
7883       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7884         // don't compile the first branch; call the interpreter if it's reached

7885         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
7886         do_in_intrp=1;
7887       }
7888       // basic load delay detection
7889       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
7890         int t=(ba[i-1]-start)/4;
7891         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
7892           // jump target wants DS result - potential load delay effect
7893           SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
7894           do_in_intrp=1;
7895           bt[t+1]=1; // expected return from interpreter
7896         }
7897         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
7898               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
7899           // a v0 overwrite like this is a sign of trouble; bail out
7900           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
7901           do_in_intrp=1;
7902         }
7903       }
7904       if(do_in_intrp) {
7905         rs1[i-1]=CCREG;
7906         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
7907         ba[i-1]=-1;
7908         itype[i-1]=INTCALL;
7909         done=2;
7910         i--; // don't compile the DS
7911       }
7912     }
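    // In either problem case the previous branch is re-typed as INTCALL and
    // the block is cut short, so the branch and its awkward delay slot are
    // executed by the interpreter rather than recompiled.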
7913     /* Is this the end of the block? */
7914     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
7915       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
7916         done=2;
7917       }
7918       else {
7919         if(stop_after_jal) done=1;
7920         // Stop on BREAK
7921         if((source[i+1]&0xfc00003f)==0x0d) done=1;
7922       }
7923       // Don't recompile stuff that's already compiled
7924       if(check_addr(start+i*4+4)) done=1;
7925       // Don't get too close to the limit
7926       if(i>MAXBLOCK/2) done=1;
7927     }
7928     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
7929     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
7930     if(done==2) {
7931       // Does the block continue due to a branch?
7932       for(j=i-1;j>=0;j--)
7933       {
7934         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
7935         if(ba[j]==start+i*4+4) done=j=0;
7936         if(ba[j]==start+i*4+8) done=j=0;
7937       }
7938     }
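    // done==2 is a soft stop: if an earlier branch in this block targets one
    // of the next few instruction slots, keep compiling so that target stays
    // inside the block.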
7939     //assert(i<MAXBLOCK-1);
7940     if(start+i*4==pagelimit-4) done=1;
7941     assert(start+i*4<pagelimit);
7942     if (i==MAXBLOCK-1) done=1;
7943     // Stop if we're compiling junk
7944     if(itype[i]==NI&&opcode[i]==0x11) {
7945       done=stop_after_jal=1;
7946       SysPrintf("Disabled speculative precompilation\n");
7947     }
7948   }
7949   slen=i;
7950   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
7951     if(start+i*4==pagelimit) {
7952       itype[i-1]=SPAN;
7953     }
7954   }
7955   assert(slen>0);
7956
7957   /* Pass 2 - Register dependencies and branch targets */
7958
7959   unneeded_registers(0,slen-1,0);
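  // unneeded_reg[i]/unneeded_reg_upper[i] are bitmaps of MIPS registers whose
  // value (lower/upper half) is not read again before being overwritten after
  // instruction i; the allocator uses them to drop dead values early.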
7960
7961   /* Pass 3 - Register allocation */
7962
7963   struct regstat current; // Current register allocations/status
7964   current.is32=1;
7965   current.dirty=0;
7966   current.u=unneeded_reg[0];
7967   current.uu=unneeded_reg_upper[0];
7968   clear_all_regs(current.regmap);
7969   alloc_reg(&current,0,CCREG);
7970   dirty_reg(&current,CCREG);
7971   current.isconst=0;
7972   current.wasconst=0;
7973   current.waswritten=0;
7974   int ds=0;
7975   int cc=0;
7976   int hr=-1;
7977
7978   if((u_int)addr&1) {
7979     // First instruction is delay slot
7980     cc=-1;
7981     bt[1]=1;
7982     ds=1;
7983     unneeded_reg[0]=1;
7984     unneeded_reg_upper[0]=1;
7985     current.regmap[HOST_BTREG]=BTREG;
7986   }
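  // An odd start address marks a block that begins with a branch delay slot:
  // the branch target is carried in BTREG on entry, and instruction 1 is
  // treated as a branch target (bt[1]).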
7987
7988   for(i=0;i<slen;i++)
7989   {
7990     if(bt[i])
7991     {
7992       int hr;
7993       for(hr=0;hr<HOST_REGS;hr++)
7994       {
7995         // Is this really necessary?
7996         if(current.regmap[hr]==0) current.regmap[hr]=-1;
7997       }
7998       current.isconst=0;
7999       current.waswritten=0;
8000     }
8001     if(i>1)
8002     {
8003       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8004       {
8005         if(rs1[i-2]==0||rs2[i-2]==0)
8006         {
8007           if(rs1[i-2]) {
8008             current.is32|=1LL<<rs1[i-2];
8009             int hr=get_reg(current.regmap,rs1[i-2]|64);
8010             if(hr>=0) current.regmap[hr]=-1;
8011           }
8012           if(rs2[i-2]) {
8013             current.is32|=1LL<<rs2[i-2];
8014             int hr=get_reg(current.regmap,rs2[i-2]|64);
8015             if(hr>=0) current.regmap[hr]=-1;
8016           }
8017         }
8018       }
8019     }
8020     current.is32=-1LL;
8021
8022     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8023     regs[i].wasconst=current.isconst;
8024     regs[i].was32=current.is32;
8025     regs[i].wasdirty=current.dirty;
8026     regs[i].loadedconst=0;
8027     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8028       if(i+1<slen) {
8029         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8030         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8031         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8032         current.u|=1;
8033         current.uu|=1;
8034       } else {
8035         current.u=1;
8036         current.uu=1;
8037       }
8038     } else {
8039       if(i+1<slen) {
8040         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8041         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8042         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8043         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8044         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8045         current.u|=1;
8046         current.uu|=1;
8047       } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
8048     }
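    // current.u/current.uu now describe what is unneeded going into this
    // instruction: the following instruction's set minus whatever this
    // instruction (or, for a branch, its delay slot) still reads, with bit 0
    // ($zero) always marked unneeded.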
8049     is_ds[i]=ds;
8050     if(ds) {
8051       ds=0; // Skip delay slot, already allocated as part of branch
8052       // ...but we need to alloc it in case something jumps here
8053       if(i+1<slen) {
8054         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8055         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8056       }else{
8057         current.u=branch_unneeded_reg[i-1];
8058         current.uu=branch_unneeded_reg_upper[i-1];
8059       }
8060       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8061       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8062       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8063       current.u|=1;
8064       current.uu|=1;
8065       struct regstat temp;
8066       memcpy(&temp,&current,sizeof(current));
8067       temp.wasdirty=temp.dirty;
8068       temp.was32=temp.is32;
8069       // TODO: Take into account unconditional branches, as below
8070       delayslot_alloc(&temp,i);
8071       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8072       regs[i].wasdirty=temp.wasdirty;
8073       regs[i].was32=temp.was32;
8074       regs[i].dirty=temp.dirty;
8075       regs[i].is32=temp.is32;
8076       regs[i].isconst=0;
8077       regs[i].wasconst=0;
8078       current.isconst=0;
8079       // Create entry (branch target) regmap
8080       for(hr=0;hr<HOST_REGS;hr++)
8081       {
8082         int r=temp.regmap[hr];
8083         if(r>=0) {
8084           if(r!=regmap_pre[i][hr]) {
8085             regs[i].regmap_entry[hr]=-1;
8086           }
8087           else
8088           {
8089             if(r<64){
8090               if((current.u>>r)&1) {
8091                 regs[i].regmap_entry[hr]=-1;
8092                 regs[i].regmap[hr]=-1;
8093                 //Don't clear regs in the delay slot as the branch might need them
8094                 //current.regmap[hr]=-1;
8095               }else
8096                 regs[i].regmap_entry[hr]=r;
8097             }
8098             else {
8099               if((current.uu>>(r&63))&1) {
8100                 regs[i].regmap_entry[hr]=-1;
8101                 regs[i].regmap[hr]=-1;
8102                 //Don't clear regs in the delay slot as the branch might need them
8103                 //current.regmap[hr]=-1;
8104               }else
8105                 regs[i].regmap_entry[hr]=r;
8106             }
8107           }
8108         } else {
8109           // First instruction expects CCREG to be allocated
8110           if(i==0&&hr==HOST_CCREG)
8111             regs[i].regmap_entry[hr]=CCREG;
8112           else
8113             regs[i].regmap_entry[hr]=-1;
8114         }
8115       }
8116     }
8117     else { // Not delay slot
8118       switch(itype[i]) {
8119         case UJUMP:
8120           //current.isconst=0; // DEBUG
8121           //current.wasconst=0; // DEBUG
8122           //regs[i].wasconst=0; // DEBUG
8123           clear_const(&current,rt1[i]);
8124           alloc_cc(&current,i);
8125           dirty_reg(&current,CCREG);
8126           if (rt1[i]==31) {
8127             alloc_reg(&current,i,31);
8128             dirty_reg(&current,31);
8129             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
8130             //assert(rt1[i+1]!=rt1[i]);
8131             #ifdef REG_PREFETCH
8132             alloc_reg(&current,i,PTEMP);
8133             #endif
8134             //current.is32|=1LL<<rt1[i];
8135           }
8136           ooo[i]=1;
8137           delayslot_alloc(&current,i+1);
8138           //current.isconst=0; // DEBUG
8139           ds=1;
8140           //printf("i=%d, isconst=%x\n",i,current.isconst);
8141           break;
8142         case RJUMP:
8143           //current.isconst=0;
8144           //current.wasconst=0;
8145           //regs[i].wasconst=0;
8146           clear_const(&current,rs1[i]);
8147           clear_const(&current,rt1[i]);
8148           alloc_cc(&current,i);
8149           dirty_reg(&current,CCREG);
8150           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
8151             alloc_reg(&current,i,rs1[i]);
8152             if (rt1[i]!=0) {
8153               alloc_reg(&current,i,rt1[i]);
8154               dirty_reg(&current,rt1[i]);
8155               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
8156               assert(rt1[i+1]!=rt1[i]);
8157               #ifdef REG_PREFETCH
8158               alloc_reg(&current,i,PTEMP);
8159               #endif
8160             }
8161             #ifdef USE_MINI_HT
8162             if(rs1[i]==31) { // JALR
8163               alloc_reg(&current,i,RHASH);
8164               #ifndef HOST_IMM_ADDR32
8165               alloc_reg(&current,i,RHTBL);
8166               #endif
8167             }
8168             #endif
8169             delayslot_alloc(&current,i+1);
8170           } else {
8171             // The delay slot overwrites our source register, so
8172             // allocate a temporary register to hold the old value.
8173             current.isconst=0;
8174             current.wasconst=0;
8175             regs[i].wasconst=0;
8176             delayslot_alloc(&current,i+1);
8177             current.isconst=0;
8178             alloc_reg(&current,i,RTEMP);
8179           }
8180           //current.isconst=0; // DEBUG
8181           ooo[i]=1;
8182           ds=1;
8183           break;
8184         case CJUMP:
8185           //current.isconst=0;
8186           //current.wasconst=0;
8187           //regs[i].wasconst=0;
8188           clear_const(&current,rs1[i]);
8189           clear_const(&current,rs2[i]);
8190           if((opcode[i]&0x3E)==4) // BEQ/BNE
8191           {
8192             alloc_cc(&current,i);
8193             dirty_reg(&current,CCREG);
8194             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8195             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8196             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8197             {
8198               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8199               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8200             }
8201             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
8202                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
8203               // The delay slot overwrites one of our conditions.
8204               // Allocate the branch condition registers instead.
8205               current.isconst=0;
8206               current.wasconst=0;
8207               regs[i].wasconst=0;
8208               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8209               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
8210               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8211               {
8212                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8213                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
8214               }
8215             }
8216             else
8217             {
8218               ooo[i]=1;
8219               delayslot_alloc(&current,i+1);
8220             }
8221           }
8222           else
8223           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
8224           {
8225             alloc_cc(&current,i);
8226             dirty_reg(&current,CCREG);
8227             alloc_reg(&current,i,rs1[i]);
8228             if(!(current.is32>>rs1[i]&1))
8229             {
8230               alloc_reg64(&current,i,rs1[i]);
8231             }
8232             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
8233               // The delay slot overwrites one of our conditions.
8234               // Allocate the branch condition registers instead.
8235               current.isconst=0;
8236               current.wasconst=0;
8237               regs[i].wasconst=0;
8238               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8239               if(!((current.is32>>rs1[i])&1))
8240               {
8241                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8242               }
8243             }
8244             else
8245             {
8246               ooo[i]=1;
8247               delayslot_alloc(&current,i+1);
8248             }
8249           }
8250           else
8251           // Don't alloc the delay slot yet because we might not execute it
8252           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
8253           {
8254             current.isconst=0;
8255             current.wasconst=0;
8256             regs[i].wasconst=0;
8257             alloc_cc(&current,i);
8258             dirty_reg(&current,CCREG);
8259             alloc_reg(&current,i,rs1[i]);
8260             alloc_reg(&current,i,rs2[i]);
8261             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
8262             {
8263               alloc_reg64(&current,i,rs1[i]);
8264               alloc_reg64(&current,i,rs2[i]);
8265             }
8266           }
8267           else
8268           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
8269           {
8270             current.isconst=0;
8271             current.wasconst=0;
8272             regs[i].wasconst=0;
8273             alloc_cc(&current,i);
8274             dirty_reg(&current,CCREG);
8275             alloc_reg(&current,i,rs1[i]);
8276             if(!(current.is32>>rs1[i]&1))
8277             {
8278               alloc_reg64(&current,i,rs1[i]);
8279             }
8280           }
8281           ds=1;
8282           //current.isconst=0;
8283           break;
8284         case SJUMP:
8285           //current.isconst=0;
8286           //current.wasconst=0;
8287           //regs[i].wasconst=0;
8288           clear_const(&current,rs1[i]);
8289           clear_const(&current,rt1[i]);
8290           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
8291           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
8292           {
8293             alloc_cc(&current,i);
8294             dirty_reg(&current,CCREG);
8295             alloc_reg(&current,i,rs1[i]);
8296             if(!(current.is32>>rs1[i]&1))
8297             {
8298               alloc_reg64(&current,i,rs1[i]);
8299             }
8300             if (rt1[i]==31) { // BLTZAL/BGEZAL
8301               alloc_reg(&current,i,31);
8302               dirty_reg(&current,31);
8303               //#ifdef REG_PREFETCH
8304               //alloc_reg(&current,i,PTEMP);
8305               //#endif
8306               //current.is32|=1LL<<rt1[i];
8307             }
8308             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
8309                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
8310               // Allocate the branch condition registers instead.
8311               current.isconst=0;
8312               current.wasconst=0;
8313               regs[i].wasconst=0;
8314               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
8315               if(!((current.is32>>rs1[i])&1))
8316               {
8317                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
8318               }
8319             }
8320             else
8321             {
8322               ooo[i]=1;
8323               delayslot_alloc(&current,i+1);
8324             }
8325           }
8326           else
8327           // Don't alloc the delay slot yet because we might not execute it
8328           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
8329           {
8330             current.isconst=0;
8331             current.wasconst=0;
8332             regs[i].wasconst=0;
8333             alloc_cc(&current,i);
8334             dirty_reg(&current,CCREG);
8335             alloc_reg(&current,i,rs1[i]);
8336             if(!(current.is32>>rs1[i]&1))
8337             {
8338               alloc_reg64(&current,i,rs1[i]);
8339             }
8340           }
8341           ds=1;
8342           //current.isconst=0;
8343           break;
8344         case FJUMP:
8345           current.isconst=0;
8346           current.wasconst=0;
8347           regs[i].wasconst=0;
8348           if(likely[i]==0) // BC1F/BC1T
8349           {
8350             // TODO: Theoretically we can run out of registers here on x86.
8351             // The delay slot can allocate up to six, and we need to check
8352             // CSREG before executing the delay slot.  Possibly we can drop
8353             // the cycle count and then reload it after checking that the
8354             // FPU is in a usable state, or don't do out-of-order execution.
8355             alloc_cc(&current,i);
8356             dirty_reg(&current,CCREG);
8357             alloc_reg(&current,i,FSREG);
8358             alloc_reg(&current,i,CSREG);
8359             if(itype[i+1]==FCOMP) {
8360               // The delay slot overwrites the branch condition.
8361               // Allocate the branch condition registers instead.
8362               alloc_cc(&current,i);
8363               dirty_reg(&current,CCREG);
8364               alloc_reg(&current,i,CSREG);
8365               alloc_reg(&current,i,FSREG);
8366             }
8367             else {
8368               ooo[i]=1;
8369               delayslot_alloc(&current,i+1);
8370               alloc_reg(&current,i+1,CSREG);
8371             }
8372           }
8373           else
8374           // Don't alloc the delay slot yet because we might not execute it
8375           if(likely[i]) // BC1FL/BC1TL
8376           {
8377             alloc_cc(&current,i);
8378             dirty_reg(&current,CCREG);
8379             alloc_reg(&current,i,CSREG);
8380             alloc_reg(&current,i,FSREG);
8381           }
8382           ds=1;
8383           current.isconst=0;
8384           break;
8385         case IMM16:
8386           imm16_alloc(&current,i);
8387           break;
8388         case LOAD:
8389         case LOADLR:
8390           load_alloc(&current,i);
8391           break;
8392         case STORE:
8393         case STORELR:
8394           store_alloc(&current,i);
8395           break;
8396         case ALU:
8397           alu_alloc(&current,i);
8398           break;
8399         case SHIFT:
8400           shift_alloc(&current,i);
8401           break;
8402         case MULTDIV:
8403           multdiv_alloc(&current,i);
8404           break;
8405         case SHIFTIMM:
8406           shiftimm_alloc(&current,i);
8407           break;
8408         case MOV:
8409           mov_alloc(&current,i);
8410           break;
8411         case COP0:
8412           cop0_alloc(&current,i);
8413           break;
8414         case COP1:
8415         case COP2:
8416           cop1_alloc(&current,i);
8417           break;
8418         case C1LS:
8419           c1ls_alloc(&current,i);
8420           break;
8421         case C2LS:
8422           c2ls_alloc(&current,i);
8423           break;
8424         case C2OP:
8425           c2op_alloc(&current,i);
8426           break;
8427         case FCONV:
8428           fconv_alloc(&current,i);
8429           break;
8430         case FLOAT:
8431           float_alloc(&current,i);
8432           break;
8433         case FCOMP:
8434           fcomp_alloc(&current,i);
8435           break;
8436         case SYSCALL:
8437         case HLECALL:
8438         case INTCALL:
8439           syscall_alloc(&current,i);
8440           break;
8441         case SPAN:
8442           pagespan_alloc(&current,i);
8443           break;
8444       }
8445
8446       // Drop the upper half of registers that have become 32-bit
8447       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
8448       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8449         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8450         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8451         current.uu|=1;
8452       } else {
8453         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
8454         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8455         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8456         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8457         current.uu|=1;
8458       }
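      // A set is32 bit means the register holds a sign-extended 32-bit value,
      // so its upper half never needs a host register; marking it unneeded in
      // uu lets that half be dropped right away.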
8459
8460       // Create entry (branch target) regmap
8461       for(hr=0;hr<HOST_REGS;hr++)
8462       {
8463         int r,or;
8464         r=current.regmap[hr];
8465         if(r>=0) {
8466           if(r!=regmap_pre[i][hr]) {
8467             // TODO: delay slot (?)
8468             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
8469             if(or<0||(r&63)>=TEMPREG){
8470               regs[i].regmap_entry[hr]=-1;
8471             }
8472             else
8473             {
8474               // Just move it to a different register
8475               regs[i].regmap_entry[hr]=r;
8476               // If it was dirty before, it's still dirty
8477               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
8478             }
8479           }
8480           else
8481           {
8482             // Unneeded
8483             if(r==0){
8484               regs[i].regmap_entry[hr]=0;
8485             }
8486             else
8487             if(r<64){
8488               if((current.u>>r)&1) {
8489                 regs[i].regmap_entry[hr]=-1;
8490                 //regs[i].regmap[hr]=-1;
8491                 current.regmap[hr]=-1;
8492               }else
8493                 regs[i].regmap_entry[hr]=r;
8494             }
8495             else {
8496               if((current.uu>>(r&63))&1) {
8497                 regs[i].regmap_entry[hr]=-1;
8498                 //regs[i].regmap[hr]=-1;
8499                 current.regmap[hr]=-1;
8500               }else
8501                 regs[i].regmap_entry[hr]=r;
8502             }
8503           }
8504         } else {
8505           // Branches expect CCREG to be allocated at the target
8506           if(regmap_pre[i][hr]==CCREG)
8507             regs[i].regmap_entry[hr]=CCREG;
8508           else
8509             regs[i].regmap_entry[hr]=-1;
8510         }
8511       }
8512       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
8513     }
8514
8515     if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
8516       current.waswritten|=1<<rs1[i-1];
8517     current.waswritten&=~(1<<rt1[i]);
8518     current.waswritten&=~(1<<rt2[i]);
8519     if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
8520       current.waswritten&=~(1<<rs1[i]);
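    // waswritten tracks base registers recently used for small-offset stores;
    // the store assembler can use it to skip repeated self-modifying-code
    // (invalidation) checks against the same base.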
8521
8522     /* Branch post-alloc */
8523     if(i>0)
8524     {
8525       current.was32=current.is32;
8526       current.wasdirty=current.dirty;
8527       switch(itype[i-1]) {
8528         case UJUMP:
8529           memcpy(&branch_regs[i-1],&current,sizeof(current));
8530           branch_regs[i-1].isconst=0;
8531           branch_regs[i-1].wasconst=0;
8532           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8533           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8534           alloc_cc(&branch_regs[i-1],i-1);
8535           dirty_reg(&branch_regs[i-1],CCREG);
8536           if(rt1[i-1]==31) { // JAL
8537             alloc_reg(&branch_regs[i-1],i-1,31);
8538             dirty_reg(&branch_regs[i-1],31);
8539             branch_regs[i-1].is32|=1LL<<31;
8540           }
8541           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8542           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8543           break;
8544         case RJUMP:
8545           memcpy(&branch_regs[i-1],&current,sizeof(current));
8546           branch_regs[i-1].isconst=0;
8547           branch_regs[i-1].wasconst=0;
8548           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8549           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8550           alloc_cc(&branch_regs[i-1],i-1);
8551           dirty_reg(&branch_regs[i-1],CCREG);
8552           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
8553           if(rt1[i-1]!=0) { // JALR
8554             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
8555             dirty_reg(&branch_regs[i-1],rt1[i-1]);
8556             branch_regs[i-1].is32|=1LL<<rt1[i-1];
8557           }
8558           #ifdef USE_MINI_HT
8559           if(rs1[i-1]==31) { // JALR
8560             alloc_reg(&branch_regs[i-1],i-1,RHASH);
8561             #ifndef HOST_IMM_ADDR32
8562             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
8563             #endif
8564           }
8565           #endif
8566           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8567           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8568           break;
8569         case CJUMP:
8570           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
8571           {
8572             alloc_cc(&current,i-1);
8573             dirty_reg(&current,CCREG);
8574             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
8575                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
8576               // The delay slot overwrote one of our conditions
8577               // Delay slot goes after the test (in order)
8578               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8579               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8580               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8581               current.u|=1;
8582               current.uu|=1;
8583               delayslot_alloc(&current,i);
8584               current.isconst=0;
8585             }
8586             else
8587             {
8588               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
8589               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
8590               // Alloc the branch condition registers
8591               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
8592               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
8593               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
8594               {
8595                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
8596                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
8597               }
8598             }
8599             memcpy(&branch_regs[i-1],&current,sizeof(current));
8600             branch_regs[i-1].isconst=0;
8601             branch_regs[i-1].wasconst=0;
8602             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8603             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8604           }
8605           else
8606           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
8607           {
8608             alloc_cc(&current,i-1);
8609             dirty_reg(&current,CCREG);
8610             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8611               // The delay slot overwrote the branch condition
8612               // Delay slot goes after the test (in order)
8613               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8614               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8615               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8616               current.u|=1;
8617               current.uu|=1;
8618               delayslot_alloc(&current,i);
8619               current.isconst=0;
8620             }
8621             else
8622             {
8623               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8624               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8625               // Alloc the branch condition register
8626               alloc_reg(&current,i-1,rs1[i-1]);
8627               if(!(current.is32>>rs1[i-1]&1))
8628               {
8629                 alloc_reg64(&current,i-1,rs1[i-1]);
8630               }
8631             }
8632             memcpy(&branch_regs[i-1],&current,sizeof(current));
8633             branch_regs[i-1].isconst=0;
8634             branch_regs[i-1].wasconst=0;
8635             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8636             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8637           }
8638           else
8639           // Alloc the delay slot in case the branch is taken
8640           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
8641           {
8642             memcpy(&branch_regs[i-1],&current,sizeof(current));
8643             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8644             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8645             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8646             alloc_cc(&branch_regs[i-1],i);
8647             dirty_reg(&branch_regs[i-1],CCREG);
8648             delayslot_alloc(&branch_regs[i-1],i);
8649             branch_regs[i-1].isconst=0;
8650             alloc_reg(&current,i,CCREG); // Not taken path
8651             dirty_reg(&current,CCREG);
8652             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8653           }
8654           else
8655           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
8656           {
8657             memcpy(&branch_regs[i-1],&current,sizeof(current));
8658             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8659             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8660             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8661             alloc_cc(&branch_regs[i-1],i);
8662             dirty_reg(&branch_regs[i-1],CCREG);
8663             delayslot_alloc(&branch_regs[i-1],i);
8664             branch_regs[i-1].isconst=0;
8665             alloc_reg(&current,i,CCREG); // Not taken path
8666             dirty_reg(&current,CCREG);
8667             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8668           }
8669           break;
8670         case SJUMP:
8671           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
8672           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
8673           {
8674             alloc_cc(&current,i-1);
8675             dirty_reg(&current,CCREG);
8676             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
8677               // The delay slot overwrote the branch condition
8678               // Delay slot goes after the test (in order)
8679               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8680               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8681               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8682               current.u|=1;
8683               current.uu|=1;
8684               delayslot_alloc(&current,i);
8685               current.isconst=0;
8686             }
8687             else
8688             {
8689               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8690               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8691               // Alloc the branch condition register
8692               alloc_reg(&current,i-1,rs1[i-1]);
8693               if(!(current.is32>>rs1[i-1]&1))
8694               {
8695                 alloc_reg64(&current,i-1,rs1[i-1]);
8696               }
8697             }
8698             memcpy(&branch_regs[i-1],&current,sizeof(current));
8699             branch_regs[i-1].isconst=0;
8700             branch_regs[i-1].wasconst=0;
8701             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8702             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
8703           }
8704           else
8705           // Alloc the delay slot in case the branch is taken
8706           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
8707           {
8708             memcpy(&branch_regs[i-1],&current,sizeof(current));
8709             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8710             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8711             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8712             alloc_cc(&branch_regs[i-1],i);
8713             dirty_reg(&branch_regs[i-1],CCREG);
8714             delayslot_alloc(&branch_regs[i-1],i);
8715             branch_regs[i-1].isconst=0;
8716             alloc_reg(&current,i,CCREG); // Not taken path
8717             dirty_reg(&current,CCREG);
8718             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8719           }
8720           // FIXME: BLTZAL/BGEZAL
8721           if(opcode2[i-1]&0x10) { // BxxZAL
8722             alloc_reg(&branch_regs[i-1],i-1,31);
8723             dirty_reg(&branch_regs[i-1],31);
8724             branch_regs[i-1].is32|=1LL<<31;
8725           }
8726           break;
8727         case FJUMP:
8728           if(likely[i-1]==0) // BC1F/BC1T
8729           {
8730             alloc_cc(&current,i-1);
8731             dirty_reg(&current,CCREG);
8732             if(itype[i]==FCOMP) {
8733               // The delay slot overwrote the branch condition
8734               // Delay slot goes after the test (in order)
8735               delayslot_alloc(&current,i);
8736               current.isconst=0;
8737             }
8738             else
8739             {
8740               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
8741               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
8742               // Alloc the branch condition register
8743               alloc_reg(&current,i-1,FSREG);
8744             }
8745             memcpy(&branch_regs[i-1],&current,sizeof(current));
8746             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
8747           }
8748           else // BC1FL/BC1TL
8749           {
8750             // Alloc the delay slot in case the branch is taken
8751             memcpy(&branch_regs[i-1],&current,sizeof(current));
8752             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8753             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
8754             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
8755             alloc_cc(&branch_regs[i-1],i);
8756             dirty_reg(&branch_regs[i-1],CCREG);
8757             delayslot_alloc(&branch_regs[i-1],i);
8758             branch_regs[i-1].isconst=0;
8759             alloc_reg(&current,i,CCREG); // Not taken path
8760             dirty_reg(&current,CCREG);
8761             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
8762           }
8763           break;
8764       }
8765
8766       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
8767       {
8768         if(rt1[i-1]==31) // JAL/JALR
8769         {
8770           // Subroutine call will return here, don't alloc any registers
8771           current.is32=1;
8772           current.dirty=0;
8773           clear_all_regs(current.regmap);
8774           alloc_reg(&current,i,CCREG);
8775           dirty_reg(&current,CCREG);
8776         }
8777         else if(i+1<slen)
8778         {
8779           // Internal branch will jump here, match registers to caller
8780           current.is32=0x3FFFFFFFFLL;
8781           current.dirty=0;
8782           clear_all_regs(current.regmap);
8783           alloc_reg(&current,i,CCREG);
8784           dirty_reg(&current,CCREG);
8785           for(j=i-1;j>=0;j--)
8786           {
8787             if(ba[j]==start+i*4+4) {
8788               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
8789               current.is32=branch_regs[j].is32;
8790               current.dirty=branch_regs[j].dirty;
8791               break;
8792             }
8793           }
8794           while(j>=0) {
8795             if(ba[j]==start+i*4+4) {
8796               for(hr=0;hr<HOST_REGS;hr++) {
8797                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
8798                   current.regmap[hr]=-1;
8799                 }
8800                 current.is32&=branch_regs[j].is32;
8801                 current.dirty&=branch_regs[j].dirty;
8802               }
8803             }
8804             j--;
8805           }
8806         }
8807       }
8808     }
8809
8810     // Count cycles in between branches
8811     ccadj[i]=cc;
8812     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
8813     {
8814       cc=0;
8815     }
8816 #if !defined(DRC_DBG)
8817     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
8818     {
8819       // GTE runs in parallel until accessed, divide by 2 for a rough guess
8820       cc+=gte_cycletab[source[i]&0x3f]/2;
8821     }
8822     else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // load,store causes weird timing issues
8823     {
8824       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
8825     }
8826     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
8827     {
8828       cc+=4;
8829     }
8830     else if(itype[i]==C2LS)
8831     {
8832       cc+=4;
8833     }
8834 #endif
8835     else
8836     {
8837       cc++;
8838     }
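    // ccadj[i] is the approximate cycle count accumulated since the last
    // branch or syscall; CCREG is only adjusted at those points, so straight
    // line code costs about one cycle per instruction plus the store/GTE/
    // coprocessor-load penalties added above.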
8839
8840     flush_dirty_uppers(&current);
8841     if(!is_ds[i]) {
8842       regs[i].is32=current.is32;
8843       regs[i].dirty=current.dirty;
8844       regs[i].isconst=current.isconst;
8845       memcpy(constmap[i],current_constmap,sizeof(current_constmap));
8846     }
8847     for(hr=0;hr<HOST_REGS;hr++) {
8848       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
8849         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
8850           regs[i].wasconst&=~(1<<hr);
8851         }
8852       }
8853     }
8854     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
8855     regs[i].waswritten=current.waswritten;
8856   }
8857
8858   /* Pass 4 - Cull unused host registers */
8859
8860   uint64_t nr=0;
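  // nr is a bitmap over host registers: a set bit means the value currently
  // mapped to that register is still needed later.  The block is walked
  // backwards, seeding nr at branches and targets and clearing bits for
  // values that are overwritten before their next use.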
8861
8862   for (i=slen-1;i>=0;i--)
8863   {
8864     int hr;
8865     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
8866     {
8867       if(ba[i]<start || ba[i]>=(start+slen*4))
8868       {
8869         // Branch out of this block, don't need anything
8870         nr=0;
8871       }
8872       else
8873       {
8874         // Internal branch
8875         // Need whatever matches the target
8876         nr=0;
8877         int t=(ba[i]-start)>>2;
8878         for(hr=0;hr<HOST_REGS;hr++)
8879         {
8880           if(regs[i].regmap_entry[hr]>=0) {
8881             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
8882           }
8883         }
8884       }
8885       // Conditional branch may need registers for following instructions
8886       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
8887       {
8888         if(i<slen-2) {
8889           nr|=needed_reg[i+2];
8890           for(hr=0;hr<HOST_REGS;hr++)
8891           {
8892             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
8893             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
8894           }
8895         }
8896       }
8897       // Don't need stuff which is overwritten
8898       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8899       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8900       // Merge in delay slot
8901       for(hr=0;hr<HOST_REGS;hr++)
8902       {
8903         if(!likely[i]) {
8904           // These are overwritten unless the branch is "likely"
8905           // and the delay slot is nullified if not taken
8906           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8907           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8908         }
8909         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8910         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8911         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8912         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
8913         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8914         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8915         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8916         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8917         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
8918           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8919           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8920         }
8921         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
8922           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8923           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8924         }
8925         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
8926           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8927           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8928         }
8929       }
8930     }
8931     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
8932     {
8933       // SYSCALL instruction (software interrupt)
8934       nr=0;
8935     }
8936     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
8937     {
8938       // ERET instruction (return from interrupt)
8939       nr=0;
8940     }
8941     else // Non-branch
8942     {
8943       if(i<slen-1) {
8944         for(hr=0;hr<HOST_REGS;hr++) {
8945           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
8946           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
8947           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
8948           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
8949         }
8950       }
8951     }
8952     for(hr=0;hr<HOST_REGS;hr++)
8953     {
8954       // Overwritten registers are not needed
8955       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8956       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8957       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
8958       // Source registers are needed
8959       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8960       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8961       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
8962       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
8963       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8964       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8965       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8966       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
8967       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
8968         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8969         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8970       }
8971       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
8972         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8973         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8974       }
8975       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
8976         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
8977         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
8978       }
8979       // Don't store a register immediately after writing it,
8980       // as that may prevent dual-issue.
8981       // But do so if this is a branch target, otherwise we
8982       // might have to load the register before the branch.
8983       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
8984         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
8985            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
8986           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8987           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
8988         }
8989         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
8990            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
8991           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8992           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
8993         }
8994       }
8995     }
8996     // Cycle count is needed at branches.  Assume it is needed at the target too.
8997     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
8998       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
8999       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9000     }
9001     // Save it
9002     needed_reg[i]=nr;
9003
9004     // Deallocate unneeded registers
9005     for(hr=0;hr<HOST_REGS;hr++)
9006     {
9007       if(!((nr>>hr)&1)) {
9008         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9009         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9010            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9011            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9012         {
9013           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9014           {
9015             if(likely[i]) {
9016               regs[i].regmap[hr]=-1;
9017               regs[i].isconst&=~(1<<hr);
9018               if(i<slen-2) {
9019                 regmap_pre[i+2][hr]=-1;
9020                 regs[i+2].wasconst&=~(1<<hr);
9021               }
9022             }
9023           }
9024         }
9025         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9026         {
9027           int d1=0,d2=0,map=0,temp=0;
9028           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9029           {
9030             d1=dep1[i+1];
9031             d2=dep2[i+1];
9032           }
9033           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9034              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9035             map=INVCP;
9036           }
9037           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9038              itype[i+1]==C1LS || itype[i+1]==C2LS)
9039             temp=FTEMP;
9040           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9041              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9042              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9043              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9044              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9045              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9046              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9047              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9048              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9049              regs[i].regmap[hr]!=map )
9050           {
9051             regs[i].regmap[hr]=-1;
9052             regs[i].isconst&=~(1<<hr);
9053             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9054                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9055                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9056                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9057                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9058                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9059                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9060                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9061                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9062                branch_regs[i].regmap[hr]!=map)
9063             {
9064               branch_regs[i].regmap[hr]=-1;
9065               branch_regs[i].regmap_entry[hr]=-1;
9066               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9067               {
9068                 if(!likely[i]&&i<slen-2) {
9069                   regmap_pre[i+2][hr]=-1;
9070                   regs[i+2].wasconst&=~(1<<hr);
9071                 }
9072               }
9073             }
9074           }
9075         }
9076         else
9077         {
9078           // Non-branch
9079           if(i>0)
9080           {
9081             int d1=0,d2=0,map=-1,temp=-1;
9082             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9083             {
9084               d1=dep1[i];
9085               d2=dep2[i];
9086             }
9087             if(itype[i]==STORE || itype[i]==STORELR ||
9088                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9089               map=INVCP;
9090             }
9091             if(itype[i]==LOADLR || itype[i]==STORELR ||
9092                itype[i]==C1LS || itype[i]==C2LS)
9093               temp=FTEMP;
9094             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9095                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
9096                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9097                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
9098                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
9099                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
9100             {
9101               if(i<slen-1&&!is_ds[i]) {
9102                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
9103                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
9104                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
9105                 {
9106                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
9107                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
9108                 }
9109                 regmap_pre[i+1][hr]=-1;
9110                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
9111                 regs[i+1].wasconst&=~(1<<hr);
9112               }
9113               regs[i].regmap[hr]=-1;
9114               regs[i].isconst&=~(1<<hr);
9115             }
9116           }
9117         }
9118       }
9119     }
9120   }
9121
9122   /* Pass 5 - Pre-allocate registers */
9123
9124   // If a register is allocated during a loop, try to allocate it for the
9125   // entire loop, if possible.  This avoids loading/storing registers
9126   // inside of the loop.
9127
9128   signed char f_regmap[HOST_REGS];
9129   clear_all_regs(f_regmap);
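  // f_regmap is the tentative whole-loop mapping: when a backwards branch
  // (a loop) is found, try to extend the registers live at the branch back to
  // the loop entry so they stay allocated across every iteration instead of
  // being reloaded each time around.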
9130   for(i=0;i<slen-1;i++)
9131   {
9132     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9133     {
9134       if(ba[i]>=start && ba[i]<(start+i*4))
9135       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
9136       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
9137       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
9138       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
9139       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
9140       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
9141       {
9142         int t=(ba[i]-start)>>2;
9143         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
9144         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
9145         for(hr=0;hr<HOST_REGS;hr++)
9146         {
9147           if(regs[i].regmap[hr]>64) {
9148             if(!((regs[i].dirty>>hr)&1))
9149               f_regmap[hr]=regs[i].regmap[hr];
9150             else f_regmap[hr]=-1;
9151           }
9152           else if(regs[i].regmap[hr]>=0) {
9153             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9154               // dealloc old register
9155               int n;
9156               for(n=0;n<HOST_REGS;n++)
9157               {
9158                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9159               }
9160               // and alloc new one
9161               f_regmap[hr]=regs[i].regmap[hr];
9162             }
9163           }
9164           if(branch_regs[i].regmap[hr]>64) {
9165             if(!((branch_regs[i].dirty>>hr)&1))
9166               f_regmap[hr]=branch_regs[i].regmap[hr];
9167             else f_regmap[hr]=-1;
9168           }
9169           else if(branch_regs[i].regmap[hr]>=0) {
9170             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
9171               // dealloc old register
9172               int n;
9173               for(n=0;n<HOST_REGS;n++)
9174               {
9175                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
9176               }
9177               // and alloc new one
9178               f_regmap[hr]=branch_regs[i].regmap[hr];
9179             }
9180           }
9181           if(ooo[i]) {
9182             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
9183               f_regmap[hr]=branch_regs[i].regmap[hr];
9184           }else{
9185             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
9186               f_regmap[hr]=branch_regs[i].regmap[hr];
9187           }
9188           // Avoid dirty->clean transition
9189           #ifdef DESTRUCTIVE_WRITEBACK
9190           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
9191           #endif
9192           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
9193           // case above; however, it's always a good idea.  We can't hoist the
9194           // load if the register was already allocated, so there's no point
9195           // wasting time analyzing most of these cases.  It only "succeeds"
9196           // when the mapping was different and the load can be replaced with
9197           // a mov, which is of negligible benefit.  So such cases are
9198           // skipped below.
9199           if(f_regmap[hr]>0) {
9200             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
9201               int r=f_regmap[hr];
9202               for(j=t;j<=i;j++)
9203               {
9204                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9205                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
9206                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
9207                 if(r>63) {
9208                   // NB This can exclude the case where the upper-half
9209                   // register is lower numbered than the lower-half
9210                   // register.  Not sure if it's worth fixing...
9211                   if(get_reg(regs[j].regmap,r&63)<0) break;
9212                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
9213                   if(regs[j].is32&(1LL<<(r&63))) break;
9214                 }
9215                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
9216                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
9217                   int k;
9218                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
9219                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
9220                     if(r>63) {
9221                       if(get_reg(regs[i].regmap,r&63)<0) break;
9222                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
9223                     }
9224                     k=i;
9225                     while(k>1&&regs[k-1].regmap[hr]==-1) {
9226                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9227                         //printf("no free regs for store %x\n",start+(k-1)*4);
9228                         break;
9229                       }
9230                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
9231                         //printf("no-match due to different register\n");
9232                         break;
9233                       }
9234                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
9235                         //printf("no-match due to branch\n");
9236                         break;
9237                       }
9238                       // call/ret fast path assumes no registers allocated
9239                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
9240                         break;
9241                       }
9242                       if(r>63) {
9243                         // NB This can exclude the case where the upper-half
9244                         // register is lower numbered than the lower-half
9245                         // register.  Not sure if it's worth fixing...
9246                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
9247                         if(regs[k-1].is32&(1LL<<(r&63))) break;
9248                       }
9249                       k--;
9250                     }
9251                     if(i<slen-1) {
9252                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
9253                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
9254                         //printf("bad match after branch\n");
9255                         break;
9256                       }
9257                     }
9258                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
9259                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
9260                       while(k<i) {
9261                         regs[k].regmap_entry[hr]=f_regmap[hr];
9262                         regs[k].regmap[hr]=f_regmap[hr];
9263                         regmap_pre[k+1][hr]=f_regmap[hr];
9264                         regs[k].wasdirty&=~(1<<hr);
9265                         regs[k].dirty&=~(1<<hr);
9266                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
9267                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
9268                         regs[k].wasconst&=~(1<<hr);
9269                         regs[k].isconst&=~(1<<hr);
9270                         k++;
9271                       }
9272                     }
9273                     else {
9274                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
9275                       break;
9276                     }
9277                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
9278                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
9279                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
9280                       regs[i].regmap_entry[hr]=f_regmap[hr];
9281                       regs[i].regmap[hr]=f_regmap[hr];
9282                       regs[i].wasdirty&=~(1<<hr);
9283                       regs[i].dirty&=~(1<<hr);
9284                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
9285                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
9286                       regs[i].wasconst&=~(1<<hr);
9287                       regs[i].isconst&=~(1<<hr);
9288                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
9289                       branch_regs[i].wasdirty&=~(1<<hr);
9290                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
9291                       branch_regs[i].regmap[hr]=f_regmap[hr];
9292                       branch_regs[i].dirty&=~(1<<hr);
9293                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
9294                       branch_regs[i].wasconst&=~(1<<hr);
9295                       branch_regs[i].isconst&=~(1<<hr);
9296                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
9297                         regmap_pre[i+2][hr]=f_regmap[hr];
9298                         regs[i+2].wasdirty&=~(1<<hr);
9299                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
9300                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
9301                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
9302                       }
9303                     }
9304                   }
9305                   for(k=t;k<j;k++) {
9306                     // Alloc register clean at beginning of loop,
9307                     // but may dirty it in pass 6
9308                     regs[k].regmap_entry[hr]=f_regmap[hr];
9309                     regs[k].regmap[hr]=f_regmap[hr];
9310                     regs[k].dirty&=~(1<<hr);
9311                     regs[k].wasconst&=~(1<<hr);
9312                     regs[k].isconst&=~(1<<hr);
9313                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
9314                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
9315                       branch_regs[k].regmap[hr]=f_regmap[hr];
9316                       branch_regs[k].dirty&=~(1<<hr);
9317                       branch_regs[k].wasconst&=~(1<<hr);
9318                       branch_regs[k].isconst&=~(1<<hr);
9319                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
9320                         regmap_pre[k+2][hr]=f_regmap[hr];
9321                         regs[k+2].wasdirty&=~(1<<hr);
9322                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
9323                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
9324                       }
9325                     }
9326                     else
9327                     {
9328                       regmap_pre[k+1][hr]=f_regmap[hr];
9329                       regs[k+1].wasdirty&=~(1<<hr);
9330                     }
9331                   }
9332                   if(regs[j].regmap[hr]==f_regmap[hr])
9333                     regs[j].regmap_entry[hr]=f_regmap[hr];
9334                   break;
9335                 }
9336                 if(j==i) break;
9337                 if(regs[j].regmap[hr]>=0)
9338                   break;
9339                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
9340                   //printf("no-match due to different register\n");
9341                   break;
9342                 }
9343                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
9344                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
9345                   break;
9346                 }
9347                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9348                 {
9349                   // Stop on unconditional branch
9350                   break;
9351                 }
9352                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
9353                 {
9354                   if(ooo[j]) {
9355                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
9356                       break;
9357                   }else{
9358                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
9359                       break;
9360                   }
9361                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
9362                     //printf("no-match due to different register (branch)\n");
9363                     break;
9364                   }
9365                 }
9366                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9367                   //printf("No free regs for store %x\n",start+j*4);
9368                   break;
9369                 }
9370                 if(f_regmap[hr]>=64) {
9371                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
9372                     break;
9373                   }
9374                   else
9375                   {
9376                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
9377                       break;
9378                     }
9379                   }
9380                 }
9381               }
9382             }
9383           }
9384         }
9385       }
9386     }else{
9387       // Non branch or undetermined branch target
9388       for(hr=0;hr<HOST_REGS;hr++)
9389       {
9390         if(hr!=EXCLUDE_REG) {
9391           if(regs[i].regmap[hr]>64) {
9392             if(!((regs[i].dirty>>hr)&1))
9393               f_regmap[hr]=regs[i].regmap[hr];
9394           }
9395           else if(regs[i].regmap[hr]>=0) {
9396             if(f_regmap[hr]!=regs[i].regmap[hr]) {
9397               // dealloc old register
9398               int n;
9399               for(n=0;n<HOST_REGS;n++)
9400               {
9401                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
9402               }
9403               // and alloc new one
9404               f_regmap[hr]=regs[i].regmap[hr];
9405             }
9406           }
9407         }
9408       }
9409       // Try to restore cycle count at branch targets
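      // Keep the cycle count (CCREG) cached in HOST_CCREG across a branch
      // target: extend the allocation forward until an instruction already
      // maps HOST_CCREG, and backward over preceding instructions that left
      // it unallocated, so the counter need not be reloaded from memory.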
9410       if(bt[i]) {
9411         for(j=i;j<slen-1;j++) {
9412           if(regs[j].regmap[HOST_CCREG]!=-1) break;
9413           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
9414             //printf("no free regs for store %x\n",start+j*4);
9415             break;
9416           }
9417         }
9418         if(regs[j].regmap[HOST_CCREG]==CCREG) {
9419           int k=i;
9420           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
9421           while(k<j) {
9422             regs[k].regmap_entry[HOST_CCREG]=CCREG;
9423             regs[k].regmap[HOST_CCREG]=CCREG;
9424             regmap_pre[k+1][HOST_CCREG]=CCREG;
9425             regs[k+1].wasdirty|=1<<HOST_CCREG;
9426             regs[k].dirty|=1<<HOST_CCREG;
9427             regs[k].wasconst&=~(1<<HOST_CCREG);
9428             regs[k].isconst&=~(1<<HOST_CCREG);
9429             k++;
9430           }
9431           regs[j].regmap_entry[HOST_CCREG]=CCREG;
9432         }
9433         // Work backwards from the branch target
9434         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
9435         {
9436           //printf("Extend backwards\n");
9437           int k;
9438           k=i;
9439           while(regs[k-1].regmap[HOST_CCREG]==-1) {
9440             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
9441               //printf("no free regs for store %x\n",start+(k-1)*4);
9442               break;
9443             }
9444             k--;
9445           }
9446           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
9447             //printf("Extend CC, %x ->\n",start+k*4);
9448             while(k<=i) {
9449               regs[k].regmap_entry[HOST_CCREG]=CCREG;
9450               regs[k].regmap[HOST_CCREG]=CCREG;
9451               regmap_pre[k+1][HOST_CCREG]=CCREG;
9452               regs[k+1].wasdirty|=1<<HOST_CCREG;
9453               regs[k].dirty|=1<<HOST_CCREG;
9454               regs[k].wasconst&=~(1<<HOST_CCREG);
9455               regs[k].isconst&=~(1<<HOST_CCREG);
9456               k++;
9457             }
9458           }
9459           else {
9460             //printf("Fail Extend CC, %x ->\n",start+k*4);
9461           }
9462         }
9463       }
9464       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
9465          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
9466          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
9467          itype[i]!=FCONV&&itype[i]!=FCOMP)
9468       {
9469         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
9470       }
9471     }
9472   }
9473
9474   // Cache memory offset or tlb map pointer if a register is available
9475   #ifndef HOST_IMM_ADDR32
9476   #ifndef RAM_OFFSET
9477   if(0)
9478   #endif
9479   {
9480     int earliest_available[HOST_REGS];
9481     int loop_start[HOST_REGS];
9482     int score[HOST_REGS];
9483     int end[HOST_REGS];
9484     int reg=ROREG;
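    // Heuristic: walk the block and, for each host register that stays free,
    // count the loads/stores that could benefit from keeping the RAM offset
    // (ROREG) cached in it; backward-branch targets score extra so the value
    // can stay live across a loop.  Registers already in use, branches with
    // no spare registers, and unconditional jumps reset the running score.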
9485
9486     // Init
9487     for(hr=0;hr<HOST_REGS;hr++) {
9488       score[hr]=0;earliest_available[hr]=0;
9489       loop_start[hr]=MAXBLOCK;
9490     }
9491     for(i=0;i<slen-1;i++)
9492     {
9493       // Can't do anything if no registers are available
9494       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
9495         for(hr=0;hr<HOST_REGS;hr++) {
9496           score[hr]=0;earliest_available[hr]=i+1;
9497           loop_start[hr]=MAXBLOCK;
9498         }
9499       }
9500       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9501         if(!ooo[i]) {
9502           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
9503             for(hr=0;hr<HOST_REGS;hr++) {
9504               score[hr]=0;earliest_available[hr]=i+1;
9505               loop_start[hr]=MAXBLOCK;
9506             }
9507           }
9508         }else{
9509           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
9510             for(hr=0;hr<HOST_REGS;hr++) {
9511               score[hr]=0;earliest_available[hr]=i+1;
9512               loop_start[hr]=MAXBLOCK;
9513             }
9514           }
9515         }
9516       }
9517       // Mark unavailable registers
9518       for(hr=0;hr<HOST_REGS;hr++) {
9519         if(regs[i].regmap[hr]>=0) {
9520           score[hr]=0;earliest_available[hr]=i+1;
9521           loop_start[hr]=MAXBLOCK;
9522         }
9523         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9524           if(branch_regs[i].regmap[hr]>=0) {
9525             score[hr]=0;earliest_available[hr]=i+2;
9526             loop_start[hr]=MAXBLOCK;
9527           }
9528         }
9529       }
9530       // No register allocations after unconditional jumps
9531       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
9532       {
9533         for(hr=0;hr<HOST_REGS;hr++) {
9534           score[hr]=0;earliest_available[hr]=i+2;
9535           loop_start[hr]=MAXBLOCK;
9536         }
9537         i++; // Skip delay slot too
9538         //printf("skip delay slot: %x\n",start+i*4);
9539       }
9540       else
9541       // Possible match
9542       if(itype[i]==LOAD||itype[i]==LOADLR||
9543          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
9544         for(hr=0;hr<HOST_REGS;hr++) {
9545           if(hr!=EXCLUDE_REG) {
9546             end[hr]=i-1;
9547             for(j=i;j<slen-1;j++) {
9548               if(regs[j].regmap[hr]>=0) break;
9549               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9550                 if(branch_regs[j].regmap[hr]>=0) break;
9551                 if(ooo[j]) {
9552                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
9553                 }else{
9554                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
9555                 }
9556               }
9557               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
9558               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9559                 int t=(ba[j]-start)>>2;
9560                 if(t<j&&t>=earliest_available[hr]) {
9561                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
9562                     // Score a point for hoisting loop invariant
9563                     if(t<loop_start[hr]) loop_start[hr]=t;
9564                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
9565                     score[hr]++;
9566                     end[hr]=j;
9567                   }
9568                 }
9569                 else if(t<j) {
9570                   if(regs[t].regmap[hr]==reg) {
9571                     // Score a point if the branch target matches this register
9572                     score[hr]++;
9573                     end[hr]=j;
9574                   }
9575                 }
9576                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
9577                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
9578                   score[hr]++;
9579                   end[hr]=j;
9580                 }
9581               }
9582               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
9583               {
9584                 // Stop on unconditional branch
9585                 break;
9586               }
9587               else
9588               if(itype[j]==LOAD||itype[j]==LOADLR||
9589                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
9590                 score[hr]++;
9591                 end[hr]=j;
9592               }
9593             }
9594           }
9595         }
9596         // Find highest score and allocate that register
9597         int maxscore=0;
9598         for(hr=0;hr<HOST_REGS;hr++) {
9599           if(hr!=EXCLUDE_REG) {
9600             if(score[hr]>score[maxscore]) {
9601               maxscore=hr;
9602               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
9603             }
9604           }
9605         }
9606         if(score[maxscore]>1)
9607         {
9608           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
9609           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
9610             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
9611             assert(regs[j].regmap[maxscore]<0);
9612             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
9613             regs[j].regmap[maxscore]=reg;
9614             regs[j].dirty&=~(1<<maxscore);
9615             regs[j].wasconst&=~(1<<maxscore);
9616             regs[j].isconst&=~(1<<maxscore);
9617             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
9618               branch_regs[j].regmap[maxscore]=reg;
9619               branch_regs[j].wasdirty&=~(1<<maxscore);
9620               branch_regs[j].dirty&=~(1<<maxscore);
9621               branch_regs[j].wasconst&=~(1<<maxscore);
9622               branch_regs[j].isconst&=~(1<<maxscore);
9623               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
9624                 regmap_pre[j+2][maxscore]=reg;
9625                 regs[j+2].wasdirty&=~(1<<maxscore);
9626               }
9627               // loop optimization (loop_preload)
9628               int t=(ba[j]-start)>>2;
9629               if(t==loop_start[maxscore]) {
9630                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
9631                   regs[t].regmap_entry[maxscore]=reg;
9632               }
9633             }
9634             else
9635             {
9636               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
9637                 regmap_pre[j+1][maxscore]=reg;
9638                 regs[j+1].wasdirty&=~(1<<maxscore);
9639               }
9640             }
9641           }
9642           i=j-1;
9643           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
9644           for(hr=0;hr<HOST_REGS;hr++) {
9645             score[hr]=0;earliest_available[hr]=i+1;
9646             loop_start[hr]=MAXBLOCK;
9647           }
9648         }
9649       }
9650     }
9651   }
9652   #endif
9653
9654   // This allocates registers (if possible) one instruction prior
9655   // to use, which can avoid a load-use penalty on certain CPUs.
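  // Only simple single-cycle instructions are considered (ALU/MOV/LOAD/
  // SHIFTIMM/IMM16 and coprocessor move forms), only when the following
  // instruction is not a branch target, and only for host registers that are
  // free in the current slot; the next instruction's mapping, constants and
  // dirty state are then pulled back one slot so the value is ready early.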
9656   for(i=0;i<slen-1;i++)
9657   {
9658     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
9659     {
9660       if(!bt[i+1])
9661       {
9662         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
9663            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
9664         {
9665           if(rs1[i+1]) {
9666             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
9667             {
9668               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9669               {
9670                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9671                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9672                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9673                 regs[i].isconst&=~(1<<hr);
9674                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9675                 constmap[i][hr]=constmap[i+1][hr];
9676                 regs[i+1].wasdirty&=~(1<<hr);
9677                 regs[i].dirty&=~(1<<hr);
9678               }
9679             }
9680           }
9681           if(rs2[i+1]) {
9682             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
9683             {
9684               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9685               {
9686                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
9687                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
9688                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
9689                 regs[i].isconst&=~(1<<hr);
9690                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9691                 constmap[i][hr]=constmap[i+1][hr];
9692                 regs[i+1].wasdirty&=~(1<<hr);
9693                 regs[i].dirty&=~(1<<hr);
9694               }
9695             }
9696           }
9697           // Preload target address for load instruction (non-constant)
9698           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9699             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9700             {
9701               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9702               {
9703                 regs[i].regmap[hr]=rs1[i+1];
9704                 regmap_pre[i+1][hr]=rs1[i+1];
9705                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9706                 regs[i].isconst&=~(1<<hr);
9707                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9708                 constmap[i][hr]=constmap[i+1][hr];
9709                 regs[i+1].wasdirty&=~(1<<hr);
9710                 regs[i].dirty&=~(1<<hr);
9711               }
9712             }
9713           }
9714           // Load source into target register
9715           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9716             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
9717             {
9718               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9719               {
9720                 regs[i].regmap[hr]=rs1[i+1];
9721                 regmap_pre[i+1][hr]=rs1[i+1];
9722                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9723                 regs[i].isconst&=~(1<<hr);
9724                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9725                 constmap[i][hr]=constmap[i+1][hr];
9726                 regs[i+1].wasdirty&=~(1<<hr);
9727                 regs[i].dirty&=~(1<<hr);
9728               }
9729             }
9730           }
9731           // Address for store instruction (non-constant)
9732           if(itype[i+1]==STORE||itype[i+1]==STORELR
9733              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
9734             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9735               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
9736               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9737               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
9738               assert(hr>=0);
9739               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9740               {
9741                 regs[i].regmap[hr]=rs1[i+1];
9742                 regmap_pre[i+1][hr]=rs1[i+1];
9743                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9744                 regs[i].isconst&=~(1<<hr);
9745                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9746                 constmap[i][hr]=constmap[i+1][hr];
9747                 regs[i+1].wasdirty&=~(1<<hr);
9748                 regs[i].dirty&=~(1<<hr);
9749               }
9750             }
9751           }
9752           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
9753             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
9754               int nr;
9755               hr=get_reg(regs[i+1].regmap,FTEMP);
9756               assert(hr>=0);
9757               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
9758               {
9759                 regs[i].regmap[hr]=rs1[i+1];
9760                 regmap_pre[i+1][hr]=rs1[i+1];
9761                 regs[i+1].regmap_entry[hr]=rs1[i+1];
9762                 regs[i].isconst&=~(1<<hr);
9763                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
9764                 constmap[i][hr]=constmap[i+1][hr];
9765                 regs[i+1].wasdirty&=~(1<<hr);
9766                 regs[i].dirty&=~(1<<hr);
9767               }
9768               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
9769               {
9770                 // move it to another register
9771                 regs[i+1].regmap[hr]=-1;
9772                 regmap_pre[i+2][hr]=-1;
9773                 regs[i+1].regmap[nr]=FTEMP;
9774                 regmap_pre[i+2][nr]=FTEMP;
9775                 regs[i].regmap[nr]=rs1[i+1];
9776                 regmap_pre[i+1][nr]=rs1[i+1];
9777                 regs[i+1].regmap_entry[nr]=rs1[i+1];
9778                 regs[i].isconst&=~(1<<nr);
9779                 regs[i+1].isconst&=~(1<<nr);
9780                 regs[i].dirty&=~(1<<nr);
9781                 regs[i+1].wasdirty&=~(1<<nr);
9782                 regs[i+1].dirty&=~(1<<nr);
9783                 regs[i+2].wasdirty&=~(1<<nr);
9784               }
9785             }
9786           }
9787           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
9788             if(itype[i+1]==LOAD)
9789               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
9790             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
9791               hr=get_reg(regs[i+1].regmap,FTEMP);
9792             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
9793               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
9794               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
9795             }
9796             if(hr>=0&&regs[i].regmap[hr]<0) {
9797               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
9798               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
9799                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
9800                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
9801                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
9802                 regs[i].isconst&=~(1<<hr);
9803                 regs[i+1].wasdirty&=~(1<<hr);
9804                 regs[i].dirty&=~(1<<hr);
9805               }
9806             }
9807           }
9808         }
9809       }
9810     }
9811   }
9812
9813   /* Pass 6 - Optimize clean/dirty state */
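  // clean_registers() works out where cached values really have to be
  // written back to the MIPS register file, so that registers which are
  // already up to date or about to be overwritten are not stored again.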
9814   clean_registers(0,slen-1,1);
9815
9816   /* Pass 7 - Identify 32-bit registers */
9817   for (i=slen-1;i>=0;i--)
9818   {
9819     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9820     {
9821       // Conditional branch
9822       if((source[i]>>16)!=0x1000&&i<slen-2) {
9823         // Mark this address as a branch target since it may be called
9824         // upon return from interrupt
9825         bt[i+2]=1;
9826       }
9827     }
9828   }
9829
9830   if(itype[slen-1]==SPAN) {
9831     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
9832   }
9833
9834 #ifdef DISASM
9835   /* Debug/disassembly */
9836   for(i=0;i<slen;i++)
9837   {
9838     printf("U:");
9839     int r;
9840     for(r=1;r<=CCREG;r++) {
9841       if((unneeded_reg[i]>>r)&1) {
9842         if(r==HIREG) printf(" HI");
9843         else if(r==LOREG) printf(" LO");
9844         else printf(" r%d",r);
9845       }
9846     }
9847     printf("\n");
9848     #if defined(__i386__) || defined(__x86_64__)
9849     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
9850     #endif
9851     #ifdef __arm__
9852     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
9853     #endif
9854     printf("needs: ");
9855     if(needed_reg[i]&1) printf("eax ");
9856     if((needed_reg[i]>>1)&1) printf("ecx ");
9857     if((needed_reg[i]>>2)&1) printf("edx ");
9858     if((needed_reg[i]>>3)&1) printf("ebx ");
9859     if((needed_reg[i]>>5)&1) printf("ebp ");
9860     if((needed_reg[i]>>6)&1) printf("esi ");
9861     if((needed_reg[i]>>7)&1) printf("edi ");
9862     printf("\n");
9863     #if defined(__i386__) || defined(__x86_64__)
9864     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
9865     printf("dirty: ");
9866     if(regs[i].wasdirty&1) printf("eax ");
9867     if((regs[i].wasdirty>>1)&1) printf("ecx ");
9868     if((regs[i].wasdirty>>2)&1) printf("edx ");
9869     if((regs[i].wasdirty>>3)&1) printf("ebx ");
9870     if((regs[i].wasdirty>>5)&1) printf("ebp ");
9871     if((regs[i].wasdirty>>6)&1) printf("esi ");
9872     if((regs[i].wasdirty>>7)&1) printf("edi ");
9873     #endif
9874     #ifdef __arm__
9875     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
9876     printf("dirty: ");
9877     if(regs[i].wasdirty&1) printf("r0 ");
9878     if((regs[i].wasdirty>>1)&1) printf("r1 ");
9879     if((regs[i].wasdirty>>2)&1) printf("r2 ");
9880     if((regs[i].wasdirty>>3)&1) printf("r3 ");
9881     if((regs[i].wasdirty>>4)&1) printf("r4 ");
9882     if((regs[i].wasdirty>>5)&1) printf("r5 ");
9883     if((regs[i].wasdirty>>6)&1) printf("r6 ");
9884     if((regs[i].wasdirty>>7)&1) printf("r7 ");
9885     if((regs[i].wasdirty>>8)&1) printf("r8 ");
9886     if((regs[i].wasdirty>>9)&1) printf("r9 ");
9887     if((regs[i].wasdirty>>10)&1) printf("r10 ");
9888     if((regs[i].wasdirty>>12)&1) printf("r12 ");
9889     #endif
9890     printf("\n");
9891     disassemble_inst(i);
9892     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
9893     #if defined(__i386__) || defined(__x86_64__)
9894     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
9895     if(regs[i].dirty&1) printf("eax ");
9896     if((regs[i].dirty>>1)&1) printf("ecx ");
9897     if((regs[i].dirty>>2)&1) printf("edx ");
9898     if((regs[i].dirty>>3)&1) printf("ebx ");
9899     if((regs[i].dirty>>5)&1) printf("ebp ");
9900     if((regs[i].dirty>>6)&1) printf("esi ");
9901     if((regs[i].dirty>>7)&1) printf("edi ");
9902     #endif
9903     #ifdef __arm__
9904     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
9905     if(regs[i].dirty&1) printf("r0 ");
9906     if((regs[i].dirty>>1)&1) printf("r1 ");
9907     if((regs[i].dirty>>2)&1) printf("r2 ");
9908     if((regs[i].dirty>>3)&1) printf("r3 ");
9909     if((regs[i].dirty>>4)&1) printf("r4 ");
9910     if((regs[i].dirty>>5)&1) printf("r5 ");
9911     if((regs[i].dirty>>6)&1) printf("r6 ");
9912     if((regs[i].dirty>>7)&1) printf("r7 ");
9913     if((regs[i].dirty>>8)&1) printf("r8 ");
9914     if((regs[i].dirty>>9)&1) printf("r9 ");
9915     if((regs[i].dirty>>10)&1) printf("r10 ");
9916     if((regs[i].dirty>>12)&1) printf("r12 ");
9917     #endif
9918     printf("\n");
9919     if(regs[i].isconst) {
9920       printf("constants: ");
9921       #if defined(__i386__) || defined(__x86_64__)
9922       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
9923       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
9924       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
9925       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
9926       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
9927       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
9928       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
9929       #endif
9930       #ifdef __arm__
9931       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
9932       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
9933       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
9934       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
9935       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
9936       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
9937       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
9938       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
9939       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
9940       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
9941       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
9942       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
9943       #endif
9944       printf("\n");
9945     }
9946     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
9947       #if defined(__i386__) || defined(__x86_64__)
9948       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
9949       if(branch_regs[i].dirty&1) printf("eax ");
9950       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
9951       if((branch_regs[i].dirty>>2)&1) printf("edx ");
9952       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
9953       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
9954       if((branch_regs[i].dirty>>6)&1) printf("esi ");
9955       if((branch_regs[i].dirty>>7)&1) printf("edi ");
9956       #endif
9957       #ifdef __arm__
9958       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
9959       if(branch_regs[i].dirty&1) printf("r0 ");
9960       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
9961       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
9962       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
9963       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
9964       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
9965       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
9966       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
9967       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
9968       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
9969       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
9970       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
9971       #endif
9972     }
9973   }
9974 #endif // DISASM
9975
9976   /* Pass 8 - Assembly */
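  // For each instruction: flush registers that were dirty on entry but are no
  // longer mapped, load the registers named in regmap_entry, regenerate known
  // constants and addresses, then emit native code through the per-type
  // *_assemble() helpers.  Branch delay slots are emitted by the branch
  // assembler itself, so when ds is set the slot is skipped here.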
9977   linkcount=0;stubcount=0;
9978   ds=0;is_delayslot=0;
9979   cop1_usable=0;
9980   uint64_t is32_pre=0;
9981   u_int dirty_pre=0;
9982   void *beginning=start_block();
9983   if((u_int)addr&1) {
9984     ds=1;
9985     pagespan_ds();
9986   }
9987   u_int instr_addr0_override=0;
9988
9989   if (start == 0x80030000) {
9990     // nasty hack for fastbios thing
9991     // override block entry to this code
9992     instr_addr0_override=(u_int)out;
9993     emit_movimm(start,0);
9994     // abuse io address var as a flag that we
9995     // have already returned here once
9996     emit_readword((int)&address,1);
9997     emit_writeword(0,(int)&pcaddr);
9998     emit_writeword(0,(int)&address);
9999     emit_cmp(0,1);
10000     emit_jne((int)new_dyna_leave);
10001   }
10002   for(i=0;i<slen;i++)
10003   {
10004     //if(ds) printf("ds: ");
10005     disassemble_inst(i);
10006     if(ds) {
10007       ds=0; // Skip delay slot
10008       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
10009       instr_addr[i]=0;
10010     } else {
10011       speculate_register_values(i);
10012       #ifndef DESTRUCTIVE_WRITEBACK
10013       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10014       {
10015         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
10016               unneeded_reg[i],unneeded_reg_upper[i]);
10017       }
10018       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
10019         is32_pre=branch_regs[i].is32;
10020         dirty_pre=branch_regs[i].dirty;
10021       }else{
10022         is32_pre=regs[i].is32;
10023         dirty_pre=regs[i].dirty;
10024       }
10025       #endif
10026       // write back
10027       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
10028       {
10029         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
10030                       unneeded_reg[i],unneeded_reg_upper[i]);
10031         loop_preload(regmap_pre[i],regs[i].regmap_entry);
10032       }
10033       // branch target entry point
10034       instr_addr[i]=(u_int)out;
10035       assem_debug("<->\n");
10036       // load regs
10037       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
10038         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
10039       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
10040       address_generation(i,&regs[i],regs[i].regmap_entry);
10041       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
10042       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10043       {
10044         // Load the delay slot registers if necessary
10045         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
10046           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10047         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
10048           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10049         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
10050           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10051       }
10052       else if(i+1<slen)
10053       {
10054         // Preload registers for following instruction
10055         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
10056           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
10057             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
10058         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
10059           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
10060             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
10061       }
10062       // TODO: if(is_ooo(i)) address_generation(i+1);
10063       if(itype[i]==CJUMP||itype[i]==FJUMP)
10064         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
10065       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
10066         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
10067       if(bt[i]) cop1_usable=0;
10068       // assemble
10069       switch(itype[i]) {
10070         case ALU:
10071           alu_assemble(i,&regs[i]);break;
10072         case IMM16:
10073           imm16_assemble(i,&regs[i]);break;
10074         case SHIFT:
10075           shift_assemble(i,&regs[i]);break;
10076         case SHIFTIMM:
10077           shiftimm_assemble(i,&regs[i]);break;
10078         case LOAD:
10079           load_assemble(i,&regs[i]);break;
10080         case LOADLR:
10081           loadlr_assemble(i,&regs[i]);break;
10082         case STORE:
10083           store_assemble(i,&regs[i]);break;
10084         case STORELR:
10085           storelr_assemble(i,&regs[i]);break;
10086         case COP0:
10087           cop0_assemble(i,&regs[i]);break;
10088         case COP1:
10089           cop1_assemble(i,&regs[i]);break;
10090         case C1LS:
10091           c1ls_assemble(i,&regs[i]);break;
10092         case COP2:
10093           cop2_assemble(i,&regs[i]);break;
10094         case C2LS:
10095           c2ls_assemble(i,&regs[i]);break;
10096         case C2OP:
10097           c2op_assemble(i,&regs[i]);break;
10098         case FCONV:
10099           fconv_assemble(i,&regs[i]);break;
10100         case FLOAT:
10101           float_assemble(i,&regs[i]);break;
10102         case FCOMP:
10103           fcomp_assemble(i,&regs[i]);break;
10104         case MULTDIV:
10105           multdiv_assemble(i,&regs[i]);break;
10106         case MOV:
10107           mov_assemble(i,&regs[i]);break;
10108         case SYSCALL:
10109           syscall_assemble(i,&regs[i]);break;
10110         case HLECALL:
10111           hlecall_assemble(i,&regs[i]);break;
10112         case INTCALL:
10113           intcall_assemble(i,&regs[i]);break;
10114         case UJUMP:
10115           ujump_assemble(i,&regs[i]);ds=1;break;
10116         case RJUMP:
10117           rjump_assemble(i,&regs[i]);ds=1;break;
10118         case CJUMP:
10119           cjump_assemble(i,&regs[i]);ds=1;break;
10120         case SJUMP:
10121           sjump_assemble(i,&regs[i]);ds=1;break;
10122         case FJUMP:
10123           fjump_assemble(i,&regs[i]);ds=1;break;
10124         case SPAN:
10125           pagespan_assemble(i,&regs[i]);break;
10126       }
10127       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10128         literal_pool(1024);
10129       else
10130         literal_pool_jumpover(256);
10131     }
10132   }
10133   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
10134   // If the block did not end with an unconditional branch,
10135   // add a jump to the next instruction.
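  // The fall-through exit is handled like a taken branch: write back any
  // dirty registers for the successor's entry map, add the cycle count for
  // the instructions executed so far, and emit a jump that Pass 9 will link
  // to the next block.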
10136   if(i>1) {
10137     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
10138       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10139       assert(i==slen);
10140       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
10141         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10142         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10143           emit_loadreg(CCREG,HOST_CCREG);
10144         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10145       }
10146       else if(!likely[i-2])
10147       {
10148         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
10149         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
10150       }
10151       else
10152       {
10153         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
10154         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
10155       }
10156       add_to_linker((int)out,start+i*4,0);
10157       emit_jmp(0);
10158     }
10159   }
10160   else
10161   {
10162     assert(i>0);
10163     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
10164     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
10165     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
10166       emit_loadreg(CCREG,HOST_CCREG);
10167     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
10168     add_to_linker((int)out,start+i*4,0);
10169     emit_jmp(0);
10170   }
10171
10172   // TODO: delay slot stubs?
10173   // Stubs
10174   for(i=0;i<stubcount;i++)
10175   {
10176     switch(stubs[i][0])
10177     {
10178       case LOADB_STUB:
10179       case LOADH_STUB:
10180       case LOADW_STUB:
10181       case LOADD_STUB:
10182       case LOADBU_STUB:
10183       case LOADHU_STUB:
10184         do_readstub(i);break;
10185       case STOREB_STUB:
10186       case STOREH_STUB:
10187       case STOREW_STUB:
10188       case STORED_STUB:
10189         do_writestub(i);break;
10190       case CC_STUB:
10191         do_ccstub(i);break;
10192       case INVCODE_STUB:
10193         do_invstub(i);break;
10194       case FP_STUB:
10195         do_cop1stub(i);break;
10196       case STORELR_STUB:
10197         do_unalignedwritestub(i);break;
10198     }
10199   }
10200
10201   if (instr_addr0_override)
10202     instr_addr[0] = instr_addr0_override;
10203
10204   /* Pass 9 - Linker */
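  // Resolve every branch recorded by add_to_linker().  External targets get
  // an extjump stub; if check_addr() finds the target already compiled, the
  // branch is pointed straight at it and the stub is remembered via
  // add_link() so it can be unlinked later.  Branches within this block are
  // patched directly to the recorded instruction address.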
10205   for(i=0;i<linkcount;i++)
10206   {
10207     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
10208     literal_pool(64);
10209     if(!link_addr[i][2])
10210     {
10211       void *stub=out;
10212       void *addr=check_addr(link_addr[i][1]);
10213       emit_extjump(link_addr[i][0],link_addr[i][1]);
10214       if(addr) {
10215         set_jump_target(link_addr[i][0],(int)addr);
10216         add_link(link_addr[i][1],stub);
10217       }
10218       else set_jump_target(link_addr[i][0],(int)stub);
10219     }
10220     else
10221     {
10222       // Internal branch
10223       int target=(link_addr[i][1]-start)>>2;
10224       assert(target>=0&&target<slen);
10225       assert(instr_addr[target]);
10226       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10227       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
10228       //#else
10229       set_jump_target(link_addr[i][0],instr_addr[target]);
10230       //#endif
10231     }
10232   }
10233   // External Branch Targets (jump_in)
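  // Register entry points for this block: each branch target gets a
  // jump_dirty entry pointing at a stub that revalidates the copied source
  // before re-entering, plus a jump_in entry for the verified entry point,
  // and any stale hash-table slots for the address are updated in place.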
10234   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
10235   for(i=0;i<slen;i++)
10236   {
10237     if(bt[i]||i==0)
10238     {
10239       if(instr_addr[i]) // TODO - delay slots (=null)
10240       {
10241         u_int vaddr=start+i*4;
10242         u_int page=get_page(vaddr);
10243         u_int vpage=get_vpage(vaddr);
10244         literal_pool(256);
10245         {
10246           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
10247           assem_debug("jump_in: %x\n",start+i*4);
10248           ll_add(jump_dirty+vpage,vaddr,(void *)out);
10249           int entry_point=do_dirty_stub(i);
10250           ll_add_flags(jump_in+page,vaddr,state_rflags,(void *)entry_point);
10251           // If there was an existing entry in the hash table,
10252           // replace it with the new address.
10253           // Don't add new entries.  We'll insert the
10254           // ones that actually get used in check_addr().
10255           u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
10256           if(ht_bin[0]==vaddr) {
10257             ht_bin[1]=entry_point;
10258           }
10259           if(ht_bin[2]==vaddr) {
10260             ht_bin[3]=entry_point;
10261           }
10262         }
10263       }
10264     }
10265   }
10266   // Write out the literal pool if necessary
10267   literal_pool(0);
10268   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
10269   // Align code
10270   if(((u_int)out)&7) emit_addnop(13);
10271   #endif
10272   assert((u_int)out-(u_int)beginning<MAX_OUTPUT_BLOCK_SIZE);
10273   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
10274   memcpy(copy,source,slen*4);
10275   copy+=slen*4;
10276
10277   end_block(beginning);
10278
10279   // If we're within 256K of the end of the buffer,
10280   // start over from the beginning. (Is 256K enough?)
10281   if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
10282
10283   // Trap writes to any of the pages we compiled
10284   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
10285     invalid_code[i]=0;
10286   }
10287   inv_code_start=inv_code_end=~0;
10288
10289   // for PCSX we need to mark all mirrors too
10290   if(get_page(start)<(RAM_SIZE>>12))
10291     for(i=start>>12;i<=(start+slen*4)>>12;i++)
10292       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
10293       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
10294       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
10295
10296   /* Pass 10 - Free memory by expiring oldest blocks */
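  // The output buffer is used as a ring: expirep advances toward a point
  // ahead of the current output pointer and, depending on the phase, removes
  // jump_in and jump_dirty entries, kills jump_out pointers, or clears
  // hash-table slots whose code lies in the region about to be reused.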
10297
10298   int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
10299   while(expirep!=end)
10300   {
10301     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
10302     int base=(int)BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
10303     inv_debug("EXP: Phase %d\n",expirep);
10304     switch((expirep>>11)&3)
10305     {
10306       case 0:
10307         // Clear jump_in and jump_dirty
10308         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
10309         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
10310         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
10311         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
10312         break;
10313       case 1:
10314         // Clear pointers
10315         ll_kill_pointers(jump_out[expirep&2047],base,shift);
10316         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
10317         break;
10318       case 2:
10319         // Clear hash table
10320         for(i=0;i<32;i++) {
10321           u_int *ht_bin=hash_table[((expirep&2047)<<5)+i];
10322           if((ht_bin[3]>>shift)==(base>>shift) ||
10323              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10324             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
10325             ht_bin[2]=ht_bin[3]=-1;
10326           }
10327           if((ht_bin[1]>>shift)==(base>>shift) ||
10328              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
10329             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
10330             ht_bin[0]=ht_bin[2];
10331             ht_bin[1]=ht_bin[3];
10332             ht_bin[2]=ht_bin[3]=-1;
10333           }
10334         }
10335         break;
10336       case 3:
10337         // Clear jump_out
10338         #ifdef __arm__
10339         if((expirep&2047)==0)
10340           do_clear_cache();
10341         #endif
10342         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
10343         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
10344         break;
10345     }
10346     expirep=(expirep+1)&65535;
10347   }
10348   return 0;
10349 }
10350
10351 // vim:shiftwidth=2:expandtab