(VITA) Some dynarec
[pcsx_rearmed.git] / libpcsxcore / new_dynarec / new_dynarec.c
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <errno.h>
25 #include <sys/mman.h>
26
27 #include "emu_if.h" //emulator interface
28
29 //#define DISASM
30 //#define assem_debug printf
31 //#define inv_debug printf
32 #define assem_debug(...)
33 #define inv_debug(...)
34
35 #ifdef __i386__
36 #include "assem_x86.h"
37 #endif
38 #ifdef __x86_64__
39 #include "assem_x64.h"
40 #endif
41 #ifdef __arm__
42 #include "assem_arm.h"
43 #endif
44
45 #ifdef __BLACKBERRY_QNX__
46 #undef __clear_cache
47 #define __clear_cache(start,end) msync(start, (size_t)((void*)end - (void*)start), MS_SYNC | MS_CACHE_ONLY | MS_INVALIDATE_ICACHE);
48 #elif defined(__MACH__)
49 #include <libkern/OSCacheControl.h>
50 #define __clear_cache mach_clear_cache
51 static void __clear_cache(void *start, void *end) {
52   size_t len = (char *)end - (char *)start;
53   sys_dcache_flush(start, len);
54   sys_icache_invalidate(start, len);
55 }
56 #elif defined(_3DS)
57 #include "3ds_utils.h"
58 #define __clear_cache(start,end) svcFlushProcessDataCache(0xFFFF8001, start, (u32)(end)-(u32)(start))
59 #elif defined(VITA)
60 #define __clear_cache vita_clear_cache
61 static void __clear_cache(void *start, void *end) {
62   size_t len = (char *)end - (char *)start;
63   int block = sceKernelFindMemBlockByAddr(start,len);
64   sceKernelSyncVMDomain(block, start, len);
65 }
66 #endif
67
68 #define MAXBLOCK 4096
69 #define MAX_OUTPUT_BLOCK_SIZE 262144
70
71 struct regstat
72 {
73   signed char regmap_entry[HOST_REGS];
74   signed char regmap[HOST_REGS];
75   uint64_t was32;
76   uint64_t is32;
77   uint64_t wasdirty;
78   uint64_t dirty;
79   uint64_t u;
80   uint64_t uu;
81   u_int wasconst;
82   u_int isconst;
83   u_int loadedconst;             // host regs that have constants loaded
84   u_int waswritten;              // MIPS regs that were used as store base before
85 };
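/* Rough meaning of the fields above, as used throughout this file: regmap[hr] holds the
   MIPS (or special, see defines below) register cached in host register hr, -1 if free;
   dirty/wasdirty and isconst/wasconst are bitmasks indexed by host register; is32/was32
   mark MIPS registers known to hold sign-extended 32-bit values, and u/uu mark MIPS
   registers whose lower/upper halves are unneeded (dead) past this point. */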
86
87 // note: asm depends on this layout
88 struct ll_entry
89 {
90   u_int vaddr;
91   u_int reg_sv_flags;
92   void *addr;
93   struct ll_entry *next;
94 };
95
96   u_int start;
97   u_int *source;
98   char insn[MAXBLOCK][10];
99   u_char itype[MAXBLOCK];
100   u_char opcode[MAXBLOCK];
101   u_char opcode2[MAXBLOCK];
102   u_char bt[MAXBLOCK];
103   u_char rs1[MAXBLOCK];
104   u_char rs2[MAXBLOCK];
105   u_char rt1[MAXBLOCK];
106   u_char rt2[MAXBLOCK];
107   u_char us1[MAXBLOCK];
108   u_char us2[MAXBLOCK];
109   u_char dep1[MAXBLOCK];
110   u_char dep2[MAXBLOCK];
111   u_char lt1[MAXBLOCK];
112   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
113   static uint64_t gte_rt[MAXBLOCK];
114   static uint64_t gte_unneeded[MAXBLOCK];
115   static u_int smrv[32]; // speculated MIPS register values
116   static u_int smrv_strong; // mask of regs that are likely to have correct values
117   static u_int smrv_weak; // same, but somewhat less likely
118   static u_int smrv_strong_next; // same, but after current insn executes
119   static u_int smrv_weak_next;
120   int imm[MAXBLOCK];
121   u_int ba[MAXBLOCK];
122   char likely[MAXBLOCK];
123   char is_ds[MAXBLOCK];
124   char ooo[MAXBLOCK];
125   uint64_t unneeded_reg[MAXBLOCK];
126   uint64_t unneeded_reg_upper[MAXBLOCK];
127   uint64_t branch_unneeded_reg[MAXBLOCK];
128   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
129   uint64_t p32[MAXBLOCK];
130   uint64_t pr32[MAXBLOCK];
131   signed char regmap_pre[MAXBLOCK][HOST_REGS];
132   static uint64_t current_constmap[HOST_REGS];
133   static uint64_t constmap[MAXBLOCK][HOST_REGS];
134   static struct regstat regs[MAXBLOCK];
135   static struct regstat branch_regs[MAXBLOCK];
136   signed char minimum_free_regs[MAXBLOCK];
137   u_int needed_reg[MAXBLOCK];
138   uint64_t requires_32bit[MAXBLOCK];
139   u_int wont_dirty[MAXBLOCK];
140   u_int will_dirty[MAXBLOCK];
141   int ccadj[MAXBLOCK];
142   int slen;
143   u_int instr_addr[MAXBLOCK];
144   static u_int link_addr[MAXBLOCK][3];
145   int linkcount;
146   u_int stubs[MAXBLOCK*3][8];
147   int stubcount;
148   u_int literals[1024][2];
149   int literalcount;
150   int is_delayslot;
151   int cop1_usable;
152   u_char *out;
153   struct ll_entry *jump_in[4096] __attribute__((aligned(16)));
154   struct ll_entry *jump_out[4096];
155   struct ll_entry *jump_dirty[4096];
156   u_int hash_table[65536][4]  __attribute__((aligned(16)));
157   char shadow[1048576]  __attribute__((aligned(16)));
158   void *copy;
159   int expirep;
160 #ifndef PCSX
161   u_int using_tlb;
162 #else
163   static const u_int using_tlb=0;
164 #endif
165   int new_dynarec_did_compile;
166   int new_dynarec_hacks;
167   u_int stop_after_jal;
168 #ifndef RAM_FIXED
169   static u_int ram_offset;
170 #else
171   static const u_int ram_offset=0;
172 #endif
173   extern u_char restore_candidate[512];
174   extern int cycle_count;
175
176   /* registers that may be allocated */
177   /* 1-31 gpr */
178 #define HIREG 32 // hi
179 #define LOREG 33 // lo
180 #define FSREG 34 // FPU status (FCSR)
181 #define CSREG 35 // Coprocessor status
182 #define CCREG 36 // Cycle count
183 #define INVCP 37 // Pointer to invalid_code
184 #define MMREG 38 // Pointer to memory_map
185 #define ROREG 39 // ram offset (if rdram!=0x80000000)
186 #define TEMPREG 40
187 #define FTEMP 40 // FPU temporary register
188 #define PTEMP 41 // Prefetch temporary register
189 #define TLREG 42 // TLB mapping offset
190 #define RHASH 43 // Return address hash
191 #define RHTBL 44 // Return address hash table address
192 #define RTEMP 45 // JR/JALR address register
193 #define MAXREG 45
194 #define AGEN1 46 // Address generation temporary register
195 #define AGEN2 47 // Address generation temporary register
196 #define MGEN1 48 // Maptable address generation temporary register
197 #define MGEN2 49 // Maptable address generation temporary register
198 #define BTREG 50 // Branch target temporary register
199
200   /* instruction types */
201 #define NOP 0     // No operation
202 #define LOAD 1    // Load
203 #define STORE 2   // Store
204 #define LOADLR 3  // Unaligned load
205 #define STORELR 4 // Unaligned store
206 #define MOV 5     // Move
207 #define ALU 6     // Arithmetic/logic
208 #define MULTDIV 7 // Multiply/divide
209 #define SHIFT 8   // Shift by register
210 #define SHIFTIMM 9 // Shift by immediate
211 #define IMM16 10  // 16-bit immediate
212 #define RJUMP 11  // Unconditional jump to register
213 #define UJUMP 12  // Unconditional jump
214 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
215 #define SJUMP 14  // Conditional branch (regimm format)
216 #define COP0 15   // Coprocessor 0
217 #define COP1 16   // Coprocessor 1
218 #define C1LS 17   // Coprocessor 1 load/store
219 #define FJUMP 18  // Conditional branch (floating point)
220 #define FLOAT 19  // Floating point unit
221 #define FCONV 20  // Convert integer to float
222 #define FCOMP 21  // Floating point compare (sets FSREG)
223 #define SYSCALL 22 // SYSCALL
224 #define OTHER 23  // Other
225 #define SPAN 24   // Branch/delay slot spans 2 pages
226 #define NI 25     // Not implemented
227 #define HLECALL 26 // PCSX fake opcodes for HLE
228 #define COP2 27   // Coprocessor 2 move
229 #define C2LS 28   // Coprocessor 2 load/store
230 #define C2OP 29   // Coprocessor 2 operation
231 #define INTCALL 30 // Call interpreter to handle rare corner cases
232
233   /* stubs */
234 #define CC_STUB 1
235 #define FP_STUB 2
236 #define LOADB_STUB 3
237 #define LOADH_STUB 4
238 #define LOADW_STUB 5
239 #define LOADD_STUB 6
240 #define LOADBU_STUB 7
241 #define LOADHU_STUB 8
242 #define STOREB_STUB 9
243 #define STOREH_STUB 10
244 #define STOREW_STUB 11
245 #define STORED_STUB 12
246 #define STORELR_STUB 13
247 #define INVCODE_STUB 14
248
249   /* branch codes */
250 #define TAKEN 1
251 #define NOTTAKEN 2
252 #define NULLDS 3
253
254 // asm linkage
255 int new_recompile_block(int addr);
256 void *get_addr_ht(u_int vaddr);
257 void invalidate_block(u_int block);
258 void invalidate_addr(u_int addr);
259 void remove_hash(int vaddr);
260 void jump_vaddr();
261 void dyna_linker();
262 void dyna_linker_ds();
263 void verify_code();
264 void verify_code_vm();
265 void verify_code_ds();
266 void cc_interrupt();
267 void fp_exception();
268 void fp_exception_ds();
269 void jump_syscall();
270 void jump_syscall_hle();
271 void jump_eret();
272 void jump_hlecall();
273 void jump_intcall();
274 void new_dyna_leave();
275
276 // TLB
277 void TLBWI_new();
278 void TLBWR_new();
279 void read_nomem_new();
280 void read_nomemb_new();
281 void read_nomemh_new();
282 void read_nomemd_new();
283 void write_nomem_new();
284 void write_nomemb_new();
285 void write_nomemh_new();
286 void write_nomemd_new();
287 void write_rdram_new();
288 void write_rdramb_new();
289 void write_rdramh_new();
290 void write_rdramd_new();
291 extern u_int memory_map[1048576];
292
293 // Needed by assembler
294 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
295 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
296 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
297 void load_all_regs(signed char i_regmap[]);
298 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
299 void load_regs_entry(int t);
300 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
301
302 int tracedebug=0;
303
304 //#define DEBUG_CYCLE_COUNT 1
305
306 #define NO_CYCLE_PENALTY_THR 12
307
308 int cycle_multiplier; // 100 for 1.0
309
310 static int CLOCK_ADJUST(int x)
311 {
312   int s=(x>>31)|1;
313   return (x * cycle_multiplier + s * 50) / 100;
314 }
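/* Example: with cycle_multiplier=150 (1.5x), CLOCK_ADJUST(7) = (7*150 + 50)/100 = 11.
   The s*50 term rounds the scaled cycle count to the nearest integer, away from zero
   for negative inputs (s is the sign of x). */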
315
316 static void tlb_hacks()
317 {
318 #ifndef DISABLE_TLB
319   // Goldeneye hack
320   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
321   {
322     u_int addr;
323     int n;
324     switch (ROM_HEADER->Country_code&0xFF)
325     {
326       case 0x45: // U
327         addr=0x34b30;
328         break;
329       case 0x4A: // J
330         addr=0x34b70;
331         break;
332       case 0x50: // E
333         addr=0x329f0;
334         break;
335       default:
336         // Unknown country code
337         addr=0;
338         break;
339     }
340     u_int rom_addr=(u_int)rom;
341     #ifdef ROM_COPY
342     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
343     // in the lower 4G of memory to use this hack.  Copy it if necessary.
344     if((void *)rom>(void *)0xffffffff) {
345       munmap(ROM_COPY, 67108864);
346       if(mmap(ROM_COPY, 12582912,
347               PROT_READ | PROT_WRITE,
348               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
349               -1, 0) <= 0) {printf("mmap() failed\n");}
350       memcpy(ROM_COPY,rom,12582912);
351       rom_addr=(u_int)ROM_COPY;
352     }
353     #endif
354     if(addr) {
355       for(n=0x7F000;n<0x80000;n++) {
356         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
357       }
358     }
359   }
360 #endif
361 }
362
363 static u_int get_page(u_int vaddr)
364 {
365 #ifndef PCSX
366   u_int page=(vaddr^0x80000000)>>12;
367 #else
368   u_int page=vaddr&~0xe0000000;
369   if (page < 0x1000000)
370     page &= ~0x0e00000; // RAM mirrors
371   page>>=12;
372 #endif
373 #ifndef DISABLE_TLB
374   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
375 #endif
376   if(page>2048) page=2048+(page&2047);
377   return page;
378 }
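/* Example (PCSX path): get_page(0x80030000) masks off the segment bits to 0x30000, clears
   the RAM mirror bits (the address is below 0x1000000) and returns page 0x30. Addresses
   outside the 2048 RAM pages, e.g. the BIOS at 0xbfc00000, are folded into the 2048..4095
   range. */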
379
380 #ifndef PCSX
381 static u_int get_vpage(u_int vaddr)
382 {
383   u_int vpage=(vaddr^0x80000000)>>12;
384 #ifndef DISABLE_TLB
385   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
386 #endif
387   if(vpage>2048) vpage=2048+(vpage&2047);
388   return vpage;
389 }
390 #else
391 // no virtual mem in PCSX
392 static u_int get_vpage(u_int vaddr)
393 {
394   return get_page(vaddr);
395 }
396 #endif
397
398 // Get address from virtual address
399 // This is called from the recompiled JR/JALR instructions
400 void *get_addr(u_int vaddr)
401 {
402   u_int page=get_page(vaddr);
403   u_int vpage=get_vpage(vaddr);
404   struct ll_entry *head;
405   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
406   head=jump_in[page];
407   while(head!=NULL) {
408     if(head->vaddr==vaddr) {
409   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
410       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
411       ht_bin[3]=ht_bin[1];
412       ht_bin[2]=ht_bin[0];
413       ht_bin[1]=(int)head->addr;
414       ht_bin[0]=vaddr;
415       return head->addr;
416     }
417     head=head->next;
418   }
419   head=jump_dirty[vpage];
420   while(head!=NULL) {
421     if(head->vaddr==vaddr) {
422       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
423       // Don't restore blocks which are about to expire from the cache
424       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
425       if(verify_dirty(head->addr)) {
426         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
427         invalid_code[vaddr>>12]=0;
428         inv_code_start=inv_code_end=~0;
429 #ifndef DISABLE_TLB
430         memory_map[vaddr>>12]|=0x40000000;
431 #endif
432         if(vpage<2048) {
433 #ifndef DISABLE_TLB
434           if(tlb_LUT_r[vaddr>>12]) {
435             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
436             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
437           }
438 #endif
439           restore_candidate[vpage>>3]|=1<<(vpage&7);
440         }
441         else restore_candidate[page>>3]|=1<<(page&7);
442         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
443         if(ht_bin[0]==vaddr) {
444           ht_bin[1]=(int)head->addr; // Replace existing entry
445         }
446         else
447         {
448           ht_bin[3]=ht_bin[1];
449           ht_bin[2]=ht_bin[0];
450           ht_bin[1]=(int)head->addr;
451           ht_bin[0]=vaddr;
452         }
453         return head->addr;
454       }
455     }
456     head=head->next;
457   }
458   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
459   int r=new_recompile_block(vaddr);
460   if(r==0) return get_addr(vaddr);
461   // Execute in unmapped page, generate pagefault exception
462   Status|=2;
463   Cause=(vaddr<<31)|0x8;
464   EPC=(vaddr&1)?vaddr-5:vaddr;
465   BadVAddr=(vaddr&~1);
466   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
467   EntryHi=BadVAddr&0xFFFFE000;
468   return get_addr_ht(0x80000000);
469 }
470 // Look up address in hash table first
471 void *get_addr_ht(u_int vaddr)
472 {
473   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
474   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
475   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
476   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
477   return get_addr(vaddr);
478 }
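/* Each hash_table bin caches two {vaddr, compiled address} pairs: slots [0]/[1] hold the
   most recently used entry, [2]/[3] the older one. get_addr() above refills a bin by
   shifting the old pair down before writing the new one, so hot JR/JALR targets normally
   hit slot 0 without walking the jump_in list. */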
479
480 void clear_all_regs(signed char regmap[])
481 {
482   int hr;
483   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
484 }
485
486 signed char get_reg(signed char regmap[],int r)
487 {
488   int hr;
489   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
490   return -1;
491 }
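/* get_reg() returns the host register currently mapped to MIPS register r (or to one of
   the special registers defined above, e.g. CCREG), or -1 if it is not allocated. The
   allocators below also pass r|64 to ask for the host register holding the upper half of
   a 64-bit value. */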
492
493 // Find a register that is available for two consecutive cycles
494 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
495 {
496   int hr;
497   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
498   return -1;
499 }
500
501 int count_free_regs(signed char regmap[])
502 {
503   int count=0;
504   int hr;
505   for(hr=0;hr<HOST_REGS;hr++)
506   {
507     if(hr!=EXCLUDE_REG) {
508       if(regmap[hr]<0) count++;
509     }
510   }
511   return count;
512 }
513
514 void dirty_reg(struct regstat *cur,signed char reg)
515 {
516   int hr;
517   if(!reg) return;
518   for (hr=0;hr<HOST_REGS;hr++) {
519     if((cur->regmap[hr]&63)==reg) {
520       cur->dirty|=1<<hr;
521     }
522   }
523 }
524
525 // If we dirty the lower half of a 64 bit register which is now being
526 // sign-extended, we need to dump the upper half.
527 // Note: Do this only after completion of the instruction, because
528 // some instructions may need to read the full 64-bit value even if
529 // overwriting it (eg SLTI, DSRA32).
530 static void flush_dirty_uppers(struct regstat *cur)
531 {
532   int hr,reg;
533   for (hr=0;hr<HOST_REGS;hr++) {
534     if((cur->dirty>>hr)&1) {
535       reg=cur->regmap[hr];
536       if(reg>=64)
537         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
538     }
539   }
540 }
541
542 void set_const(struct regstat *cur,signed char reg,uint64_t value)
543 {
544   int hr;
545   if(!reg) return;
546   for (hr=0;hr<HOST_REGS;hr++) {
547     if(cur->regmap[hr]==reg) {
548       cur->isconst|=1<<hr;
549       current_constmap[hr]=value;
550     }
551     else if((cur->regmap[hr]^64)==reg) {
552       cur->isconst|=1<<hr;
553       current_constmap[hr]=value>>32;
554     }
555   }
556 }
557
558 void clear_const(struct regstat *cur,signed char reg)
559 {
560   int hr;
561   if(!reg) return;
562   for (hr=0;hr<HOST_REGS;hr++) {
563     if((cur->regmap[hr]&63)==reg) {
564       cur->isconst&=~(1<<hr);
565     }
566   }
567 }
568
569 int is_const(struct regstat *cur,signed char reg)
570 {
571   int hr;
572   if(reg<0) return 0;
573   if(!reg) return 1;
574   for (hr=0;hr<HOST_REGS;hr++) {
575     if((cur->regmap[hr]&63)==reg) {
576       return (cur->isconst>>hr)&1;
577     }
578   }
579   return 0;
580 }
581 uint64_t get_const(struct regstat *cur,signed char reg)
582 {
583   int hr;
584   if(!reg) return 0;
585   for (hr=0;hr<HOST_REGS;hr++) {
586     if(cur->regmap[hr]==reg) {
587       return current_constmap[hr];
588     }
589   }
590   SysPrintf("Unknown constant in r%d\n",reg);
591   exit(1);
592 }
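/* Constant propagation: set_const() records a value known at compile time for the host
   register(s) mapping reg (the upper 32 bits go to the register mapped as reg^64),
   is_const() tests the per-host-register isconst bit, and get_const() reads the value
   back. The immediate allocators below use this to fold LUI/ORI/ADDIU results into known
   constants. */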
593
594 // Least soon needed registers
595 // Look at the next ten instructions and see which registers
596 // will be used.  Try not to reallocate these.
597 void lsn(u_char hsn[], int i, int *preferred_reg)
598 {
599   int j;
600   int b=-1;
601   for(j=0;j<9;j++)
602   {
603     if(i+j>=slen) {
604       j=slen-i-1;
605       break;
606     }
607     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
608     {
609       // Don't go past an unconditional jump
610       j++;
611       break;
612     }
613   }
614   for(;j>=0;j--)
615   {
616     if(rs1[i+j]) hsn[rs1[i+j]]=j;
617     if(rs2[i+j]) hsn[rs2[i+j]]=j;
618     if(rt1[i+j]) hsn[rt1[i+j]]=j;
619     if(rt2[i+j]) hsn[rt2[i+j]]=j;
620     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
621       // Stores can allocate zero
622       hsn[rs1[i+j]]=j;
623       hsn[rs2[i+j]]=j;
624     }
625     // On some architectures stores need invc_ptr
626     #if defined(HOST_IMM8)
627     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
628       hsn[INVCP]=j;
629     }
630     #endif
631     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
632     {
633       hsn[CCREG]=j;
634       b=j;
635     }
636   }
637   if(b>=0)
638   {
639     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
640     {
641       // Follow first branch
642       int t=(ba[i+b]-start)>>2;
643       j=7-b;if(t+j>=slen) j=slen-t-1;
644       for(;j>=0;j--)
645       {
646         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
647         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
648         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
649         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
650       }
651     }
652     // TODO: preferred register based on backward branch
653   }
654   // Delay slot should preferably not overwrite branch conditions or cycle count
655   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
656     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
657     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
658     hsn[CCREG]=1;
659     // ...or hash tables
660     hsn[RHASH]=1;
661     hsn[RHTBL]=1;
662   }
663   // Coprocessor load/store needs FTEMP, even if not declared
664   if(itype[i]==C1LS||itype[i]==C2LS) {
665     hsn[FTEMP]=0;
666   }
667   // Load L/R also uses FTEMP as a temporary register
668   if(itype[i]==LOADLR) {
669     hsn[FTEMP]=0;
670   }
671   // Also SWL/SWR/SDL/SDR
672   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
673     hsn[FTEMP]=0;
674   }
675   // Don't remove the TLB registers either
676   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
677     hsn[TLREG]=0;
678   }
679   // Don't remove the miniht registers
680   if(itype[i]==UJUMP||itype[i]==RJUMP)
681   {
682     hsn[RHASH]=0;
683     hsn[RHTBL]=0;
684   }
685 }
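/* On return, hsn[r] is (roughly) the number of instructions until register r is next
   used, following at most one branch target; smaller values mean the register allocator
   should try not to evict that register. */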
686
687 // We only want to allocate registers if we're going to use them again soon
688 int needed_again(int r, int i)
689 {
690   int j;
691   int b=-1;
692   int rn=10;
693
694   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
695   {
696     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
697       return 0; // Don't need any registers if exiting the block
698   }
699   for(j=0;j<9;j++)
700   {
701     if(i+j>=slen) {
702       j=slen-i-1;
703       break;
704     }
705     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
706     {
707       // Don't go past an unconditional jump
708       j++;
709       break;
710     }
711     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
712     {
713       break;
714     }
715   }
716   for(;j>=1;j--)
717   {
718     if(rs1[i+j]==r) rn=j;
719     if(rs2[i+j]==r) rn=j;
720     if((unneeded_reg[i+j]>>r)&1) rn=10;
721     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
722     {
723       b=j;
724     }
725   }
726   /*
727   if(b>=0)
728   {
729     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
730     {
731       // Follow first branch
732       int o=rn;
733       int t=(ba[i+b]-start)>>2;
734       j=7-b;if(t+j>=slen) j=slen-t-1;
735       for(;j>=0;j--)
736       {
737         if(!((unneeded_reg[t+j]>>r)&1)) {
738           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
739           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
740         }
741         else rn=o;
742       }
743     }
744   }*/
745   if(rn<10) return 1;
746   return 0;
747 }
748
749 // Try to match register allocations at the end of a loop with those
750 // at the beginning
751 int loop_reg(int i, int r, int hr)
752 {
753   int j,k;
754   for(j=0;j<9;j++)
755   {
756     if(i+j>=slen) {
757       j=slen-i-1;
758       break;
759     }
760     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
761     {
762       // Don't go past an unconditional jump
763       j++;
764       break;
765     }
766   }
767   k=0;
768   if(i>0){
769     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
770       k--;
771   }
772   for(;k<j;k++)
773   {
774     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
775     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
776     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
777     {
778       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
779       {
780         int t=(ba[i+k]-start)>>2;
781         int reg=get_reg(regs[t].regmap_entry,r);
782         if(reg>=0) return reg;
783         //reg=get_reg(regs[t+1].regmap_entry,r);
784         //if(reg>=0) return reg;
785       }
786     }
787   }
788   return hr;
789 }
790
791
792 // Allocate every register, preserving source/target regs
793 void alloc_all(struct regstat *cur,int i)
794 {
795   int hr;
796
797   for(hr=0;hr<HOST_REGS;hr++) {
798     if(hr!=EXCLUDE_REG) {
799       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
800          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
801       {
802         cur->regmap[hr]=-1;
803         cur->dirty&=~(1<<hr);
804       }
805       // Don't need zeros
806       if((cur->regmap[hr]&63)==0)
807       {
808         cur->regmap[hr]=-1;
809         cur->dirty&=~(1<<hr);
810       }
811     }
812   }
813 }
814
815 #ifndef FORCE32
816 void div64(int64_t dividend,int64_t divisor)
817 {
818   lo=dividend/divisor;
819   hi=dividend%divisor;
820   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
821   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
822 }
823 void divu64(uint64_t dividend,uint64_t divisor)
824 {
825   lo=dividend/divisor;
826   hi=dividend%divisor;
827   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
828   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
829 }
830
831 void mult64(uint64_t m1,uint64_t m2)
832 {
833    unsigned long long int op1, op2, op3, op4;
834    unsigned long long int result1, result2, result3, result4;
835    unsigned long long int temp1, temp2, temp3, temp4;
836    int sign = 0;
837
838    if ((int64_t)m1 < 0)
839      {
840     op2 = -m1;
841     sign = 1 - sign;
842      }
843    else op2 = m1;
844    if ((int64_t)m2 < 0)
845      {
846     op4 = -m2;
847     sign = 1 - sign;
848      }
849    else op4 = m2;
850
851    op1 = op2 & 0xFFFFFFFF;
852    op2 = (op2 >> 32) & 0xFFFFFFFF;
853    op3 = op4 & 0xFFFFFFFF;
854    op4 = (op4 >> 32) & 0xFFFFFFFF;
855
856    temp1 = op1 * op3;
857    temp2 = (temp1 >> 32) + op1 * op4;
858    temp3 = op2 * op3;
859    temp4 = (temp3 >> 32) + op2 * op4;
860
861    result1 = temp1 & 0xFFFFFFFF;
862    result2 = temp2 + (temp3 & 0xFFFFFFFF);
863    result3 = (result2 >> 32) + temp4;
864    result4 = (result3 >> 32);
865
866    lo = result1 | (result2 << 32);
867    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
868    if (sign)
869      {
870     hi = ~hi;
871     if (!lo) hi++;
872     else lo = ~lo + 1;
873      }
874 }
875
876 void multu64(uint64_t m1,uint64_t m2)
877 {
878    unsigned long long int op1, op2, op3, op4;
879    unsigned long long int result1, result2, result3, result4;
880    unsigned long long int temp1, temp2, temp3, temp4;
881
882    op1 = m1 & 0xFFFFFFFF;
883    op2 = (m1 >> 32) & 0xFFFFFFFF;
884    op3 = m2 & 0xFFFFFFFF;
885    op4 = (m2 >> 32) & 0xFFFFFFFF;
886
887    temp1 = op1 * op3;
888    temp2 = (temp1 >> 32) + op1 * op4;
889    temp3 = op2 * op3;
890    temp4 = (temp3 >> 32) + op2 * op4;
891
892    result1 = temp1 & 0xFFFFFFFF;
893    result2 = temp2 + (temp3 & 0xFFFFFFFF);
894    result3 = (result2 >> 32) + temp4;
895    result4 = (result3 >> 32);
896
897    lo = result1 | (result2 << 32);
898    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
899
900   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
901   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
902 }
903
904 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
905 {
906   if(bits) {
907     original<<=64-bits;
908     original>>=64-bits;
909     loaded<<=bits;
910     original|=loaded;
911   }
912   else original=loaded;
913   return original;
914 }
915 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
916 {
917   if(bits^56) {
918     original>>=64-(bits^56);
919     original<<=64-(bits^56);
920     loaded>>=bits^56;
921     original|=loaded;
922   }
923   else original=loaded;
924   return original;
925 }
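/* Example: ldl_merge(orig,loaded,16) keeps the low 16 bits of the old register value and
   puts the loaded data above them, i.e. (loaded<<16)|(orig&0xffff); ldr_merge is the
   mirror case, keeping the high bits of the old value and inserting the loaded data
   below them. Both are helpers for the unaligned LDL/LDR loads. */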
926 #endif
927
928 #ifdef __i386__
929 #include "assem_x86.c"
930 #endif
931 #ifdef __x86_64__
932 #include "assem_x64.c"
933 #endif
934 #ifdef __arm__
935 #include "assem_arm.c"
936 #endif
937
938 // Add virtual address mapping to linked list
939 void ll_add(struct ll_entry **head,int vaddr,void *addr)
940 {
941   struct ll_entry *new_entry;
942   new_entry=malloc(sizeof(struct ll_entry));
943   assert(new_entry!=NULL);
944   new_entry->vaddr=vaddr;
945   new_entry->reg_sv_flags=0;
946   new_entry->addr=addr;
947   new_entry->next=*head;
948   *head=new_entry;
949 }
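/* Entries are pushed at the head, so the most recently added block for a page is found
   first by get_addr()/check_addr(). add_link() below, for example, records outgoing
   branch patch locations with ll_add(jump_out+page,vaddr,src). */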
950
951 void ll_add_flags(struct ll_entry **head,int vaddr,u_int reg_sv_flags,void *addr)
952 {
953   ll_add(head,vaddr,addr);
954   (*head)->reg_sv_flags=reg_sv_flags;
955 }
956
957 // Check if an address is already compiled
958 // but don't return addresses which are about to expire from the cache
959 void *check_addr(u_int vaddr)
960 {
961   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
962   if(ht_bin[0]==vaddr) {
963     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
964       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
965   }
966   if(ht_bin[2]==vaddr) {
967     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
968       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
969   }
970   u_int page=get_page(vaddr);
971   struct ll_entry *head;
972   head=jump_in[page];
973   while(head!=NULL) {
974     if(head->vaddr==vaddr) {
975       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
976         // Update existing entry with current address
977         if(ht_bin[0]==vaddr) {
978           ht_bin[1]=(int)head->addr;
979           return head->addr;
980         }
981         if(ht_bin[2]==vaddr) {
982           ht_bin[3]=(int)head->addr;
983           return head->addr;
984         }
985         // Insert into hash table with low priority.
986         // Don't evict existing entries, as they are probably
987         // addresses that are being accessed frequently.
988         if(ht_bin[0]==-1) {
989           ht_bin[1]=(int)head->addr;
990           ht_bin[0]=vaddr;
991         }else if(ht_bin[2]==-1) {
992           ht_bin[3]=(int)head->addr;
993           ht_bin[2]=vaddr;
994         }
995         return head->addr;
996       }
997     }
998     head=head->next;
999   }
1000   return 0;
1001 }
1002
1003 void remove_hash(int vaddr)
1004 {
1005   //printf("remove hash: %x\n",vaddr);
1006   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1007   if(ht_bin[2]==vaddr) {
1008     ht_bin[2]=ht_bin[3]=-1;
1009   }
1010   if(ht_bin[0]==vaddr) {
1011     ht_bin[0]=ht_bin[2];
1012     ht_bin[1]=ht_bin[3];
1013     ht_bin[2]=ht_bin[3]=-1;
1014   }
1015 }
1016
1017 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1018 {
1019   struct ll_entry *next;
1020   while(*head) {
1021     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) ||
1022        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1023     {
1024       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1025       remove_hash((*head)->vaddr);
1026       next=(*head)->next;
1027       free(*head);
1028       *head=next;
1029     }
1030     else
1031     {
1032       head=&((*head)->next);
1033     }
1034   }
1035 }
1036
1037 // Remove all entries from linked list
1038 void ll_clear(struct ll_entry **head)
1039 {
1040   struct ll_entry *cur;
1041   struct ll_entry *next;
1042   if(cur=*head) {
1043     *head=0;
1044     while(cur) {
1045       next=cur->next;
1046       free(cur);
1047       cur=next;
1048     }
1049   }
1050 }
1051
1052 // Dereference the pointers and kill the ones that match
1053 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1054 {
1055   while(head) {
1056     int ptr=get_pointer(head->addr);
1057     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1058     if(((ptr>>shift)==(addr>>shift)) ||
1059        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1060     {
1061       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1062       u_int host_addr=(u_int)kill_pointer(head->addr);
1063       #ifdef __arm__
1064         needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1065       #endif
1066     }
1067     head=head->next;
1068   }
1069 }
1070
1071 // This is called when we write to a compiled block (see do_invstub)
1072 void invalidate_page(u_int page)
1073 {
1074   struct ll_entry *head;
1075   struct ll_entry *next;
1076   head=jump_in[page];
1077   jump_in[page]=0;
1078   while(head!=NULL) {
1079     inv_debug("INVALIDATE: %x\n",head->vaddr);
1080     remove_hash(head->vaddr);
1081     next=head->next;
1082     free(head);
1083     head=next;
1084   }
1085   head=jump_out[page];
1086   jump_out[page]=0;
1087   while(head!=NULL) {
1088     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1089     u_int host_addr=(u_int)kill_pointer(head->addr);
1090     #ifdef __arm__
1091       needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1092     #endif
1093     next=head->next;
1094     free(head);
1095     head=next;
1096   }
1097 }
1098
1099 static void invalidate_block_range(u_int block, u_int first, u_int last)
1100 {
1101   u_int page=get_page(block<<12);
1102   //printf("first=%d last=%d\n",first,last);
1103   invalidate_page(page);
1104   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1105   assert(last<page+5);
1106   // Invalidate the adjacent pages if a block crosses a 4K boundary
1107   while(first<page) {
1108     invalidate_page(first);
1109     first++;
1110   }
1111   for(first=page+1;first<last;first++) {
1112     invalidate_page(first);
1113   }
1114   #ifdef __arm__
1115     do_clear_cache();
1116   #endif
1117
1118   // Don't trap writes
1119   invalid_code[block]=1;
1120 #ifndef DISABLE_TLB
1121   // If there is a valid TLB entry for this page, remove write protect
1122   if(tlb_LUT_w[block]) {
1123     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1124     // CHECK: Is this right?
1125     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1126     u_int real_block=tlb_LUT_w[block]>>12;
1127     invalid_code[real_block]=1;
1128     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1129   }
1130   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1131 #endif
1132
1133   #ifdef USE_MINI_HT
1134   memset(mini_ht,-1,sizeof(mini_ht));
1135   #endif
1136 }
1137
1138 void invalidate_block(u_int block)
1139 {
1140   u_int page=get_page(block<<12);
1141   u_int vpage=get_vpage(block<<12);
1142   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1143   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1144   u_int first,last;
1145   first=last=page;
1146   struct ll_entry *head;
1147   head=jump_dirty[vpage];
1148   //printf("page=%d vpage=%d\n",page,vpage);
1149   while(head!=NULL) {
1150     u_int start,end;
1151     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1152       get_bounds((int)head->addr,&start,&end);
1153       //printf("start: %x end: %x\n",start,end);
1154       if(page<2048&&start>=(u_int)rdram&&end<(u_int)rdram+RAM_SIZE) {
1155         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1156           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1157           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1158         }
1159       }
1160 #ifndef DISABLE_TLB
1161       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1162         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1163           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1164           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1165         }
1166       }
1167 #endif
1168     }
1169     head=head->next;
1170   }
1171   invalidate_block_range(block,first,last);
1172 }
1173
1174 void invalidate_addr(u_int addr)
1175 {
1176 #ifdef PCSX
1177   //static int rhits;
1178   // this check is done by the caller
1179   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1180   u_int page=get_vpage(addr);
1181   if(page<2048) { // RAM
1182     struct ll_entry *head;
1183     u_int addr_min=~0, addr_max=0;
1184     u_int mask=RAM_SIZE-1;
1185     u_int addr_main=0x80000000|(addr&mask);
1186     int pg1;
1187     inv_code_start=addr_main&~0xfff;
1188     inv_code_end=addr_main|0xfff;
1189     pg1=page;
1190     if (pg1>0) {
1191       // must check previous page too because of spans..
1192       pg1--;
1193       inv_code_start-=0x1000;
1194     }
1195     for(;pg1<=page;pg1++) {
1196       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1197         u_int start,end;
1198         get_bounds((int)head->addr,&start,&end);
1199         if(ram_offset) {
1200           start-=ram_offset;
1201           end-=ram_offset;
1202         }
1203         if(start<=addr_main&&addr_main<end) {
1204           if(start<addr_min) addr_min=start;
1205           if(end>addr_max) addr_max=end;
1206         }
1207         else if(addr_main<start) {
1208           if(start<inv_code_end)
1209             inv_code_end=start-1;
1210         }
1211         else {
1212           if(end>inv_code_start)
1213             inv_code_start=end;
1214         }
1215       }
1216     }
1217     if (addr_min!=~0) {
1218       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1219       inv_code_start=inv_code_end=~0;
1220       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1221       return;
1222     }
1223     else {
1224       inv_code_start=(addr&~mask)|(inv_code_start&mask);
1225       inv_code_end=(addr&~mask)|(inv_code_end&mask);
1226       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);
1227       return;
1228     }
1229   }
1230 #endif
1231   invalidate_block(addr>>12);
1232 }
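/* inv_code_start/inv_code_end cache the most recent address range found to contain no
   compiled code, so (as noted above) the memory write handlers can skip calling in here
   again for writes that land inside that range. */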
1233
1234 // This is called when loading a save state.
1235 // Anything could have changed, so invalidate everything.
1236 void invalidate_all_pages()
1237 {
1238   u_int page,n;
1239   for(page=0;page<4096;page++)
1240     invalidate_page(page);
1241   for(page=0;page<1048576;page++)
1242     if(!invalid_code[page]) {
1243       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1244       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1245     }
1246   #ifdef __arm__
1247   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1248   #endif
1249   #ifdef USE_MINI_HT
1250   memset(mini_ht,-1,sizeof(mini_ht));
1251   #endif
1252   #ifndef DISABLE_TLB
1253   // TLB
1254   for(page=0;page<0x100000;page++) {
1255     if(tlb_LUT_r[page]) {
1256       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1257       if(!tlb_LUT_w[page]||!invalid_code[page])
1258         memory_map[page]|=0x40000000; // Write protect
1259     }
1260     else memory_map[page]=-1;
1261     if(page==0x80000) page=0xC0000;
1262   }
1263   tlb_hacks();
1264   #endif
1265 }
1266
1267 // Add an entry to jump_out after making a link
1268 void add_link(u_int vaddr,void *src)
1269 {
1270   u_int page=get_page(vaddr);
1271   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1272   int *ptr=(int *)(src+4);
1273   assert((*ptr&0x0fff0000)==0x059f0000);
1274   ll_add(jump_out+page,vaddr,src);
1275   //int ptr=get_pointer(src);
1276   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1277 }
1278
1279 // If a code block was found to be unmodified (bit was set in
1280 // restore_candidate) and it remains unmodified (bit is clear
1281 // in invalid_code) then move the entries for that 4K page from
1282 // the dirty list to the clean list.
1283 void clean_blocks(u_int page)
1284 {
1285   struct ll_entry *head;
1286   inv_debug("INV: clean_blocks page=%d\n",page);
1287   head=jump_dirty[page];
1288   while(head!=NULL) {
1289     if(!invalid_code[head->vaddr>>12]) {
1290       // Don't restore blocks which are about to expire from the cache
1291       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1292         u_int start,end;
1293         if(verify_dirty((int)head->addr)) {
1294           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1295           u_int i;
1296           u_int inv=0;
1297           get_bounds((int)head->addr,&start,&end);
1298           if(start-(u_int)rdram<RAM_SIZE) {
1299             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1300               inv|=invalid_code[i];
1301             }
1302           }
1303 #ifndef DISABLE_TLB
1304           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1305             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1306             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1307             if(addr<start||addr>=end) inv=1;
1308           } else
1309 #endif
1310           if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1311             inv=1;
1312           }
1313           if(!inv) {
1314             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1315             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1316               u_int ppage=page;
1317 #ifndef DISABLE_TLB
1318               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1319 #endif
1320               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1321               //printf("page=%x, addr=%x\n",page,head->vaddr);
1322               //assert(head->vaddr>>12==(page|0x80000));
1323               ll_add_flags(jump_in+ppage,head->vaddr,head->reg_sv_flags,clean_addr);
1324               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1325               if(ht_bin[0]==head->vaddr) {
1326                 ht_bin[1]=(int)clean_addr; // Replace existing entry
1327               }
1328               if(ht_bin[2]==head->vaddr) {
1329                 ht_bin[3]=(int)clean_addr; // Replace existing entry
1330               }
1331             }
1332           }
1333         }
1334       }
1335     }
1336     head=head->next;
1337   }
1338 }
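/* Rough flow: get_addr() and invalidate_all_pages() above mark pages in
   restore_candidate[]; when clean_blocks() later runs for such a page, verify_dirty()
   re-checks the recorded source words against RAM, and blocks that turn out to be
   unmodified are re-linked from jump_dirty into jump_in instead of being recompiled. */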
1339
1340
1341 void mov_alloc(struct regstat *current,int i)
1342 {
1343   // Note: Don't need to actually alloc the source registers
1344   if((~current->is32>>rs1[i])&1) {
1345     //alloc_reg64(current,i,rs1[i]);
1346     alloc_reg64(current,i,rt1[i]);
1347     current->is32&=~(1LL<<rt1[i]);
1348   } else {
1349     //alloc_reg(current,i,rs1[i]);
1350     alloc_reg(current,i,rt1[i]);
1351     current->is32|=(1LL<<rt1[i]);
1352   }
1353   clear_const(current,rs1[i]);
1354   clear_const(current,rt1[i]);
1355   dirty_reg(current,rt1[i]);
1356 }
1357
1358 void shiftimm_alloc(struct regstat *current,int i)
1359 {
1360   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1361   {
1362     if(rt1[i]) {
1363       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1364       else lt1[i]=rs1[i];
1365       alloc_reg(current,i,rt1[i]);
1366       current->is32|=1LL<<rt1[i];
1367       dirty_reg(current,rt1[i]);
1368       if(is_const(current,rs1[i])) {
1369         int v=get_const(current,rs1[i]);
1370         if(opcode2[i]==0x00) set_const(current,rt1[i],v<<imm[i]);
1371         if(opcode2[i]==0x02) set_const(current,rt1[i],(u_int)v>>imm[i]);
1372         if(opcode2[i]==0x03) set_const(current,rt1[i],v>>imm[i]);
1373       }
1374       else clear_const(current,rt1[i]);
1375     }
1376   }
1377   else
1378   {
1379     clear_const(current,rs1[i]);
1380     clear_const(current,rt1[i]);
1381   }
1382
1383   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1384   {
1385     if(rt1[i]) {
1386       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1387       alloc_reg64(current,i,rt1[i]);
1388       current->is32&=~(1LL<<rt1[i]);
1389       dirty_reg(current,rt1[i]);
1390     }
1391   }
1392   if(opcode2[i]==0x3c) // DSLL32
1393   {
1394     if(rt1[i]) {
1395       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1396       alloc_reg64(current,i,rt1[i]);
1397       current->is32&=~(1LL<<rt1[i]);
1398       dirty_reg(current,rt1[i]);
1399     }
1400   }
1401   if(opcode2[i]==0x3e) // DSRL32
1402   {
1403     if(rt1[i]) {
1404       alloc_reg64(current,i,rs1[i]);
1405       if(imm[i]==32) {
1406         alloc_reg64(current,i,rt1[i]);
1407         current->is32&=~(1LL<<rt1[i]);
1408       } else {
1409         alloc_reg(current,i,rt1[i]);
1410         current->is32|=1LL<<rt1[i];
1411       }
1412       dirty_reg(current,rt1[i]);
1413     }
1414   }
1415   if(opcode2[i]==0x3f) // DSRA32
1416   {
1417     if(rt1[i]) {
1418       alloc_reg64(current,i,rs1[i]);
1419       alloc_reg(current,i,rt1[i]);
1420       current->is32|=1LL<<rt1[i];
1421       dirty_reg(current,rt1[i]);
1422     }
1423   }
1424 }
1425
1426 void shift_alloc(struct regstat *current,int i)
1427 {
1428   if(rt1[i]) {
1429     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1430     {
1431       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1432       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1433       alloc_reg(current,i,rt1[i]);
1434       if(rt1[i]==rs2[i]) {
1435         alloc_reg_temp(current,i,-1);
1436         minimum_free_regs[i]=1;
1437       }
1438       current->is32|=1LL<<rt1[i];
1439     } else { // DSLLV/DSRLV/DSRAV
1440       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1441       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1442       alloc_reg64(current,i,rt1[i]);
1443       current->is32&=~(1LL<<rt1[i]);
1444       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1445       {
1446         alloc_reg_temp(current,i,-1);
1447         minimum_free_regs[i]=1;
1448       }
1449     }
1450     clear_const(current,rs1[i]);
1451     clear_const(current,rs2[i]);
1452     clear_const(current,rt1[i]);
1453     dirty_reg(current,rt1[i]);
1454   }
1455 }
1456
1457 void alu_alloc(struct regstat *current,int i)
1458 {
1459   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1460     if(rt1[i]) {
1461       if(rs1[i]&&rs2[i]) {
1462         alloc_reg(current,i,rs1[i]);
1463         alloc_reg(current,i,rs2[i]);
1464       }
1465       else {
1466         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1467         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1468       }
1469       alloc_reg(current,i,rt1[i]);
1470     }
1471     current->is32|=1LL<<rt1[i];
1472   }
1473   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1474     if(rt1[i]) {
1475       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1476       {
1477         alloc_reg64(current,i,rs1[i]);
1478         alloc_reg64(current,i,rs2[i]);
1479         alloc_reg(current,i,rt1[i]);
1480       } else {
1481         alloc_reg(current,i,rs1[i]);
1482         alloc_reg(current,i,rs2[i]);
1483         alloc_reg(current,i,rt1[i]);
1484       }
1485     }
1486     current->is32|=1LL<<rt1[i];
1487   }
1488   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1489     if(rt1[i]) {
1490       if(rs1[i]&&rs2[i]) {
1491         alloc_reg(current,i,rs1[i]);
1492         alloc_reg(current,i,rs2[i]);
1493       }
1494       else
1495       {
1496         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1497         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1498       }
1499       alloc_reg(current,i,rt1[i]);
1500       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1501       {
1502         if(!((current->uu>>rt1[i])&1)) {
1503           alloc_reg64(current,i,rt1[i]);
1504         }
1505         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1506           if(rs1[i]&&rs2[i]) {
1507             alloc_reg64(current,i,rs1[i]);
1508             alloc_reg64(current,i,rs2[i]);
1509           }
1510           else
1511           {
1512             // Is it really worth it to keep 64-bit values in registers?
1513             #ifdef NATIVE_64BIT
1514             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1515             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1516             #endif
1517           }
1518         }
1519         current->is32&=~(1LL<<rt1[i]);
1520       } else {
1521         current->is32|=1LL<<rt1[i];
1522       }
1523     }
1524   }
1525   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1526     if(rt1[i]) {
1527       if(rs1[i]&&rs2[i]) {
1528         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1529           alloc_reg64(current,i,rs1[i]);
1530           alloc_reg64(current,i,rs2[i]);
1531           alloc_reg64(current,i,rt1[i]);
1532         } else {
1533           alloc_reg(current,i,rs1[i]);
1534           alloc_reg(current,i,rs2[i]);
1535           alloc_reg(current,i,rt1[i]);
1536         }
1537       }
1538       else {
1539         alloc_reg(current,i,rt1[i]);
1540         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1541           // DADD used as move, or zeroing
1542           // If we have a 64-bit source, then make the target 64 bits too
1543           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1544             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1545             alloc_reg64(current,i,rt1[i]);
1546           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1547             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1548             alloc_reg64(current,i,rt1[i]);
1549           }
1550           if(opcode2[i]>=0x2e&&rs2[i]) {
1551             // DSUB used as negation - 64-bit result
1552             // If we have a 32-bit register, extend it to 64 bits
1553             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1554             alloc_reg64(current,i,rt1[i]);
1555           }
1556         }
1557       }
1558       if(rs1[i]&&rs2[i]) {
1559         current->is32&=~(1LL<<rt1[i]);
1560       } else if(rs1[i]) {
1561         current->is32&=~(1LL<<rt1[i]);
1562         if((current->is32>>rs1[i])&1)
1563           current->is32|=1LL<<rt1[i];
1564       } else if(rs2[i]) {
1565         current->is32&=~(1LL<<rt1[i]);
1566         if((current->is32>>rs2[i])&1)
1567           current->is32|=1LL<<rt1[i];
1568       } else {
1569         current->is32|=1LL<<rt1[i];
1570       }
1571     }
1572   }
1573   clear_const(current,rs1[i]);
1574   clear_const(current,rs2[i]);
1575   clear_const(current,rt1[i]);
1576   dirty_reg(current,rt1[i]);
1577 }
1578
1579 void imm16_alloc(struct regstat *current,int i)
1580 {
1581   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1582   else lt1[i]=rs1[i];
1583   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1584   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1585     current->is32&=~(1LL<<rt1[i]);
1586     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1587       // TODO: Could preserve the 32-bit flag if the immediate is zero
1588       alloc_reg64(current,i,rt1[i]);
1589       alloc_reg64(current,i,rs1[i]);
1590     }
1591     clear_const(current,rs1[i]);
1592     clear_const(current,rt1[i]);
1593   }
1594   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1595     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1596     current->is32|=1LL<<rt1[i];
1597     clear_const(current,rs1[i]);
1598     clear_const(current,rt1[i]);
1599   }
1600   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1601     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1602       if(rs1[i]!=rt1[i]) {
1603         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1604         alloc_reg64(current,i,rt1[i]);
1605         current->is32&=~(1LL<<rt1[i]);
1606       }
1607     }
1608     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1609     if(is_const(current,rs1[i])) {
1610       int v=get_const(current,rs1[i]);
1611       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1612       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1613       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1614     }
1615     else clear_const(current,rt1[i]);
1616   }
1617   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1618     if(is_const(current,rs1[i])) {
1619       int v=get_const(current,rs1[i]);
1620       set_const(current,rt1[i],v+imm[i]);
1621     }
1622     else clear_const(current,rt1[i]);
1623     current->is32|=1LL<<rt1[i];
1624   }
1625   else {
1626     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1627     current->is32|=1LL<<rt1[i];
1628   }
1629   dirty_reg(current,rt1[i]);
1630 }
1631
1632 void load_alloc(struct regstat *current,int i)
1633 {
1634   clear_const(current,rt1[i]);
1635   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1636   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1637   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1638   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1639     alloc_reg(current,i,rt1[i]);
1640     assert(get_reg(current->regmap,rt1[i])>=0);
1641     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1642     {
1643       current->is32&=~(1LL<<rt1[i]);
1644       alloc_reg64(current,i,rt1[i]);
1645     }
1646     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1647     {
1648       current->is32&=~(1LL<<rt1[i]);
1649       alloc_reg64(current,i,rt1[i]);
1650       alloc_all(current,i);
1651       alloc_reg64(current,i,FTEMP);
1652       minimum_free_regs[i]=HOST_REGS;
1653     }
1654     else current->is32|=1LL<<rt1[i];
1655     dirty_reg(current,rt1[i]);
1656     // If using TLB, need a register for pointer to the mapping table
1657     if(using_tlb) alloc_reg(current,i,TLREG);
1658     // LWL/LWR need a temporary register for the old value
1659     if(opcode[i]==0x22||opcode[i]==0x26)
1660     {
1661       alloc_reg(current,i,FTEMP);
1662       alloc_reg_temp(current,i,-1);
1663       minimum_free_regs[i]=1;
1664     }
1665   }
1666   else
1667   {
1668     // Load to r0 or unneeded register (dummy load)
1669     // but we still need a register to calculate the address
1670     if(opcode[i]==0x22||opcode[i]==0x26)
1671     {
1672       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1673     }
1674     // If using TLB, need a register for pointer to the mapping table
1675     if(using_tlb) alloc_reg(current,i,TLREG);
1676     alloc_reg_temp(current,i,-1);
1677     minimum_free_regs[i]=1;
1678     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1679     {
1680       alloc_all(current,i);
1681       alloc_reg64(current,i,FTEMP);
1682       minimum_free_regs[i]=HOST_REGS;
1683     }
1684   }
1685 }
1686
1687 void store_alloc(struct regstat *current,int i)
1688 {
1689   clear_const(current,rs2[i]);
1690   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1691   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1692   alloc_reg(current,i,rs2[i]);
1693   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1694     alloc_reg64(current,i,rs2[i]);
1695     if(rs2[i]) alloc_reg(current,i,FTEMP);
1696   }
1697   // If using TLB, need a register for pointer to the mapping table
1698   if(using_tlb) alloc_reg(current,i,TLREG);
1699   #if defined(HOST_IMM8)
1700   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1701   else alloc_reg(current,i,INVCP);
1702   #endif
1703   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1704     alloc_reg(current,i,FTEMP);
1705   }
1706   // We need a temporary register for address generation
1707   alloc_reg_temp(current,i,-1);
1708   minimum_free_regs[i]=1;
1709 }
1710
1711 void c1ls_alloc(struct regstat *current,int i)
1712 {
1713   //clear_const(current,rs1[i]); // FIXME
1714   clear_const(current,rt1[i]);
1715   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1716   alloc_reg(current,i,CSREG); // Status
1717   alloc_reg(current,i,FTEMP);
1718   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1719     alloc_reg64(current,i,FTEMP);
1720   }
1721   // If using TLB, need a register for pointer to the mapping table
1722   if(using_tlb) alloc_reg(current,i,TLREG);
1723   #if defined(HOST_IMM8)
1724   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1725   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1726     alloc_reg(current,i,INVCP);
1727   #endif
1728   // We need a temporary register for address generation
1729   alloc_reg_temp(current,i,-1);
1730 }
1731
1732 void c2ls_alloc(struct regstat *current,int i)
1733 {
1734   clear_const(current,rt1[i]);
1735   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1736   alloc_reg(current,i,FTEMP);
1737   // If using TLB, need a register for pointer to the mapping table
1738   if(using_tlb) alloc_reg(current,i,TLREG);
1739   #if defined(HOST_IMM8)
1740   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1741   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1742     alloc_reg(current,i,INVCP);
1743   #endif
1744   // We need a temporary register for address generation
1745   alloc_reg_temp(current,i,-1);
1746   minimum_free_regs[i]=1;
1747 }
1748
1749 #ifndef multdiv_alloc
1750 void multdiv_alloc(struct regstat *current,int i)
1751 {
1752   //  case 0x18: MULT
1753   //  case 0x19: MULTU
1754   //  case 0x1A: DIV
1755   //  case 0x1B: DIVU
1756   //  case 0x1C: DMULT
1757   //  case 0x1D: DMULTU
1758   //  case 0x1E: DDIV
1759   //  case 0x1F: DDIVU
1760   clear_const(current,rs1[i]);
1761   clear_const(current,rs2[i]);
1762   if(rs1[i]&&rs2[i])
1763   {
1764     if((opcode2[i]&4)==0) // 32-bit
1765     {
1766       current->u&=~(1LL<<HIREG);
1767       current->u&=~(1LL<<LOREG);
1768       alloc_reg(current,i,HIREG);
1769       alloc_reg(current,i,LOREG);
1770       alloc_reg(current,i,rs1[i]);
1771       alloc_reg(current,i,rs2[i]);
1772       current->is32|=1LL<<HIREG;
1773       current->is32|=1LL<<LOREG;
1774       dirty_reg(current,HIREG);
1775       dirty_reg(current,LOREG);
1776     }
1777     else // 64-bit
1778     {
1779       current->u&=~(1LL<<HIREG);
1780       current->u&=~(1LL<<LOREG);
1781       current->uu&=~(1LL<<HIREG);
1782       current->uu&=~(1LL<<LOREG);
1783       alloc_reg64(current,i,HIREG);
1784       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1785       alloc_reg64(current,i,rs1[i]);
1786       alloc_reg64(current,i,rs2[i]);
1787       alloc_all(current,i);
1788       current->is32&=~(1LL<<HIREG);
1789       current->is32&=~(1LL<<LOREG);
1790       dirty_reg(current,HIREG);
1791       dirty_reg(current,LOREG);
1792       minimum_free_regs[i]=HOST_REGS;
1793     }
1794   }
1795   else
1796   {
1797     // Multiply by zero is zero.
1798     // MIPS does not have a divide by zero exception.
1799     // The result is undefined, so we return zero.
1800     alloc_reg(current,i,HIREG);
1801     alloc_reg(current,i,LOREG);
1802     current->is32|=1LL<<HIREG;
1803     current->is32|=1LL<<LOREG;
1804     dirty_reg(current,HIREG);
1805     dirty_reg(current,LOREG);
1806   }
1807 }
1808 #endif
1809
1810 void cop0_alloc(struct regstat *current,int i)
1811 {
1812   if(opcode2[i]==0) // MFC0
1813   {
1814     if(rt1[i]) {
1815       clear_const(current,rt1[i]);
1816       alloc_all(current,i);
1817       alloc_reg(current,i,rt1[i]);
1818       current->is32|=1LL<<rt1[i];
1819       dirty_reg(current,rt1[i]);
1820     }
1821   }
1822   else if(opcode2[i]==4) // MTC0
1823   {
1824     if(rs1[i]){
1825       clear_const(current,rs1[i]);
1826       alloc_reg(current,i,rs1[i]);
1827       alloc_all(current,i);
1828     }
1829     else {
1830       alloc_all(current,i); // FIXME: Keep r0
1831       current->u&=~1LL;
1832       alloc_reg(current,i,0);
1833     }
1834   }
1835   else
1836   {
1837     // TLBR/TLBWI/TLBWR/TLBP/ERET
1838     assert(opcode2[i]==0x10);
1839     alloc_all(current,i);
1840   }
1841   minimum_free_regs[i]=HOST_REGS;
1842 }
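// Note: all of the above allocate everything (alloc_all, minimum_free_regs =
// HOST_REGS), presumably because these ops call out of the translated code
// (and MTC0 can raise an interrupt).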
1843
1844 void cop1_alloc(struct regstat *current,int i)
1845 {
1846   alloc_reg(current,i,CSREG); // Load status
1847   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1848   {
1849     if(rt1[i]){
1850       clear_const(current,rt1[i]);
1851       if(opcode2[i]==1) {
1852         alloc_reg64(current,i,rt1[i]); // DMFC1
1853         current->is32&=~(1LL<<rt1[i]);
1854       }else{
1855         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1856         current->is32|=1LL<<rt1[i];
1857       }
1858       dirty_reg(current,rt1[i]);
1859     }
1860     alloc_reg_temp(current,i,-1);
1861   }
1862   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1863   {
1864     if(rs1[i]){
1865       clear_const(current,rs1[i]);
1866       if(opcode2[i]==5)
1867         alloc_reg64(current,i,rs1[i]); // DMTC1
1868       else
1869         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1870       alloc_reg_temp(current,i,-1);
1871     }
1872     else {
1873       current->u&=~1LL;
1874       alloc_reg(current,i,0);
1875       alloc_reg_temp(current,i,-1);
1876     }
1877   }
1878   minimum_free_regs[i]=1;
1879 }
1880 void fconv_alloc(struct regstat *current,int i)
1881 {
1882   alloc_reg(current,i,CSREG); // Load status
1883   alloc_reg_temp(current,i,-1);
1884   minimum_free_regs[i]=1;
1885 }
1886 void float_alloc(struct regstat *current,int i)
1887 {
1888   alloc_reg(current,i,CSREG); // Load status
1889   alloc_reg_temp(current,i,-1);
1890   minimum_free_regs[i]=1;
1891 }
1892 void c2op_alloc(struct regstat *current,int i)
1893 {
1894   alloc_reg_temp(current,i,-1);
1895 }
1896 void fcomp_alloc(struct regstat *current,int i)
1897 {
1898   alloc_reg(current,i,CSREG); // Load status
1899   alloc_reg(current,i,FSREG); // Load flags
1900   dirty_reg(current,FSREG); // Flag will be modified
1901   alloc_reg_temp(current,i,-1);
1902   minimum_free_regs[i]=1;
1903 }
1904
1905 void syscall_alloc(struct regstat *current,int i)
1906 {
1907   alloc_cc(current,i);
1908   dirty_reg(current,CCREG);
1909   alloc_all(current,i);
1910   minimum_free_regs[i]=HOST_REGS;
1911   current->isconst=0;
1912 }
1913
1914 void delayslot_alloc(struct regstat *current,int i)
1915 {
1916   switch(itype[i]) {
1917     case UJUMP:
1918     case CJUMP:
1919     case SJUMP:
1920     case RJUMP:
1921     case FJUMP:
1922     case SYSCALL:
1923     case HLECALL:
1924     case SPAN:
1925       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1926       SysPrintf("Disabled speculative precompilation\n");
1927       stop_after_jal=1;
1928       break;
1929     case IMM16:
1930       imm16_alloc(current,i);
1931       break;
1932     case LOAD:
1933     case LOADLR:
1934       load_alloc(current,i);
1935       break;
1936     case STORE:
1937     case STORELR:
1938       store_alloc(current,i);
1939       break;
1940     case ALU:
1941       alu_alloc(current,i);
1942       break;
1943     case SHIFT:
1944       shift_alloc(current,i);
1945       break;
1946     case MULTDIV:
1947       multdiv_alloc(current,i);
1948       break;
1949     case SHIFTIMM:
1950       shiftimm_alloc(current,i);
1951       break;
1952     case MOV:
1953       mov_alloc(current,i);
1954       break;
1955     case COP0:
1956       cop0_alloc(current,i);
1957       break;
1958     case COP1:
1959     case COP2:
1960       cop1_alloc(current,i);
1961       break;
1962     case C1LS:
1963       c1ls_alloc(current,i);
1964       break;
1965     case C2LS:
1966       c2ls_alloc(current,i);
1967       break;
1968     case FCONV:
1969       fconv_alloc(current,i);
1970       break;
1971     case FLOAT:
1972       float_alloc(current,i);
1973       break;
1974     case FCOMP:
1975       fcomp_alloc(current,i);
1976       break;
1977     case C2OP:
1978       c2op_alloc(current,i);
1979       break;
1980   }
1981 }
1982
1983 // Special case where a branch and delay slot span two pages in virtual memory
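// (The branch sits at the end of one virtual page and its delay slot on the
// next, so the pair cannot be precompiled as one normal block; allocate
// conservatively here: alloc_all() and minimum_free_regs=HOST_REGS.)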
1984 static void pagespan_alloc(struct regstat *current,int i)
1985 {
1986   current->isconst=0;
1987   current->wasconst=0;
1988   regs[i].wasconst=0;
1989   minimum_free_regs[i]=HOST_REGS;
1990   alloc_all(current,i);
1991   alloc_cc(current,i);
1992   dirty_reg(current,CCREG);
1993   if(opcode[i]==3) // JAL
1994   {
1995     alloc_reg(current,i,31);
1996     dirty_reg(current,31);
1997   }
1998   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
1999   {
2000     alloc_reg(current,i,rs1[i]);
2001     if (rt1[i]!=0) {
2002       alloc_reg(current,i,rt1[i]);
2003       dirty_reg(current,rt1[i]);
2004     }
2005   }
2006   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
2007   {
2008     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2009     if(rs2[i]) alloc_reg(current,i,rs2[i]);
2010     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
2011     {
2012       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2013       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
2014     }
2015   }
2016   else
2017   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
2018   {
2019     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2020     if(!((current->is32>>rs1[i])&1))
2021     {
2022       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2023     }
2024   }
2025   else
2026   if(opcode[i]==0x11) // BC1
2027   {
2028     alloc_reg(current,i,FSREG);
2029     alloc_reg(current,i,CSREG);
2030   }
2031   //else ...
2032 }
2033
2034 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
2035 {
2036   stubs[stubcount][0]=type;
2037   stubs[stubcount][1]=addr;
2038   stubs[stubcount][2]=retaddr;
2039   stubs[stubcount][3]=a;
2040   stubs[stubcount][4]=b;
2041   stubs[stubcount][5]=c;
2042   stubs[stubcount][6]=d;
2043   stubs[stubcount][7]=e;
2044   stubcount++;
2045 }
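// Stub record layout: {type, location of the branch in the output code to be
// patched, return address, plus five stub-specific arguments a..e}.
// e.g. the load fast path registers its slow-path stub like this (see
// load_assemble below):
//   add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);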
2046
2047 // Write out a single register
2048 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
2049 {
2050   int hr;
2051   for(hr=0;hr<HOST_REGS;hr++) {
2052     if(hr!=EXCLUDE_REG) {
2053       if((regmap[hr]&63)==r) {
2054         if((dirty>>hr)&1) {
2055           if(regmap[hr]<64) {
2056             emit_storereg(r,hr);
2057 #ifndef FORCE32
2058             if((is32>>regmap[hr])&1) {
2059               emit_sarimm(hr,31,hr);
2060               emit_storereg(r|64,hr);
2061             }
2062 #endif
2063           }else{
2064             emit_storereg(r|64,hr);
2065           }
2066         }
2067       }
2068     }
2069   }
2070 }
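// e.g. a dirty host reg holding the low half of MIPS reg r is written back
// with emit_storereg(r,hr); if the value is known 32-bit (is32 bit set), its
// sign extension is also stored as the upper half (r|64) on non-FORCE32
// builds. A host reg mapping the upper half (regmap>=64) stores r|64 directly.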
2071
2072 int mchecksum()
2073 {
2074   //if(!tracedebug) return 0;
2075   int i;
2076   int sum=0;
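  // Each step rotates sum left by one bit, feeding in the complement of the
  // old MSB, then XORs in the next RDRAM word; in effect:
  //   sum = ((sum << 1) | ((~sum) >> 31)) ^ ((u_int *)rdram)[i]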
2077   for(i=0;i<2097152;i++) {
2078     unsigned int temp=sum;
2079     sum<<=1;
2080     sum|=(~temp)>>31;
2081     sum^=((u_int *)rdram)[i];
2082   }
2083   return sum;
2084 }
2085 int rchecksum()
2086 {
2087   int i;
2088   int sum=0;
2089   for(i=0;i<64;i++)
2090     sum^=((u_int *)reg)[i];
2091   return sum;
2092 }
2093 void rlist()
2094 {
2095   int i;
2096   printf("TRACE: ");
2097   for(i=0;i<32;i++)
2098     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2099   printf("\n");
2100 #ifndef DISABLE_COP1
2101   printf("TRACE: ");
2102   for(i=0;i<32;i++)
2103     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2104   printf("\n");
2105 #endif
2106 }
2107
2108 void enabletrace()
2109 {
2110   tracedebug=1;
2111 }
2112
2113 void memdebug(int i)
2114 {
2115   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2116   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2117   //rlist();
2118   //if(tracedebug) {
2119   //if(Count>=-2084597794) {
2120   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2121   //if(0) {
2122     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2123     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2124     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2125     rlist();
2126     #ifdef __i386__
2127     printf("TRACE: %x\n",(&i)[-1]);
2128     #endif
2129     #ifdef __arm__
2130     int j;
2131     printf("TRACE: %x \n",(&j)[10]);
2132     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2133     #endif
2134     //fflush(stdout);
2135   }
2136   //printf("TRACE: %x\n",(&i)[-1]);
2137 }
2138
2139 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2140 {
2141   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2142 }
2143
2144 void alu_assemble(int i,struct regstat *i_regs)
2145 {
2146   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2147     if(rt1[i]) {
2148       signed char s1,s2,t;
2149       t=get_reg(i_regs->regmap,rt1[i]);
2150       if(t>=0) {
2151         s1=get_reg(i_regs->regmap,rs1[i]);
2152         s2=get_reg(i_regs->regmap,rs2[i]);
2153         if(rs1[i]&&rs2[i]) {
2154           assert(s1>=0);
2155           assert(s2>=0);
2156           if(opcode2[i]&2) emit_sub(s1,s2,t);
2157           else emit_add(s1,s2,t);
2158         }
2159         else if(rs1[i]) {
2160           if(s1>=0) emit_mov(s1,t);
2161           else emit_loadreg(rs1[i],t);
2162         }
2163         else if(rs2[i]) {
2164           if(s2>=0) {
2165             if(opcode2[i]&2) emit_neg(s2,t);
2166             else emit_mov(s2,t);
2167           }
2168           else {
2169             emit_loadreg(rs2[i],t);
2170             if(opcode2[i]&2) emit_neg(t,t);
2171           }
2172         }
2173         else emit_zeroreg(t);
2174       }
2175     }
2176   }
2177   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2178     if(rt1[i]) {
2179       signed char s1l,s2l,s1h,s2h,tl,th;
2180       tl=get_reg(i_regs->regmap,rt1[i]);
2181       th=get_reg(i_regs->regmap,rt1[i]|64);
2182       if(tl>=0) {
2183         s1l=get_reg(i_regs->regmap,rs1[i]);
2184         s2l=get_reg(i_regs->regmap,rs2[i]);
2185         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2186         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2187         if(rs1[i]&&rs2[i]) {
2188           assert(s1l>=0);
2189           assert(s2l>=0);
2190           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2191           else emit_adds(s1l,s2l,tl);
2192           if(th>=0) {
2193             #ifdef INVERTED_CARRY
2194             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2195             #else
2196             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2197             #endif
2198             else emit_add(s1h,s2h,th);
2199           }
2200         }
2201         else if(rs1[i]) {
2202           if(s1l>=0) emit_mov(s1l,tl);
2203           else emit_loadreg(rs1[i],tl);
2204           if(th>=0) {
2205             if(s1h>=0) emit_mov(s1h,th);
2206             else emit_loadreg(rs1[i]|64,th);
2207           }
2208         }
2209         else if(rs2[i]) {
2210           if(s2l>=0) {
2211             if(opcode2[i]&2) emit_negs(s2l,tl);
2212             else emit_mov(s2l,tl);
2213           }
2214           else {
2215             emit_loadreg(rs2[i],tl);
2216             if(opcode2[i]&2) emit_negs(tl,tl);
2217           }
2218           if(th>=0) {
2219             #ifdef INVERTED_CARRY
2220             if(s2h>=0) emit_mov(s2h,th);
2221             else emit_loadreg(rs2[i]|64,th);
2222             if(opcode2[i]&2) {
2223               emit_adcimm(-1,th); // x86 has inverted carry flag
2224               emit_not(th,th);
2225             }
2226             #else
2227             if(opcode2[i]&2) {
2228               if(s2h>=0) emit_rscimm(s2h,0,th);
2229               else {
2230                 emit_loadreg(rs2[i]|64,th);
2231                 emit_rscimm(th,0,th);
2232               }
2233             }else{
2234               if(s2h>=0) emit_mov(s2h,th);
2235               else emit_loadreg(rs2[i]|64,th);
2236             }
2237             #endif
2238           }
2239         }
2240         else {
2241           emit_zeroreg(tl);
2242           if(th>=0) emit_zeroreg(th);
2243         }
2244       }
2245     }
2246   }
2247   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2248     if(rt1[i]) {
2249       signed char s1l,s1h,s2l,s2h,t;
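      // If either source was not known to be 32 bits wide, compare using the
      // upper halves as well; otherwise the 32-bit-only path further down is used.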
2250       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2251       {
2252         t=get_reg(i_regs->regmap,rt1[i]);
2253         //assert(t>=0);
2254         if(t>=0) {
2255           s1l=get_reg(i_regs->regmap,rs1[i]);
2256           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2257           s2l=get_reg(i_regs->regmap,rs2[i]);
2258           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2259           if(rs2[i]==0) // rx<r0
2260           {
2261             assert(s1h>=0);
2262             if(opcode2[i]==0x2a) // SLT
2263               emit_shrimm(s1h,31,t);
2264             else // SLTU (unsigned cannot be less than zero)
2265               emit_zeroreg(t);
2266           }
2267           else if(rs1[i]==0) // r0<rx
2268           {
2269             assert(s2h>=0);
2270             if(opcode2[i]==0x2a) // SLT
2271               emit_set_gz64_32(s2h,s2l,t);
2272             else // SLTU (set if not zero)
2273               emit_set_nz64_32(s2h,s2l,t);
2274           }
2275           else {
2276             assert(s1l>=0);assert(s1h>=0);
2277             assert(s2l>=0);assert(s2h>=0);
2278             if(opcode2[i]==0x2a) // SLT
2279               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2280             else // SLTU
2281               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2282           }
2283         }
2284       } else {
2285         t=get_reg(i_regs->regmap,rt1[i]);
2286         //assert(t>=0);
2287         if(t>=0) {
2288           s1l=get_reg(i_regs->regmap,rs1[i]);
2289           s2l=get_reg(i_regs->regmap,rs2[i]);
2290           if(rs2[i]==0) // rx<r0
2291           {
2292             assert(s1l>=0);
2293             if(opcode2[i]==0x2a) // SLT
2294               emit_shrimm(s1l,31,t);
2295             else // SLTU (unsigned cannot be less than zero)
2296               emit_zeroreg(t);
2297           }
2298           else if(rs1[i]==0) // r0<rx
2299           {
2300             assert(s2l>=0);
2301             if(opcode2[i]==0x2a) // SLT
2302               emit_set_gz32(s2l,t);
2303             else // SLTU (set if not zero)
2304               emit_set_nz32(s2l,t);
2305           }
2306           else{
2307             assert(s1l>=0);assert(s2l>=0);
2308             if(opcode2[i]==0x2a) // SLT
2309               emit_set_if_less32(s1l,s2l,t);
2310             else // SLTU
2311               emit_set_if_carry32(s1l,s2l,t);
2312           }
2313         }
2314       }
2315     }
2316   }
2317   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2318     if(rt1[i]) {
2319       signed char s1l,s1h,s2l,s2h,th,tl;
2320       tl=get_reg(i_regs->regmap,rt1[i]);
2321       th=get_reg(i_regs->regmap,rt1[i]|64);
2322       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2323       {
2324         assert(tl>=0);
2325         if(tl>=0) {
2326           s1l=get_reg(i_regs->regmap,rs1[i]);
2327           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2328           s2l=get_reg(i_regs->regmap,rs2[i]);
2329           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2330           if(rs1[i]&&rs2[i]) {
2331             assert(s1l>=0);assert(s1h>=0);
2332             assert(s2l>=0);assert(s2h>=0);
2333             if(opcode2[i]==0x24) { // AND
2334               emit_and(s1l,s2l,tl);
2335               emit_and(s1h,s2h,th);
2336             } else
2337             if(opcode2[i]==0x25) { // OR
2338               emit_or(s1l,s2l,tl);
2339               emit_or(s1h,s2h,th);
2340             } else
2341             if(opcode2[i]==0x26) { // XOR
2342               emit_xor(s1l,s2l,tl);
2343               emit_xor(s1h,s2h,th);
2344             } else
2345             if(opcode2[i]==0x27) { // NOR
2346               emit_or(s1l,s2l,tl);
2347               emit_or(s1h,s2h,th);
2348               emit_not(tl,tl);
2349               emit_not(th,th);
2350             }
2351           }
2352           else
2353           {
2354             if(opcode2[i]==0x24) { // AND
2355               emit_zeroreg(tl);
2356               emit_zeroreg(th);
2357             } else
2358             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2359               if(rs1[i]){
2360                 if(s1l>=0) emit_mov(s1l,tl);
2361                 else emit_loadreg(rs1[i],tl);
2362                 if(s1h>=0) emit_mov(s1h,th);
2363                 else emit_loadreg(rs1[i]|64,th);
2364               }
2365               else
2366               if(rs2[i]){
2367                 if(s2l>=0) emit_mov(s2l,tl);
2368                 else emit_loadreg(rs2[i],tl);
2369                 if(s2h>=0) emit_mov(s2h,th);
2370                 else emit_loadreg(rs2[i]|64,th);
2371               }
2372               else{
2373                 emit_zeroreg(tl);
2374                 emit_zeroreg(th);
2375               }
2376             } else
2377             if(opcode2[i]==0x27) { // NOR
2378               if(rs1[i]){
2379                 if(s1l>=0) emit_not(s1l,tl);
2380                 else{
2381                   emit_loadreg(rs1[i],tl);
2382                   emit_not(tl,tl);
2383                 }
2384                 if(s1h>=0) emit_not(s1h,th);
2385                 else{
2386                   emit_loadreg(rs1[i]|64,th);
2387                   emit_not(th,th);
2388                 }
2389               }
2390               else
2391               if(rs2[i]){
2392                 if(s2l>=0) emit_not(s2l,tl);
2393                 else{
2394                   emit_loadreg(rs2[i],tl);
2395                   emit_not(tl,tl);
2396                 }
2397                 if(s2h>=0) emit_not(s2h,th);
2398                 else{
2399                   emit_loadreg(rs2[i]|64,th);
2400                   emit_not(th,th);
2401                 }
2402               }
2403               else {
2404                 emit_movimm(-1,tl);
2405                 emit_movimm(-1,th);
2406               }
2407             }
2408           }
2409         }
2410       }
2411       else
2412       {
2413         // 32 bit
2414         if(tl>=0) {
2415           s1l=get_reg(i_regs->regmap,rs1[i]);
2416           s2l=get_reg(i_regs->regmap,rs2[i]);
2417           if(rs1[i]&&rs2[i]) {
2418             assert(s1l>=0);
2419             assert(s2l>=0);
2420             if(opcode2[i]==0x24) { // AND
2421               emit_and(s1l,s2l,tl);
2422             } else
2423             if(opcode2[i]==0x25) { // OR
2424               emit_or(s1l,s2l,tl);
2425             } else
2426             if(opcode2[i]==0x26) { // XOR
2427               emit_xor(s1l,s2l,tl);
2428             } else
2429             if(opcode2[i]==0x27) { // NOR
2430               emit_or(s1l,s2l,tl);
2431               emit_not(tl,tl);
2432             }
2433           }
2434           else
2435           {
2436             if(opcode2[i]==0x24) { // AND
2437               emit_zeroreg(tl);
2438             } else
2439             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2440               if(rs1[i]){
2441                 if(s1l>=0) emit_mov(s1l,tl);
2442                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2443               }
2444               else
2445               if(rs2[i]){
2446                 if(s2l>=0) emit_mov(s2l,tl);
2447                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2448               }
2449               else emit_zeroreg(tl);
2450             } else
2451             if(opcode2[i]==0x27) { // NOR
2452               if(rs1[i]){
2453                 if(s1l>=0) emit_not(s1l,tl);
2454                 else {
2455                   emit_loadreg(rs1[i],tl);
2456                   emit_not(tl,tl);
2457                 }
2458               }
2459               else
2460               if(rs2[i]){
2461                 if(s2l>=0) emit_not(s2l,tl);
2462                 else {
2463                   emit_loadreg(rs2[i],tl);
2464                   emit_not(tl,tl);
2465                 }
2466               }
2467               else emit_movimm(-1,tl);
2468             }
2469           }
2470         }
2471       }
2472     }
2473   }
2474 }
2475
2476 void imm16_assemble(int i,struct regstat *i_regs)
2477 {
2478   if (opcode[i]==0x0f) { // LUI
2479     if(rt1[i]) {
2480       signed char t;
2481       t=get_reg(i_regs->regmap,rt1[i]);
2482       //assert(t>=0);
2483       if(t>=0) {
2484         if(!((i_regs->isconst>>t)&1))
2485           emit_movimm(imm[i]<<16,t);
2486       }
2487     }
2488   }
2489   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2490     if(rt1[i]) {
2491       signed char s,t;
2492       t=get_reg(i_regs->regmap,rt1[i]);
2493       s=get_reg(i_regs->regmap,rs1[i]);
2494       if(rs1[i]) {
2495         //assert(t>=0);
2496         //assert(s>=0);
2497         if(t>=0) {
2498           if(!((i_regs->isconst>>t)&1)) {
2499             if(s<0) {
2500               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2501               emit_addimm(t,imm[i],t);
2502             }else{
2503               if(!((i_regs->wasconst>>s)&1))
2504                 emit_addimm(s,imm[i],t);
2505               else
2506                 emit_movimm(constmap[i][s]+imm[i],t);
2507             }
2508           }
2509         }
2510       } else {
2511         if(t>=0) {
2512           if(!((i_regs->isconst>>t)&1))
2513             emit_movimm(imm[i],t);
2514         }
2515       }
2516     }
2517   }
2518   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2519     if(rt1[i]) {
2520       signed char sh,sl,th,tl;
2521       th=get_reg(i_regs->regmap,rt1[i]|64);
2522       tl=get_reg(i_regs->regmap,rt1[i]);
2523       sh=get_reg(i_regs->regmap,rs1[i]|64);
2524       sl=get_reg(i_regs->regmap,rs1[i]);
2525       if(tl>=0) {
2526         if(rs1[i]) {
2527           assert(sh>=0);
2528           assert(sl>=0);
2529           if(th>=0) {
2530             emit_addimm64_32(sh,sl,imm[i],th,tl);
2531           }
2532           else {
2533             emit_addimm(sl,imm[i],tl);
2534           }
2535         } else {
2536           emit_movimm(imm[i],tl);
2537           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2538         }
2539       }
2540     }
2541   }
2542   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2543     if(rt1[i]) {
2544       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2545       signed char sh,sl,t;
2546       t=get_reg(i_regs->regmap,rt1[i]);
2547       sh=get_reg(i_regs->regmap,rs1[i]|64);
2548       sl=get_reg(i_regs->regmap,rs1[i]);
2549       //assert(t>=0);
2550       if(t>=0) {
2551         if(rs1[i]>0) {
2552           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2553           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2554             if(opcode[i]==0x0a) { // SLTI
2555               if(sl<0) {
2556                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2557                 emit_slti32(t,imm[i],t);
2558               }else{
2559                 emit_slti32(sl,imm[i],t);
2560               }
2561             }
2562             else { // SLTIU
2563               if(sl<0) {
2564                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2565                 emit_sltiu32(t,imm[i],t);
2566               }else{
2567                 emit_sltiu32(sl,imm[i],t);
2568               }
2569             }
2570           }else{ // 64-bit
2571             assert(sl>=0);
2572             if(opcode[i]==0x0a) // SLTI
2573               emit_slti64_32(sh,sl,imm[i],t);
2574             else // SLTIU
2575               emit_sltiu64_32(sh,sl,imm[i],t);
2576           }
2577         }else{
2578           // SLTI(U) with r0 is just stupid,
2579           // nonetheless examples can be found
2580           if(opcode[i]==0x0a) // SLTI
2581             if(0<imm[i]) emit_movimm(1,t);
2582             else emit_zeroreg(t);
2583           else // SLTIU
2584           {
2585             if(imm[i]) emit_movimm(1,t);
2586             else emit_zeroreg(t);
2587           }
2588         }
2589       }
2590     }
2591   }
2592   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2593     if(rt1[i]) {
2594       signed char sh,sl,th,tl;
2595       th=get_reg(i_regs->regmap,rt1[i]|64);
2596       tl=get_reg(i_regs->regmap,rt1[i]);
2597       sh=get_reg(i_regs->regmap,rs1[i]|64);
2598       sl=get_reg(i_regs->regmap,rs1[i]);
2599       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2600         if(opcode[i]==0x0c) //ANDI
2601         {
2602           if(rs1[i]) {
2603             if(sl<0) {
2604               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2605               emit_andimm(tl,imm[i],tl);
2606             }else{
2607               if(!((i_regs->wasconst>>sl)&1))
2608                 emit_andimm(sl,imm[i],tl);
2609               else
2610                 emit_movimm(constmap[i][sl]&imm[i],tl);
2611             }
2612           }
2613           else
2614             emit_zeroreg(tl);
2615           if(th>=0) emit_zeroreg(th);
2616         }
2617         else
2618         {
2619           if(rs1[i]) {
2620             if(sl<0) {
2621               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2622             }
2623             if(th>=0) {
2624               if(sh<0) {
2625                 emit_loadreg(rs1[i]|64,th);
2626               }else{
2627                 emit_mov(sh,th);
2628               }
2629             }
2630             if(opcode[i]==0x0d) //ORI
2631             if(sl<0) {
2632               emit_orimm(tl,imm[i],tl);
2633             }else{
2634               if(!((i_regs->wasconst>>sl)&1))
2635                 emit_orimm(sl,imm[i],tl);
2636               else
2637                 emit_movimm(constmap[i][sl]|imm[i],tl);
2638             }
2639             if(opcode[i]==0x0e) //XORI
2640             if(sl<0) {
2641               emit_xorimm(tl,imm[i],tl);
2642             }else{
2643               if(!((i_regs->wasconst>>sl)&1))
2644                 emit_xorimm(sl,imm[i],tl);
2645               else
2646                 emit_movimm(constmap[i][sl]^imm[i],tl);
2647             }
2648           }
2649           else {
2650             emit_movimm(imm[i],tl);
2651             if(th>=0) emit_zeroreg(th);
2652           }
2653         }
2654       }
2655     }
2656   }
2657 }
2658
2659 void shiftimm_assemble(int i,struct regstat *i_regs)
2660 {
2661   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2662   {
2663     if(rt1[i]) {
2664       signed char s,t;
2665       t=get_reg(i_regs->regmap,rt1[i]);
2666       s=get_reg(i_regs->regmap,rs1[i]);
2667       //assert(t>=0);
2668       if(t>=0&&!((i_regs->isconst>>t)&1)){
2669         if(rs1[i]==0)
2670         {
2671           emit_zeroreg(t);
2672         }
2673         else
2674         {
2675           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2676           if(imm[i]) {
2677             if(opcode2[i]==0) // SLL
2678             {
2679               emit_shlimm(s<0?t:s,imm[i],t);
2680             }
2681             if(opcode2[i]==2) // SRL
2682             {
2683               emit_shrimm(s<0?t:s,imm[i],t);
2684             }
2685             if(opcode2[i]==3) // SRA
2686             {
2687               emit_sarimm(s<0?t:s,imm[i],t);
2688             }
2689           }else{
2690             // Shift by zero
2691             if(s>=0 && s!=t) emit_mov(s,t);
2692           }
2693         }
2694       }
2695       //emit_storereg(rt1[i],t); //DEBUG
2696     }
2697   }
2698   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2699   {
2700     if(rt1[i]) {
2701       signed char sh,sl,th,tl;
2702       th=get_reg(i_regs->regmap,rt1[i]|64);
2703       tl=get_reg(i_regs->regmap,rt1[i]);
2704       sh=get_reg(i_regs->regmap,rs1[i]|64);
2705       sl=get_reg(i_regs->regmap,rs1[i]);
2706       if(tl>=0) {
2707         if(rs1[i]==0)
2708         {
2709           emit_zeroreg(tl);
2710           if(th>=0) emit_zeroreg(th);
2711         }
2712         else
2713         {
2714           assert(sl>=0);
2715           assert(sh>=0);
2716           if(imm[i]) {
2717             if(opcode2[i]==0x38) // DSLL
2718             {
2719               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2720               emit_shlimm(sl,imm[i],tl);
2721             }
2722             if(opcode2[i]==0x3a) // DSRL
2723             {
2724               emit_shrdimm(sl,sh,imm[i],tl);
2725               if(th>=0) emit_shrimm(sh,imm[i],th);
2726             }
2727             if(opcode2[i]==0x3b) // DSRA
2728             {
2729               emit_shrdimm(sl,sh,imm[i],tl);
2730               if(th>=0) emit_sarimm(sh,imm[i],th);
2731             }
2732           }else{
2733             // Shift by zero
2734             if(sl!=tl) emit_mov(sl,tl);
2735             if(th>=0&&sh!=th) emit_mov(sh,th);
2736           }
2737         }
2738       }
2739     }
2740   }
2741   if(opcode2[i]==0x3c) // DSLL32
2742   {
2743     if(rt1[i]) {
2744       signed char sl,tl,th;
2745       tl=get_reg(i_regs->regmap,rt1[i]);
2746       th=get_reg(i_regs->regmap,rt1[i]|64);
2747       sl=get_reg(i_regs->regmap,rs1[i]);
2748       if(th>=0||tl>=0){
2749         assert(tl>=0);
2750         assert(th>=0);
2751         assert(sl>=0);
2752         emit_mov(sl,th);
2753         emit_zeroreg(tl);
2754         if(imm[i]>32)
2755         {
2756           emit_shlimm(th,imm[i]&31,th);
2757         }
2758       }
2759     }
2760   }
2761   if(opcode2[i]==0x3e) // DSRL32
2762   {
2763     if(rt1[i]) {
2764       signed char sh,tl,th;
2765       tl=get_reg(i_regs->regmap,rt1[i]);
2766       th=get_reg(i_regs->regmap,rt1[i]|64);
2767       sh=get_reg(i_regs->regmap,rs1[i]|64);
2768       if(tl>=0){
2769         assert(sh>=0);
2770         emit_mov(sh,tl);
2771         if(th>=0) emit_zeroreg(th);
2772         if(imm[i]>32)
2773         {
2774           emit_shrimm(tl,imm[i]&31,tl);
2775         }
2776       }
2777     }
2778   }
2779   if(opcode2[i]==0x3f) // DSRA32
2780   {
2781     if(rt1[i]) {
2782       signed char sh,tl;
2783       tl=get_reg(i_regs->regmap,rt1[i]);
2784       sh=get_reg(i_regs->regmap,rs1[i]|64);
2785       if(tl>=0){
2786         assert(sh>=0);
2787         emit_mov(sh,tl);
2788         if(imm[i]>32)
2789         {
2790           emit_sarimm(tl,imm[i]&31,tl);
2791         }
2792       }
2793     }
2794   }
2795 }
2796
2797 #ifndef shift_assemble
2798 void shift_assemble(int i,struct regstat *i_regs)
2799 {
2800   printf("Need shift_assemble for this architecture.\n");
2801   exit(1);
2802 }
2803 #endif
2804
2805 void load_assemble(int i,struct regstat *i_regs)
2806 {
2807   int s,th,tl,addr,map=-1;
2808   int offset;
2809   int jaddr=0;
2810   int memtarget=0,c=0;
2811   int fastload_reg_override=0;
2812   u_int hr,reglist=0;
2813   th=get_reg(i_regs->regmap,rt1[i]|64);
2814   tl=get_reg(i_regs->regmap,rt1[i]);
2815   s=get_reg(i_regs->regmap,rs1[i]);
2816   offset=imm[i];
2817   for(hr=0;hr<HOST_REGS;hr++) {
2818     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2819   }
2820   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2821   if(s>=0) {
2822     c=(i_regs->wasconst>>s)&1;
2823     if (c) {
2824       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2825       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2826     }
2827   }
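  // With a constant base address, memtarget decides at compile time whether
  // the access falls in RAM (emit the fast path inline) or must go through an
  // inline_readstub; with the TLB enabled, addresses >= 0xC0000000 are also
  // treated as mapped memory.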
2828   //printf("load_assemble: c=%d\n",c);
2829   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2830   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2831 #ifdef PCSX
2832   if(tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80)
2833     ||rt1[i]==0) {
2834       // could be an I/O FIFO, so the read must still be performed
2835       // (or a dummy read when rt1[i]==0)
2836       assem_debug("(forced read)\n");
2837       tl=get_reg(i_regs->regmap,-1);
2838       assert(tl>=0);
2839   }
2840 #endif
2841   if(offset||s<0||c) addr=tl;
2842   else addr=s;
2843   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2844  if(tl>=0) {
2845   //printf("load_assemble: c=%d\n",c);
2846   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2847   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2848   reglist&=~(1<<tl);
2849   if(th>=0) reglist&=~(1<<th);
2850   if(!using_tlb) {
2851     if(!c) {
2852       #ifdef RAM_OFFSET
2853       map=get_reg(i_regs->regmap,ROREG);
2854       if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2855       #endif
2856 //#define R29_HACK 1
2857       #ifdef R29_HACK
2858       // Strmnnrmn's speed hack
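      // (skip the RAM range check when the base reg is $sp (r29) and the block
      // being compiled lies in RAM, assuming sp-relative accesses stay in RAM)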
2859       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2860       #endif
2861       {
2862         jaddr=emit_fastpath_cmp_jump(i,addr,&fastload_reg_override);
2863       }
2864     }
2865     else if(ram_offset&&memtarget) {
2866       emit_addimm(addr,ram_offset,HOST_TEMPREG);
2867       fastload_reg_override=HOST_TEMPREG;
2868     }
2869   }else{ // using tlb
2870     int x=0;
2871     if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2872     if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2873     map=get_reg(i_regs->regmap,TLREG);
2874     assert(map>=0);
2875     reglist&=~(1<<map);
2876     map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2877     do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2878   }
2879   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2880   if (opcode[i]==0x20) { // LB
2881     if(!c||memtarget) {
2882       if(!dummy) {
2883         #ifdef HOST_IMM_ADDR32
2884         if(c)
2885           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2886         else
2887         #endif
2888         {
2889           //emit_xorimm(addr,3,tl);
2890           //gen_tlb_addr_r(tl,map);
2891           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2892           int x=0,a=tl;
2893 #ifdef BIG_ENDIAN_MIPS
2894           if(!c) emit_xorimm(addr,3,tl);
2895           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2896 #else
2897           if(!c) a=addr;
2898 #endif
2899           if(fastload_reg_override) a=fastload_reg_override;
2900
2901           emit_movsbl_indexed_tlb(x,a,map,tl);
2902         }
2903       }
2904       if(jaddr)
2905         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2906     }
2907     else
2908       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2909   }
2910   if (opcode[i]==0x21) { // LH
2911     if(!c||memtarget) {
2912       if(!dummy) {
2913         #ifdef HOST_IMM_ADDR32
2914         if(c)
2915           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2916         else
2917         #endif
2918         {
2919           int x=0,a=tl;
2920 #ifdef BIG_ENDIAN_MIPS
2921           if(!c) emit_xorimm(addr,2,tl);
2922           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2923 #else
2924           if(!c) a=addr;
2925 #endif
2926           if(fastload_reg_override) a=fastload_reg_override;
2927           //#ifdef
2928           //emit_movswl_indexed_tlb(x,tl,map,tl);
2929           //else
2930           if(map>=0) {
2931             gen_tlb_addr_r(a,map);
2932             emit_movswl_indexed(x,a,tl);
2933           }else{
2934             #if 1 //def RAM_OFFSET
2935             emit_movswl_indexed(x,a,tl);
2936             #else
2937             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2938             #endif
2939           }
2940         }
2941       }
2942       if(jaddr)
2943         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2944     }
2945     else
2946       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2947   }
2948   if (opcode[i]==0x23) { // LW
2949     if(!c||memtarget) {
2950       if(!dummy) {
2951         int a=addr;
2952         if(fastload_reg_override) a=fastload_reg_override;
2953         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2954         #ifdef HOST_IMM_ADDR32
2955         if(c)
2956           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2957         else
2958         #endif
2959         emit_readword_indexed_tlb(0,a,map,tl);
2960       }
2961       if(jaddr)
2962         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2963     }
2964     else
2965       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2966   }
2967   if (opcode[i]==0x24) { // LBU
2968     if(!c||memtarget) {
2969       if(!dummy) {
2970         #ifdef HOST_IMM_ADDR32
2971         if(c)
2972           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
2973         else
2974         #endif
2975         {
2976           //emit_xorimm(addr,3,tl);
2977           //gen_tlb_addr_r(tl,map);
2978           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
2979           int x=0,a=tl;
2980 #ifdef BIG_ENDIAN_MIPS
2981           if(!c) emit_xorimm(addr,3,tl);
2982           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2983 #else
2984           if(!c) a=addr;
2985 #endif
2986           if(fastload_reg_override) a=fastload_reg_override;
2987
2988           emit_movzbl_indexed_tlb(x,a,map,tl);
2989         }
2990       }
2991       if(jaddr)
2992         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2993     }
2994     else
2995       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2996   }
2997   if (opcode[i]==0x25) { // LHU
2998     if(!c||memtarget) {
2999       if(!dummy) {
3000         #ifdef HOST_IMM_ADDR32
3001         if(c)
3002           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
3003         else
3004         #endif
3005         {
3006           int x=0,a=tl;
3007 #ifdef BIG_ENDIAN_MIPS
3008           if(!c) emit_xorimm(addr,2,tl);
3009           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3010 #else
3011           if(!c) a=addr;
3012 #endif
3013           if(fastload_reg_override) a=fastload_reg_override;
3014           //#ifdef
3015           //emit_movzwl_indexed_tlb(x,tl,map,tl);
3016           //#else
3017           if(map>=0) {
3018             gen_tlb_addr_r(a,map);
3019             emit_movzwl_indexed(x,a,tl);
3020           }else{
3021             #if 1 //def RAM_OFFSET
3022             emit_movzwl_indexed(x,a,tl);
3023             #else
3024             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
3025             #endif
3026           }
3027         }
3028       }
3029       if(jaddr)
3030         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3031     }
3032     else
3033       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3034   }
3035   if (opcode[i]==0x27) { // LWU
3036     assert(th>=0);
3037     if(!c||memtarget) {
3038       if(!dummy) {
3039         int a=addr;
3040         if(fastload_reg_override) a=fastload_reg_override;
3041         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
3042         #ifdef HOST_IMM_ADDR32
3043         if(c)
3044           emit_readword_tlb(constmap[i][s]+offset,map,tl);
3045         else
3046         #endif
3047         emit_readword_indexed_tlb(0,a,map,tl);
3048       }
3049       if(jaddr)
3050         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3051     }
3052     else {
3053       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3054     }
3055     emit_zeroreg(th);
3056   }
3057   if (opcode[i]==0x37) { // LD
3058     if(!c||memtarget) {
3059       if(!dummy) {
3060         int a=addr;
3061         if(fastload_reg_override) a=fastload_reg_override;
3062         //gen_tlb_addr_r(tl,map);
3063         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
3064         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
3065         #ifdef HOST_IMM_ADDR32
3066         if(c)
3067           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3068         else
3069         #endif
3070         emit_readdword_indexed_tlb(0,a,map,th,tl);
3071       }
3072       if(jaddr)
3073         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3074     }
3075     else
3076       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3077   }
3078  }
3079   //emit_storereg(rt1[i],tl); // DEBUG
3080   //if(opcode[i]==0x23)
3081   //if(opcode[i]==0x24)
3082   //if(opcode[i]==0x23||opcode[i]==0x24)
3083   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3084   {
3085     //emit_pusha();
3086     save_regs(0x100f);
3087         emit_readword((int)&last_count,ECX);
3088         #ifdef __i386__
3089         if(get_reg(i_regs->regmap,CCREG)<0)
3090           emit_loadreg(CCREG,HOST_CCREG);
3091         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3092         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3093         emit_writeword(HOST_CCREG,(int)&Count);
3094         #endif
3095         #ifdef __arm__
3096         if(get_reg(i_regs->regmap,CCREG)<0)
3097           emit_loadreg(CCREG,0);
3098         else
3099           emit_mov(HOST_CCREG,0);
3100         emit_add(0,ECX,0);
3101         emit_addimm(0,2*ccadj[i],0);
3102         emit_writeword(0,(int)&Count);
3103         #endif
3104     emit_call((int)memdebug);
3105     //emit_popa();
3106     restore_regs(0x100f);
3107   }/**/
3108 }
3109
3110 #ifndef loadlr_assemble
3111 void loadlr_assemble(int i,struct regstat *i_regs)
3112 {
3113   printf("Need loadlr_assemble for this architecture.\n");
3114   exit(1);
3115 }
3116 #endif
3117
3118 void store_assemble(int i,struct regstat *i_regs)
3119 {
3120   int s,th,tl,map=-1;
3121   int addr,temp;
3122   int offset;
3123   int jaddr=0,jaddr2,type;
3124   int memtarget=0,c=0;
3125   int agr=AGEN1+(i&1);
3126   int faststore_reg_override=0;
3127   u_int hr,reglist=0;
3128   th=get_reg(i_regs->regmap,rs2[i]|64);
3129   tl=get_reg(i_regs->regmap,rs2[i]);
3130   s=get_reg(i_regs->regmap,rs1[i]);
3131   temp=get_reg(i_regs->regmap,agr);
3132   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3133   offset=imm[i];
3134   if(s>=0) {
3135     c=(i_regs->wasconst>>s)&1;
3136     if(c) {
3137       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3138       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3139     }
3140   }
3141   assert(tl>=0);
3142   assert(temp>=0);
3143   for(hr=0;hr<HOST_REGS;hr++) {
3144     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3145   }
3146   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3147   if(offset||s<0||c) addr=temp;
3148   else addr=s;
3149   if(!using_tlb) {
3150     if(!c) {
3151       #ifndef PCSX
3152       #ifdef R29_HACK
3153       // Strmnnrmn's speed hack
3154       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3155       #endif
3156       emit_cmpimm(addr,RAM_SIZE);
3157       #ifdef DESTRUCTIVE_SHIFT
3158       if(s==addr) emit_mov(s,temp);
3159       #endif
3160       #ifdef R29_HACK
3161       memtarget=1;
3162       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3163       #endif
3164       {
3165         jaddr=(int)out;
3166         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3167         // Hint to branch predictor that the branch is unlikely to be taken
3168         if(rs1[i]>=28)
3169           emit_jno_unlikely(0);
3170         else
3171         #endif
3172         emit_jno(0);
3173       }
3174       #else
3175         jaddr=emit_fastpath_cmp_jump(i,addr,&faststore_reg_override);
3176       #endif
3177     }
3178     else if(ram_offset&&memtarget) {
3179       emit_addimm(addr,ram_offset,HOST_TEMPREG);
3180       faststore_reg_override=HOST_TEMPREG;
3181     }
3182   }else{ // using tlb
3183     int x=0;
3184     if (opcode[i]==0x28) x=3; // SB
3185     if (opcode[i]==0x29) x=2; // SH
3186     map=get_reg(i_regs->regmap,TLREG);
3187     assert(map>=0);
3188     reglist&=~(1<<map);
3189     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3190     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3191   }
3192
3193   if (opcode[i]==0x28) { // SB
3194     if(!c||memtarget) {
3195       int x=0,a=temp;
3196 #ifdef BIG_ENDIAN_MIPS
3197       if(!c) emit_xorimm(addr,3,temp);
3198       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3199 #else
3200       if(!c) a=addr;
3201 #endif
3202       if(faststore_reg_override) a=faststore_reg_override;
3203       //gen_tlb_addr_w(temp,map);
3204       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3205       emit_writebyte_indexed_tlb(tl,x,a,map,a);
3206     }
3207     type=STOREB_STUB;
3208   }
3209   if (opcode[i]==0x29) { // SH
3210     if(!c||memtarget) {
3211       int x=0,a=temp;
3212 #ifdef BIG_ENDIAN_MIPS
3213       if(!c) emit_xorimm(addr,2,temp);
3214       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3215 #else
3216       if(!c) a=addr;
3217 #endif
3218       if(faststore_reg_override) a=faststore_reg_override;
3219       //#ifdef
3220       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3221       //#else
3222       if(map>=0) {
3223         gen_tlb_addr_w(a,map);
3224         emit_writehword_indexed(tl,x,a);
3225       }else
3226         //emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
3227         emit_writehword_indexed(tl,x,a);
3228     }
3229     type=STOREH_STUB;
3230   }
3231   if (opcode[i]==0x2B) { // SW
3232     if(!c||memtarget) {
3233       int a=addr;
3234       if(faststore_reg_override) a=faststore_reg_override;
3235       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3236       emit_writeword_indexed_tlb(tl,0,a,map,temp);
3237     }
3238     type=STOREW_STUB;
3239   }
3240   if (opcode[i]==0x3F) { // SD
3241     if(!c||memtarget) {
3242       int a=addr;
3243       if(faststore_reg_override) a=faststore_reg_override;
3244       if(rs2[i]) {
3245         assert(th>=0);
3246         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3247         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3248         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
3249       }else{
3250         // Store zero
3251         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3252         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3253         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
3254       }
3255     }
3256     type=STORED_STUB;
3257   }
3258 #ifdef PCSX
3259   if(jaddr) {
3260     // PCSX store handlers don't check invcode again
3261     reglist|=1<<addr;
3262     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3263     jaddr=0;
3264   }
3265 #endif
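  // Self-modifying code check: invalid_code[addr>>12] is tested after the
  // store; if the target page may still hold compiled code, the INVCODE_STUB
  // (or invalidate_addr_reg on HAVE_CONDITIONAL_CALL builds) invalidates it.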
3266   if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3267     if(!c||memtarget) {
3268       #ifdef DESTRUCTIVE_SHIFT
3269       // The x86 shift operation is 'destructive'; it overwrites the
3270       // source register, so we need to make a copy first and use that.
3271       addr=temp;
3272       #endif
3273       #if defined(HOST_IMM8)
3274       int ir=get_reg(i_regs->regmap,INVCP);
3275       assert(ir>=0);
3276       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3277       #else
3278       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3279       #endif
3280       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3281       emit_callne(invalidate_addr_reg[addr]);
3282       #else
3283       jaddr2=(int)out;
3284       emit_jne(0);
3285       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3286       #endif
3287     }
3288   }
3289   u_int addr_val=constmap[i][s]+offset;
3290   if(jaddr) {
3291     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3292   } else if(c&&!memtarget) {
3293     inline_writestub(type,i,addr_val,i_regs->regmap,rs2[i],ccadj[i],reglist);
3294   }
3295   // Basic detection of writes that modify the currently compiled block;
3296   // we don't look back, as earlier code should already be in the MIPS i-cache.
3297   if(c&&start+i*4<addr_val&&addr_val<start+slen*4) {
3298     SysPrintf("write to %08x hits block %08x, pc=%08x\n",addr_val,start,start+i*4);
3299     assert(i_regs->regmap==regs[i].regmap); // not delay slot
3300     if(i_regs->regmap==regs[i].regmap) {
3301       load_all_consts(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty,i);
3302       wb_dirtys(regs[i].regmap_entry,regs[i].was32,regs[i].wasdirty);
3303       emit_movimm(start+i*4+4,0);
3304       emit_writeword(0,(int)&pcaddr);
3305       emit_jmp((int)do_interrupt);
3306     }
3307   }
3308   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3309   //if(opcode[i]==0x2B || opcode[i]==0x28)
3310   //if(opcode[i]==0x2B || opcode[i]==0x29)
3311   //if(opcode[i]==0x2B)
3312   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3313   {
3314     #ifdef __i386__
3315     emit_pusha();
3316     #endif
3317     #ifdef __arm__
3318     save_regs(0x100f);
3319     #endif
3320         emit_readword((int)&last_count,ECX);
3321         #ifdef __i386__
3322         if(get_reg(i_regs->regmap,CCREG)<0)
3323           emit_loadreg(CCREG,HOST_CCREG);
3324         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3325         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3326         emit_writeword(HOST_CCREG,(int)&Count);
3327         #endif
3328         #ifdef __arm__
3329         if(get_reg(i_regs->regmap,CCREG)<0)
3330           emit_loadreg(CCREG,0);
3331         else
3332           emit_mov(HOST_CCREG,0);
3333         emit_add(0,ECX,0);
3334         emit_addimm(0,2*ccadj[i],0);
3335         emit_writeword(0,(int)&Count);
3336         #endif
3337     emit_call((int)memdebug);
3338     #ifdef __i386__
3339     emit_popa();
3340     #endif
3341     #ifdef __arm__
3342     restore_regs(0x100f);
3343     #endif
3344   }/**/
3345 }
3346
3347 void storelr_assemble(int i,struct regstat *i_regs)
3348 {
3349   int s,th,tl;
3350   int temp;
3351   int temp2;
3352   int offset;
3353   int jaddr=0,jaddr2;
3354   int case1,case2,case3;
3355   int done0,done1,done2;
3356   int memtarget=0,c=0;
3357   int agr=AGEN1+(i&1);
3358   u_int hr,reglist=0;
3359   th=get_reg(i_regs->regmap,rs2[i]|64);
3360   tl=get_reg(i_regs->regmap,rs2[i]);
3361   s=get_reg(i_regs->regmap,rs1[i]);
3362   temp=get_reg(i_regs->regmap,agr);
3363   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3364   offset=imm[i];
3365   if(s>=0) {
3366     c=(i_regs->isconst>>s)&1;
3367     if(c) {
3368       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3369       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3370     }
3371   }
3372   assert(tl>=0);
3373   for(hr=0;hr<HOST_REGS;hr++) {
3374     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3375   }
3376   assert(temp>=0);
3377   if(!using_tlb) {
3378     if(!c) {
3379       emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3380       if(!offset&&s!=temp) emit_mov(s,temp);
3381       jaddr=(int)out;
3382       emit_jno(0);
3383     }
3384     else
3385     {
3386       if(!memtarget||!rs1[i]) {
3387         jaddr=(int)out;
3388         emit_jmp(0);
3389       }
3390     }
3391     #ifdef RAM_OFFSET
3392     int map=get_reg(i_regs->regmap,ROREG);
3393     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3394     gen_tlb_addr_w(temp,map);
3395     #else
3396     if((u_int)rdram!=0x80000000)
3397       emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3398     #endif
3399   }else{ // using tlb
3400     int map=get_reg(i_regs->regmap,TLREG);
3401     assert(map>=0);
3402     reglist&=~(1<<map);
3403     map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3404     if(!c&&!offset&&s>=0) emit_mov(s,temp);
3405     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3406     if(!jaddr&&!memtarget) {
3407       jaddr=(int)out;
3408       emit_jmp(0);
3409     }
3410     gen_tlb_addr_w(temp,map);
3411   }
3412
3413   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3414     temp2=get_reg(i_regs->regmap,FTEMP);
3415     if(!rs2[i]) temp2=th=tl;
3416   }
3417
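  // On little-endian MIPS the low address bits are flipped with XOR 3,
  // presumably so the byte-merging cases below can be shared with the
  // big-endian layout they appear to have been written for.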
3418 #ifndef BIG_ENDIAN_MIPS
3419     emit_xorimm(temp,3,temp);
3420 #endif
3421   emit_testimm(temp,2);
3422   case2=(int)out;
3423   emit_jne(0);
3424   emit_testimm(temp,1);
3425   case1=(int)out;
3426   emit_jne(0);
3427   // 0
3428   if (opcode[i]==0x2A) { // SWL
3429     emit_writeword_indexed(tl,0,temp);
3430   }
3431   if (opcode[i]==0x2E) { // SWR
3432     emit_writebyte_indexed(tl,3,temp);
3433   }
3434   if (opcode[i]==0x2C) { // SDL
3435     emit_writeword_indexed(th,0,temp);
3436     if(rs2[i]) emit_mov(tl,temp2);
3437   }
3438   if (opcode[i]==0x2D) { // SDR
3439     emit_writebyte_indexed(tl,3,temp);
3440     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3441   }
3442   done0=(int)out;
3443   emit_jmp(0);
3444   // 1
3445   set_jump_target(case1,(int)out);
3446   if (opcode[i]==0x2A) { // SWL
3447     // Write 3 msb into three least significant bytes
3448     if(rs2[i]) emit_rorimm(tl,8,tl);
3449     emit_writehword_indexed(tl,-1,temp);
3450     if(rs2[i]) emit_rorimm(tl,16,tl);
3451     emit_writebyte_indexed(tl,1,temp);
3452     if(rs2[i]) emit_rorimm(tl,8,tl);
3453   }
3454   if (opcode[i]==0x2E) { // SWR
3455     // Write two lsb into two most significant bytes
3456     emit_writehword_indexed(tl,1,temp);
3457   }
3458   if (opcode[i]==0x2C) { // SDL
3459     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3460     // Write 3 msb into three least significant bytes
3461     if(rs2[i]) emit_rorimm(th,8,th);
3462     emit_writehword_indexed(th,-1,temp);
3463     if(rs2[i]) emit_rorimm(th,16,th);
3464     emit_writebyte_indexed(th,1,temp);
3465     if(rs2[i]) emit_rorimm(th,8,th);
3466   }
3467   if (opcode[i]==0x2D) { // SDR
3468     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3469     // Write two lsb into two most significant bytes
3470     emit_writehword_indexed(tl,1,temp);
3471   }
3472   done1=(int)out;
3473   emit_jmp(0);
3474   // 2
3475   set_jump_target(case2,(int)out);
3476   emit_testimm(temp,1);
3477   case3=(int)out;
3478   emit_jne(0);
3479   if (opcode[i]==0x2A) { // SWL
3480     // Write two msb into two least significant bytes
3481     if(rs2[i]) emit_rorimm(tl,16,tl);
3482     emit_writehword_indexed(tl,-2,temp);
3483     if(rs2[i]) emit_rorimm(tl,16,tl);
3484   }
3485   if (opcode[i]==0x2E) { // SWR
3486     // Write 3 lsb into three most significant bytes
3487     emit_writebyte_indexed(tl,-1,temp);
3488     if(rs2[i]) emit_rorimm(tl,8,tl);
3489     emit_writehword_indexed(tl,0,temp);
3490     if(rs2[i]) emit_rorimm(tl,24,tl);
3491   }
3492   if (opcode[i]==0x2C) { // SDL
3493     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3494     // Write two msb into two least significant bytes
3495     if(rs2[i]) emit_rorimm(th,16,th);
3496     emit_writehword_indexed(th,-2,temp);
3497     if(rs2[i]) emit_rorimm(th,16,th);
3498   }
3499   if (opcode[i]==0x2D) { // SDR
3500     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3501     // Write 3 lsb into three most significant bytes
3502     emit_writebyte_indexed(tl,-1,temp);
3503     if(rs2[i]) emit_rorimm(tl,8,tl);
3504     emit_writehword_indexed(tl,0,temp);
3505     if(rs2[i]) emit_rorimm(tl,24,tl);
3506   }
3507   done2=(int)out;
3508   emit_jmp(0);
3509   // 3
3510   set_jump_target(case3,(int)out);
3511   if (opcode[i]==0x2A) { // SWL
3512     // Write msb into least significant byte
3513     if(rs2[i]) emit_rorimm(tl,24,tl);
3514     emit_writebyte_indexed(tl,-3,temp);
3515     if(rs2[i]) emit_rorimm(tl,8,tl);
3516   }
3517   if (opcode[i]==0x2E) { // SWR
3518     // Write entire word
3519     emit_writeword_indexed(tl,-3,temp);
3520   }
3521   if (opcode[i]==0x2C) { // SDL
3522     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3523     // Write msb into least significant byte
3524     if(rs2[i]) emit_rorimm(th,24,th);
3525     emit_writebyte_indexed(th,-3,temp);
3526     if(rs2[i]) emit_rorimm(th,8,th);
3527   }
3528   if (opcode[i]==0x2D) { // SDR
3529     if(rs2[i]) emit_mov(th,temp2);
3530     // Write entire word
3531     emit_writeword_indexed(tl,-3,temp);
3532   }
3533   set_jump_target(done0,(int)out);
3534   set_jump_target(done1,(int)out);
3535   set_jump_target(done2,(int)out);
3536   if (opcode[i]==0x2C) { // SDL
3537     emit_testimm(temp,4);
3538     done0=(int)out;
3539     emit_jne(0);
3540     emit_andimm(temp,~3,temp);
3541     emit_writeword_indexed(temp2,4,temp);
3542     set_jump_target(done0,(int)out);
3543   }
3544   if (opcode[i]==0x2D) { // SDR
3545     emit_testimm(temp,4);
3546     done0=(int)out;
3547     emit_jeq(0);
3548     emit_andimm(temp,~3,temp);
3549     emit_writeword_indexed(temp2,-4,temp);
3550     set_jump_target(done0,(int)out);
3551   }
3552   if(!c||!memtarget)
3553     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3554   if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3555     #ifdef RAM_OFFSET
3556     int map=get_reg(i_regs->regmap,ROREG);
3557     if(map<0) map=HOST_TEMPREG;
3558     gen_orig_addr_w(temp,map);
3559     #else
3560     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3561     #endif
3562     #if defined(HOST_IMM8)
3563     int ir=get_reg(i_regs->regmap,INVCP);
3564     assert(ir>=0);
3565     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3566     #else
3567     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3568     #endif
3569     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3570     emit_callne(invalidate_addr_reg[temp]);
3571     #else
3572     jaddr2=(int)out;
3573     emit_jne(0);
3574     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3575     #endif
3576   }
3577   /*
3578     emit_pusha();
3579     //save_regs(0x100f);
3580         emit_readword((int)&last_count,ECX);
3581         if(get_reg(i_regs->regmap,CCREG)<0)
3582           emit_loadreg(CCREG,HOST_CCREG);
3583         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3584         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3585         emit_writeword(HOST_CCREG,(int)&Count);
3586     emit_call((int)memdebug);
3587     emit_popa();
3588     //restore_regs(0x100f);
3589   /**/
3590 }
3591
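// c1ls_assemble: FPU loads/stores, LWC1 (0x31), LDC1 (0x35), SWC1 (0x39)
// and SDC1 (0x3D).  Data is moved through FTEMP and the
// reg_cop1_simple/reg_cop1_double pointer tables; if COP1 has not been
// marked usable yet, a check of the COP1-enable bit is emitted that jumps
// to an FP_STUB when it is clear.  With DISABLE_COP1 defined this all
// reduces to cop1_unusable().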
3592 void c1ls_assemble(int i,struct regstat *i_regs)
3593 {
3594 #ifndef DISABLE_COP1
3595   int s,th,tl;
3596   int temp,ar;
3597   int map=-1;
3598   int offset;
3599   int c=0;
3600   int jaddr,jaddr2=0,jaddr3,type;
3601   int agr=AGEN1+(i&1);
3602   u_int hr,reglist=0;
3603   th=get_reg(i_regs->regmap,FTEMP|64);
3604   tl=get_reg(i_regs->regmap,FTEMP);
3605   s=get_reg(i_regs->regmap,rs1[i]);
3606   temp=get_reg(i_regs->regmap,agr);
3607   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3608   offset=imm[i];
3609   assert(tl>=0);
3610   assert(rs1[i]>0);
3611   assert(temp>=0);
3612   for(hr=0;hr<HOST_REGS;hr++) {
3613     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3614   }
3615   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3616   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3617   {
3618     // Loads use a temporary register which we need to save
3619     reglist|=1<<temp;
3620   }
3621   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3622     ar=temp;
3623   else // LWC1/LDC1
3624     ar=tl;
3625   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3626   //else c=(i_regs->wasconst>>s)&1;
3627   if(s>=0) c=(i_regs->wasconst>>s)&1;
3628   // Check cop1 unusable
3629   if(!cop1_usable) {
3630     signed char rs=get_reg(i_regs->regmap,CSREG);
3631     assert(rs>=0);
3632     emit_testimm(rs,0x20000000);
3633     jaddr=(int)out;
3634     emit_jeq(0);
3635     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3636     cop1_usable=1;
3637   }
3638   if (opcode[i]==0x39) { // SWC1 (get float address)
3639     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3640   }
3641   if (opcode[i]==0x3D) { // SDC1 (get double address)
3642     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3643   }
3644   // Generate address + offset
3645   if(!using_tlb) {
3646     if(!c)
3647       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3648   }
3649   else
3650   {
3651     map=get_reg(i_regs->regmap,TLREG);
3652     assert(map>=0);
3653     reglist&=~(1<<map);
3654     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3655       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3656     }
3657     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3658       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3659     }
3660   }
3661   if (opcode[i]==0x39) { // SWC1 (read float)
3662     emit_readword_indexed(0,tl,tl);
3663   }
3664   if (opcode[i]==0x3D) { // SDC1 (read double)
3665     emit_readword_indexed(4,tl,th);
3666     emit_readword_indexed(0,tl,tl);
3667   }
3668   if (opcode[i]==0x31) { // LWC1 (get target address)
3669     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3670   }
3671   if (opcode[i]==0x35) { // LDC1 (get target address)
3672     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3673   }
3674   if(!using_tlb) {
3675     if(!c) {
3676       jaddr2=(int)out;
3677       emit_jno(0);
3678     }
3679     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3680       jaddr2=(int)out;
3681       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3682     }
3683     #ifdef DESTRUCTIVE_SHIFT
3684     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3685       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3686     }
3687     #endif
3688   }else{
3689     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3690       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3691     }
3692     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3693       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3694     }
3695   }
3696   if (opcode[i]==0x31) { // LWC1
3697     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3698     //gen_tlb_addr_r(ar,map);
3699     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3700     #ifdef HOST_IMM_ADDR32
3701     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3702     else
3703     #endif
3704     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3705     type=LOADW_STUB;
3706   }
3707   if (opcode[i]==0x35) { // LDC1
3708     assert(th>=0);
3709     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3710     //gen_tlb_addr_r(ar,map);
3711     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3712     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3713     #ifdef HOST_IMM_ADDR32
3714     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3715     else
3716     #endif
3717     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3718     type=LOADD_STUB;
3719   }
3720   if (opcode[i]==0x39) { // SWC1
3721     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3722     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3723     type=STOREW_STUB;
3724   }
3725   if (opcode[i]==0x3D) { // SDC1
3726     assert(th>=0);
3727     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3728     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3729     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3730     type=STORED_STUB;
3731   }
3732   if(!using_tlb&&!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3733     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3734       #ifndef DESTRUCTIVE_SHIFT
3735       temp=offset||c||s<0?ar:s;
3736       #endif
3737       #if defined(HOST_IMM8)
3738       int ir=get_reg(i_regs->regmap,INVCP);
3739       assert(ir>=0);
3740       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3741       #else
3742       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3743       #endif
3744       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3745       emit_callne(invalidate_addr_reg[temp]);
3746       #else
3747       jaddr3=(int)out;
3748       emit_jne(0);
3749       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3750       #endif
3751     }
3752   }
3753   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3754   if (opcode[i]==0x31) { // LWC1 (write float)
3755     emit_writeword_indexed(tl,0,temp);
3756   }
3757   if (opcode[i]==0x35) { // LDC1 (write double)
3758     emit_writeword_indexed(th,4,temp);
3759     emit_writeword_indexed(tl,0,temp);
3760   }
3761   //if(opcode[i]==0x39)
3762   /*if(opcode[i]==0x39||opcode[i]==0x31)
3763   {
3764     emit_pusha();
3765         emit_readword((int)&last_count,ECX);
3766         if(get_reg(i_regs->regmap,CCREG)<0)
3767           emit_loadreg(CCREG,HOST_CCREG);
3768         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3769         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3770         emit_writeword(HOST_CCREG,(int)&Count);
3771     emit_call((int)memdebug);
3772     emit_popa();
3773   }/**/
3774 #else
3775   cop1_unusable(i, i_regs);
3776 #endif
3777 }
3778
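// c2ls_assemble: COP2 (GTE) loads/stores, LWC2 (0x32) and SWC2 (0x3a).
// GTE data registers are moved to/from FTEMP via cop2_get_dreg and
// cop2_put_dreg; the TLB path is never used here (assert(!using_tlb)).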
3779 void c2ls_assemble(int i,struct regstat *i_regs)
3780 {
3781   int s,tl;
3782   int ar;
3783   int offset;
3784   int memtarget=0,c=0;
3785   int jaddr2=0,jaddr3,type;
3786   int agr=AGEN1+(i&1);
3787   int fastio_reg_override=0;
3788   u_int hr,reglist=0;
3789   u_int copr=(source[i]>>16)&0x1f;
3790   s=get_reg(i_regs->regmap,rs1[i]);
3791   tl=get_reg(i_regs->regmap,FTEMP);
3792   offset=imm[i];
3793   assert(rs1[i]>0);
3794   assert(tl>=0);
3795   assert(!using_tlb);
3796
3797   for(hr=0;hr<HOST_REGS;hr++) {
3798     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3799   }
3800   if(i_regs->regmap[HOST_CCREG]==CCREG)
3801     reglist&=~(1<<HOST_CCREG);
3802
3803   // get the address
3804   if (opcode[i]==0x3a) { // SWC2
3805     ar=get_reg(i_regs->regmap,agr);
3806     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3807     reglist|=1<<ar;
3808   } else { // LWC2
3809     ar=tl;
3810   }
3811   if(s>=0) c=(i_regs->wasconst>>s)&1;
3812   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3813   if (!offset&&!c&&s>=0) ar=s;
3814   assert(ar>=0);
3815
3816   if (opcode[i]==0x3a) { // SWC2
3817     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3818     type=STOREW_STUB;
3819   }
3820   else
3821     type=LOADW_STUB;
3822
3823   if(c&&!memtarget) {
3824     jaddr2=(int)out;
3825     emit_jmp(0); // inline_readstub/inline_writestub?
3826   }
3827   else {
3828     if(!c) {
3829       jaddr2=emit_fastpath_cmp_jump(i,ar,&fastio_reg_override);
3830     }
3831     else if(ram_offset&&memtarget) {
3832       emit_addimm(ar,ram_offset,HOST_TEMPREG);
3833       fastio_reg_override=HOST_TEMPREG;
3834     }
3835     if (opcode[i]==0x32) { // LWC2
3836       #ifdef HOST_IMM_ADDR32
3837       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3838       else
3839       #endif
3840       int a=ar;
3841       if(fastio_reg_override) a=fastio_reg_override;
3842       emit_readword_indexed(0,a,tl);
3843     }
3844     if (opcode[i]==0x3a) { // SWC2
3845       #ifdef DESTRUCTIVE_SHIFT
3846       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3847       #endif
3848       int a=ar;
3849       if(fastio_reg_override) a=fastio_reg_override;
3850       emit_writeword_indexed(tl,0,a);
3851     }
3852   }
3853   if(jaddr2)
3854     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3855   if(opcode[i]==0x3a) // SWC2
3856   if(!(i_regs->waswritten&(1<<rs1[i]))&&!(new_dynarec_hacks&NDHACK_NO_SMC_CHECK)) {
3857 #if defined(HOST_IMM8)
3858     int ir=get_reg(i_regs->regmap,INVCP);
3859     assert(ir>=0);
3860     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3861 #else
3862     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3863 #endif
3864     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3865     emit_callne(invalidate_addr_reg[ar]);
3866     #else
3867     jaddr3=(int)out;
3868     emit_jne(0);
3869     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3870     #endif
3871   }
3872   if (opcode[i]==0x32) { // LWC2
3873     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3874   }
3875 }
3876
3877 #ifndef multdiv_assemble
3878 void multdiv_assemble(int i,struct regstat *i_regs)
3879 {
3880   printf("Need multdiv_assemble for this architecture.\n");
3881   exit(1);
3882 }
3883 #endif
3884
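// mov_assemble: register moves such as MFHI/MFLO/MTHI/MTLO (see the
// commented-out opcode checks below); copies rs1 into rt1, including the
// upper half when one is allocated.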
3885 void mov_assemble(int i,struct regstat *i_regs)
3886 {
3887   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3888   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3889   if(rt1[i]) {
3890     signed char sh,sl,th,tl;
3891     th=get_reg(i_regs->regmap,rt1[i]|64);
3892     tl=get_reg(i_regs->regmap,rt1[i]);
3893     //assert(tl>=0);
3894     if(tl>=0) {
3895       sh=get_reg(i_regs->regmap,rs1[i]|64);
3896       sl=get_reg(i_regs->regmap,rs1[i]);
3897       if(sl>=0) emit_mov(sl,tl);
3898       else emit_loadreg(rs1[i],tl);
3899       if(th>=0) {
3900         if(sh>=0) emit_mov(sh,th);
3901         else emit_loadreg(rs1[i]|64,th);
3902       }
3903     }
3904   }
3905 }
3906
3907 #ifndef fconv_assemble
3908 void fconv_assemble(int i,struct regstat *i_regs)
3909 {
3910   printf("Need fconv_assemble for this architecture.\n");
3911   exit(1);
3912 }
3913 #endif
3914
3915 #if 0
3916 void float_assemble(int i,struct regstat *i_regs)
3917 {
3918   printf("Need float_assemble for this architecture.\n");
3919   exit(1);
3920 }
3921 #endif
3922
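// syscall_assemble, hlecall_assemble and intcall_assemble all follow the
// same pattern: load the relevant PC into a host register, add the cycles
// accumulated so far to HOST_CCREG, and jump to the matching handler
// (jump_syscall_hle, jump_hlecall, jump_intcall).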
3923 void syscall_assemble(int i,struct regstat *i_regs)
3924 {
3925   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3926   assert(ccreg==HOST_CCREG);
3927   assert(!is_delayslot);
3928   emit_movimm(start+i*4,EAX); // Get PC
3929   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3930   emit_jmp((int)jump_syscall_hle); // XXX
3931 }
3932
3933 void hlecall_assemble(int i,struct regstat *i_regs)
3934 {
3935   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3936   assert(ccreg==HOST_CCREG);
3937   assert(!is_delayslot);
3938   emit_movimm(start+i*4+4,0); // Get PC
3939   emit_movimm((int)psxHLEt[source[i]&7],1);
3940   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG); // XXX
3941   emit_jmp((int)jump_hlecall);
3942 }
3943
3944 void intcall_assemble(int i,struct regstat *i_regs)
3945 {
3946   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3947   assert(ccreg==HOST_CCREG);
3948   assert(!is_delayslot);
3949   emit_movimm(start+i*4,0); // Get PC
3950   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]),HOST_CCREG);
3951   emit_jmp((int)jump_intcall);
3952 }
3953
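// ds_assemble: assemble the instruction sitting in a branch delay slot.
// is_delayslot is set for the duration (some assemblers, e.g. the FP stub
// in c1ls_assemble, check it); a jump type landing here is reported as a
// probable bug.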
3954 void ds_assemble(int i,struct regstat *i_regs)
3955 {
3956   speculate_register_values(i);
3957   is_delayslot=1;
3958   switch(itype[i]) {
3959     case ALU:
3960       alu_assemble(i,i_regs);break;
3961     case IMM16:
3962       imm16_assemble(i,i_regs);break;
3963     case SHIFT:
3964       shift_assemble(i,i_regs);break;
3965     case SHIFTIMM:
3966       shiftimm_assemble(i,i_regs);break;
3967     case LOAD:
3968       load_assemble(i,i_regs);break;
3969     case LOADLR:
3970       loadlr_assemble(i,i_regs);break;
3971     case STORE:
3972       store_assemble(i,i_regs);break;
3973     case STORELR:
3974       storelr_assemble(i,i_regs);break;
3975     case COP0:
3976       cop0_assemble(i,i_regs);break;
3977     case COP1:
3978       cop1_assemble(i,i_regs);break;
3979     case C1LS:
3980       c1ls_assemble(i,i_regs);break;
3981     case COP2:
3982       cop2_assemble(i,i_regs);break;
3983     case C2LS:
3984       c2ls_assemble(i,i_regs);break;
3985     case C2OP:
3986       c2op_assemble(i,i_regs);break;
3987     case FCONV:
3988       fconv_assemble(i,i_regs);break;
3989     case FLOAT:
3990       float_assemble(i,i_regs);break;
3991     case FCOMP:
3992       fcomp_assemble(i,i_regs);break;
3993     case MULTDIV:
3994       multdiv_assemble(i,i_regs);break;
3995     case MOV:
3996       mov_assemble(i,i_regs);break;
3997     case SYSCALL:
3998     case HLECALL:
3999     case INTCALL:
4000     case SPAN:
4001     case UJUMP:
4002     case RJUMP:
4003     case CJUMP:
4004     case SJUMP:
4005     case FJUMP:
4006       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4007   }
4008   is_delayslot=0;
4009 }
4010
4011 // Is the branch target a valid internal jump?
4012 int internal_branch(uint64_t i_is32,int addr)
4013 {
4014   if(addr&1) return 0; // Indirect (register) jump
4015   if(addr>=start && addr<start+slen*4-4)
4016   {
4017     int t=(addr-start)>>2;
4018     // Delay slots are not valid branch targets
4019     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4020     // 64 -> 32 bit transition requires a recompile
4021     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
4022     {
4023       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
4024       else printf("optimizable: yes\n");
4025     }*/
4026     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4027 #ifndef FORCE32
4028     if(requires_32bit[t]&~i_is32) return 0;
4029     else
4030 #endif
4031       return 1;
4032   }
4033   return 0;
4034 }
4035
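// wb_invalidate: when the register map changes between pre[] and entry[],
// write back any dirty MIPS register that loses its host register and has
// no slot in the new map, then handle registers that do have a slot but in
// a different host register with a plain emit_mov (no writeback needed).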
4036 #ifndef wb_invalidate
4037 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
4038   uint64_t u,uint64_t uu)
4039 {
4040   int hr;
4041   for(hr=0;hr<HOST_REGS;hr++) {
4042     if(hr!=EXCLUDE_REG) {
4043       if(pre[hr]!=entry[hr]) {
4044         if(pre[hr]>=0) {
4045           if((dirty>>hr)&1) {
4046             if(get_reg(entry,pre[hr])<0) {
4047               if(pre[hr]<64) {
4048                 if(!((u>>pre[hr])&1)) {
4049                   emit_storereg(pre[hr],hr);
4050                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
4051                     emit_sarimm(hr,31,hr);
4052                     emit_storereg(pre[hr]|64,hr);
4053                   }
4054                 }
4055               }else{
4056                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
4057                   emit_storereg(pre[hr],hr);
4058                 }
4059               }
4060             }
4061           }
4062         }
4063       }
4064     }
4065   }
4066   // Move from one register to another (no writeback)
4067   for(hr=0;hr<HOST_REGS;hr++) {
4068     if(hr!=EXCLUDE_REG) {
4069       if(pre[hr]!=entry[hr]) {
4070         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
4071           int nr;
4072           if((nr=get_reg(entry,pre[hr]))>=0) {
4073             emit_mov(hr,nr);
4074           }
4075         }
4076       }
4077     }
4078   }
4079 }
4080 #endif
4081
4082 // Load the specified registers
4083 // This only loads the registers given as arguments because
4084 // we don't want to load things that will be overwritten
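// Callers pass the registers to load, typically rs1[i]/rs2[i] (see
// ds_assemble_entry below), so a host register is only (re)loaded when it
// is mapped to one of those and the entry map does not already provide it.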
4085 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
4086 {
4087   int hr;
4088   // Load 32-bit regs
4089   for(hr=0;hr<HOST_REGS;hr++) {
4090     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4091       if(entry[hr]!=regmap[hr]) {
4092         if(regmap[hr]==rs1||regmap[hr]==rs2)
4093         {
4094           if(regmap[hr]==0) {
4095             emit_zeroreg(hr);
4096           }
4097           else
4098           {
4099             emit_loadreg(regmap[hr],hr);
4100           }
4101         }
4102       }
4103     }
4104   }
4105   // Load 64-bit regs
4106   for(hr=0;hr<HOST_REGS;hr++) {
4107     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4108       if(entry[hr]!=regmap[hr]) {
4109         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
4110         {
4111           assert(regmap[hr]!=64);
4112           if((is32>>(regmap[hr]&63))&1) {
4113             int lr=get_reg(regmap,regmap[hr]-64);
4114             if(lr>=0)
4115               emit_sarimm(lr,31,hr);
4116             else
4117               emit_loadreg(regmap[hr],hr);
4118           }
4119           else
4120           {
4121             emit_loadreg(regmap[hr],hr);
4122           }
4123         }
4124       }
4125     }
4126   }
4127 }
4128
4129 // Load registers prior to the start of a loop
4130 // so that they are not loaded within the loop
4131 static void loop_preload(signed char pre[],signed char entry[])
4132 {
4133   int hr;
4134   for(hr=0;hr<HOST_REGS;hr++) {
4135     if(hr!=EXCLUDE_REG) {
4136       if(pre[hr]!=entry[hr]) {
4137         if(entry[hr]>=0) {
4138           if(get_reg(pre,entry[hr])<0) {
4139             assem_debug("loop preload:\n");
4140             //printf("loop preload: %d\n",hr);
4141             if(entry[hr]==0) {
4142               emit_zeroreg(hr);
4143             }
4144             else if(entry[hr]<TEMPREG)
4145             {
4146               emit_loadreg(entry[hr],hr);
4147             }
4148             else if(entry[hr]-64<TEMPREG)
4149             {
4150               emit_loadreg(entry[hr],hr);
4151             }
4152           }
4153         }
4154       }
4155     }
4156   }
4157 }
4158
4159 // Generate the address for a load/store instruction.
4160 // The address goes to AGEN for writes, and to FTEMP for LOADLR and cop1/cop2 loads.
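// Illustrative example: for "lw $t0, 8($t1)" with $t1 not known to be
// constant, the first half below ends with emit_addimm(<host reg of $t1>,8,ra);
// if $t1 holds a known constant k, emit_movimm(k+8,ra) is used instead and
// the result is recorded in regs[i].loadedconst.  LWL/LWR and LDL/LDR
// addresses are pre-aligned with &~3 and &~7 respectively.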
4161 void address_generation(int i,struct regstat *i_regs,signed char entry[])
4162 {
4163   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
4164     int ra=-1;
4165     int agr=AGEN1+(i&1);
4166     int mgr=MGEN1+(i&1);
4167     if(itype[i]==LOAD) {
4168       ra=get_reg(i_regs->regmap,rt1[i]);
4169       if(ra<0) ra=get_reg(i_regs->regmap,-1);
4170       assert(ra>=0);
4171     }
4172     if(itype[i]==LOADLR) {
4173       ra=get_reg(i_regs->regmap,FTEMP);
4174     }
4175     if(itype[i]==STORE||itype[i]==STORELR) {
4176       ra=get_reg(i_regs->regmap,agr);
4177       if(ra<0) ra=get_reg(i_regs->regmap,-1);
4178     }
4179     if(itype[i]==C1LS||itype[i]==C2LS) {
4180       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
4181         ra=get_reg(i_regs->regmap,FTEMP);
4182       else { // SWC1/SDC1/SWC2/SDC2
4183         ra=get_reg(i_regs->regmap,agr);
4184         if(ra<0) ra=get_reg(i_regs->regmap,-1);
4185       }
4186     }
4187     int rs=get_reg(i_regs->regmap,rs1[i]);
4188     int rm=get_reg(i_regs->regmap,TLREG);
4189     if(ra>=0) {
4190       int offset=imm[i];
4191       int c=(i_regs->wasconst>>rs)&1;
4192       if(rs1[i]==0) {
4193         // Using r0 as a base address
4194         /*if(rm>=0) {
4195           if(!entry||entry[rm]!=mgr) {
4196             generate_map_const(offset,rm);
4197           } // else did it in the previous cycle
4198         }*/
4199         if(!entry||entry[ra]!=agr) {
4200           if (opcode[i]==0x22||opcode[i]==0x26) {
4201             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4202           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4203             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4204           }else{
4205             emit_movimm(offset,ra);
4206           }
4207         } // else did it in the previous cycle
4208       }
4209       else if(rs<0) {
4210         if(!entry||entry[ra]!=rs1[i])
4211           emit_loadreg(rs1[i],ra);
4212         //if(!entry||entry[ra]!=rs1[i])
4213         //  printf("poor load scheduling!\n");
4214       }
4215       else if(c) {
4216 #ifndef DISABLE_TLB
4217         if(rm>=0) {
4218           if(!entry||entry[rm]!=mgr) {
4219             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4220               // Stores to memory go through the mapper to detect self-modifying
4221               // code; loads don't.
4222               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4223                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4224                 generate_map_const(constmap[i][rs]+offset,rm);
4225             }else{
4226               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4227                 generate_map_const(constmap[i][rs]+offset,rm);
4228             }
4229           }
4230         }
4231 #endif
4232         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4233           if(!entry||entry[ra]!=agr) {
4234             if (opcode[i]==0x22||opcode[i]==0x26) {
4235               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4236             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4237               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4238             }else{
4239               #ifdef HOST_IMM_ADDR32
4240               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4241                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4242               #endif
4243               emit_movimm(constmap[i][rs]+offset,ra);
4244               regs[i].loadedconst|=1<<ra;
4245             }
4246           } // else did it in the previous cycle
4247         } // else load_consts already did it
4248       }
4249       if(offset&&!c&&rs1[i]) {
4250         if(rs>=0) {
4251           emit_addimm(rs,offset,ra);
4252         }else{
4253           emit_addimm(ra,offset,ra);
4254         }
4255       }
4256     }
4257   }
4258   // Preload constants for next instruction
4259   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4260     int agr,ra;
4261     #if !defined(HOST_IMM_ADDR32) && !defined(DISABLE_TLB)
4262     // Mapper entry
4263     agr=MGEN1+((i+1)&1);
4264     ra=get_reg(i_regs->regmap,agr);
4265     if(ra>=0) {
4266       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4267       int offset=imm[i+1];
4268       int c=(regs[i+1].wasconst>>rs)&1;
4269       if(c) {
4270         if(itype[i+1]==STORE||itype[i+1]==STORELR
4271            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4272           // Stores to memory go through the mapper to detect self-modifying
4273           // code; loads don't.
4274           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4275              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4276             generate_map_const(constmap[i+1][rs]+offset,ra);
4277         }else{
4278           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4279             generate_map_const(constmap[i+1][rs]+offset,ra);
4280         }
4281       }
4282       /*else if(rs1[i]==0) {
4283         generate_map_const(offset,ra);
4284       }*/
4285     }
4286     #endif
4287     // Actual address
4288     agr=AGEN1+((i+1)&1);
4289     ra=get_reg(i_regs->regmap,agr);
4290     if(ra>=0) {
4291       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4292       int offset=imm[i+1];
4293       int c=(regs[i+1].wasconst>>rs)&1;
4294       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4295         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4296           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4297         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4298           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4299         }else{
4300           #ifdef HOST_IMM_ADDR32
4301           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4302              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4303           #endif
4304           emit_movimm(constmap[i+1][rs]+offset,ra);
4305           regs[i+1].loadedconst|=1<<ra;
4306         }
4307       }
4308       else if(rs1[i+1]==0) {
4309         // Using r0 as a base address
4310         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4311           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4312         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4313           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4314         }else{
4315           emit_movimm(offset,ra);
4316         }
4317       }
4318     }
4319   }
4320 }
4321
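// get_final_value: scan forward while host register hr keeps holding the
// same constant (same mapping, still marked const, no branch target in
// between) and report the last value, so load_consts can emit a single
// movimm for the whole run.  If the constant only serves as the base of an
// immediately following load, the load's offset is folded in ("precompute
// load address").  A zero return tells the caller not to load anything.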
4322 int get_final_value(int hr, int i, int *value)
4323 {
4324   int reg=regs[i].regmap[hr];
4325   while(i<slen-1) {
4326     if(regs[i+1].regmap[hr]!=reg) break;
4327     if(!((regs[i+1].isconst>>hr)&1)) break;
4328     if(bt[i+1]) break;
4329     i++;
4330   }
4331   if(i<slen-1) {
4332     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4333       *value=constmap[i][hr];
4334       return 1;
4335     }
4336     if(!bt[i+1]) {
4337       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4338         // Load in delay slot, out-of-order execution
4339         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4340         {
4341           #ifdef HOST_IMM_ADDR32
4342           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4343           #endif
4344           // Precompute load address
4345           *value=constmap[i][hr]+imm[i+2];
4346           return 1;
4347         }
4348       }
4349       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4350       {
4351         #ifdef HOST_IMM_ADDR32
4352         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4353         #endif
4354         // Precompute load address
4355         *value=constmap[i][hr]+imm[i+1];
4356         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4357         return 1;
4358       }
4359     }
4360   }
4361   *value=constmap[i][hr];
4362   //printf("c=%x\n",(int)constmap[i][hr]);
4363   if(i==slen-1) return 1;
4364   if(reg<64) {
4365     return !((unneeded_reg[i+1]>>reg)&1);
4366   }else{
4367     return !((unneeded_reg_upper[i+1]>>reg)&1);
4368   }
4369 }
4370
4371 // Load registers with known constants
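// loadedconst tracks which host registers already hold their constant from
// a previous instruction so the movimm is not repeated.  When another host
// register already holds a similar value, emit_movimm_from derives the new
// constant from that register, which is presumably cheaper than
// materializing a fresh immediate on some hosts.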
4372 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4373 {
4374   int hr,hr2;
4375   // propagate loaded constant flags
4376   if(i==0||bt[i])
4377     regs[i].loadedconst=0;
4378   else {
4379     for(hr=0;hr<HOST_REGS;hr++) {
4380       if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((regs[i-1].isconst>>hr)&1)&&pre[hr]==regmap[hr]
4381          &&regmap[hr]==regs[i-1].regmap[hr]&&((regs[i-1].loadedconst>>hr)&1))
4382       {
4383         regs[i].loadedconst|=1<<hr;
4384       }
4385     }
4386   }
4387   // Load 32-bit regs
4388   for(hr=0;hr<HOST_REGS;hr++) {
4389     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4390       //if(entry[hr]!=regmap[hr]) {
4391       if(!((regs[i].loadedconst>>hr)&1)) {
4392         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4393           int value,similar=0;
4394           if(get_final_value(hr,i,&value)) {
4395             // see if some other register already holds a similar value
4396             for(hr2=0;hr2<HOST_REGS;hr2++) {
4397               if(hr2!=EXCLUDE_REG&&((regs[i].loadedconst>>hr2)&1)) {
4398                 if(is_similar_value(value,constmap[i][hr2])) {
4399                   similar=1;
4400                   break;
4401                 }
4402               }
4403             }
4404             if(similar) {
4405               int value2;
4406               if(get_final_value(hr2,i,&value2)) // is this needed?
4407                 emit_movimm_from(value2,hr2,value,hr);
4408               else
4409                 emit_movimm(value,hr);
4410             }
4411             else if(value==0) {
4412               emit_zeroreg(hr);
4413             }
4414             else {
4415               emit_movimm(value,hr);
4416             }
4417           }
4418           regs[i].loadedconst|=1<<hr;
4419         }
4420       }
4421     }
4422   }
4423   // Load 64-bit regs
4424   for(hr=0;hr<HOST_REGS;hr++) {
4425     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4426       //if(entry[hr]!=regmap[hr]) {
4427       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4428         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4429           if((is32>>(regmap[hr]&63))&1) {
4430             int lr=get_reg(regmap,regmap[hr]-64);
4431             assert(lr>=0);
4432             emit_sarimm(lr,31,hr);
4433           }
4434           else
4435           {
4436             int value;
4437             if(get_final_value(hr,i,&value)) {
4438               if(value==0) {
4439                 emit_zeroreg(hr);
4440               }
4441               else {
4442                 emit_movimm(value,hr);
4443               }
4444             }
4445           }
4446         }
4447       }
4448     }
4449   }
4450 }
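// load_all_consts: like load_consts, but loads the current constant for
// every dirty, constant-valued register without the forward scan; used
// when the full state has to be materialized (e.g. before the
// block-modification exit emitted for stores that hit the current block).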
4451 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4452 {
4453   int hr;
4454   // Load 32-bit regs
4455   for(hr=0;hr<HOST_REGS;hr++) {
4456     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4457       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4458         int value=constmap[i][hr];
4459         if(value==0) {
4460           emit_zeroreg(hr);
4461         }
4462         else {
4463           emit_movimm(value,hr);
4464         }
4465       }
4466     }
4467   }
4468   // Load 64-bit regs
4469   for(hr=0;hr<HOST_REGS;hr++) {
4470     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4471       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4472         if((is32>>(regmap[hr]&63))&1) {
4473           int lr=get_reg(regmap,regmap[hr]-64);
4474           assert(lr>=0);
4475           emit_sarimm(lr,31,hr);
4476         }
4477         else
4478         {
4479           int value=constmap[i][hr];
4480           if(value==0) {
4481             emit_zeroreg(hr);
4482           }
4483           else {
4484             emit_movimm(value,hr);
4485           }
4486         }
4487       }
4488     }
4489   }
4490 }
4491
4492 // Write out all dirty registers (except cycle count)
4493 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4494 {
4495   int hr;
4496   for(hr=0;hr<HOST_REGS;hr++) {
4497     if(hr!=EXCLUDE_REG) {
4498       if(i_regmap[hr]>0) {
4499         if(i_regmap[hr]!=CCREG) {
4500           if((i_dirty>>hr)&1) {
4501             if(i_regmap[hr]<64) {
4502               emit_storereg(i_regmap[hr],hr);
4503 #ifndef FORCE32
4504               if( ((i_is32>>i_regmap[hr])&1) ) {
4505                 #ifdef DESTRUCTIVE_WRITEBACK
4506                 emit_sarimm(hr,31,hr);
4507                 emit_storereg(i_regmap[hr]|64,hr);
4508                 #else
4509                 emit_sarimm(hr,31,HOST_TEMPREG);
4510                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4511                 #endif
4512               }
4513 #endif
4514             }else{
4515               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4516                 emit_storereg(i_regmap[hr],hr);
4517               }
4518             }
4519           }
4520         }
4521       }
4522     }
4523   }
4524 }
4525 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4526 // This writes the registers not written by store_regs_bt
4527 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4528 {
4529   int hr;
4530   int t=(addr-start)>>2;
4531   for(hr=0;hr<HOST_REGS;hr++) {
4532     if(hr!=EXCLUDE_REG) {
4533       if(i_regmap[hr]>0) {
4534         if(i_regmap[hr]!=CCREG) {
4535           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4536             if((i_dirty>>hr)&1) {
4537               if(i_regmap[hr]<64) {
4538                 emit_storereg(i_regmap[hr],hr);
4539 #ifndef FORCE32
4540                 if( ((i_is32>>i_regmap[hr])&1) ) {
4541                   #ifdef DESTRUCTIVE_WRITEBACK
4542                   emit_sarimm(hr,31,hr);
4543                   emit_storereg(i_regmap[hr]|64,hr);
4544                   #else
4545                   emit_sarimm(hr,31,HOST_TEMPREG);
4546                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4547                   #endif
4548                 }
4549 #endif
4550               }else{
4551                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4552                   emit_storereg(i_regmap[hr],hr);
4553                 }
4554               }
4555             }
4556           }
4557         }
4558       }
4559     }
4560   }
4561 }
4562
4563 // Load all registers (except cycle count)
4564 void load_all_regs(signed char i_regmap[])
4565 {
4566   int hr;
4567   for(hr=0;hr<HOST_REGS;hr++) {
4568     if(hr!=EXCLUDE_REG) {
4569       if(i_regmap[hr]==0) {
4570         emit_zeroreg(hr);
4571       }
4572       else
4573       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4574       {
4575         emit_loadreg(i_regmap[hr],hr);
4576       }
4577     }
4578   }
4579 }
4580
4581 // Load all current registers that are also needed by the next instruction
4582 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4583 {
4584   int hr;
4585   for(hr=0;hr<HOST_REGS;hr++) {
4586     if(hr!=EXCLUDE_REG) {
4587       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4588         if(i_regmap[hr]==0) {
4589           emit_zeroreg(hr);
4590         }
4591         else
4592         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4593         {
4594           emit_loadreg(i_regmap[hr],hr);
4595         }
4596       }
4597     }
4598   }
4599 }
4600
4601 // Load all regs, storing cycle count if necessary
4602 void load_regs_entry(int t)
4603 {
4604   int hr;
4605   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_ADJUST(1),HOST_CCREG);
4606   else if(ccadj[t]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST(ccadj[t]),HOST_CCREG);
4607   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4608     emit_storereg(CCREG,HOST_CCREG);
4609   }
4610   // Load 32-bit regs
4611   for(hr=0;hr<HOST_REGS;hr++) {
4612     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4613       if(regs[t].regmap_entry[hr]==0) {
4614         emit_zeroreg(hr);
4615       }
4616       else if(regs[t].regmap_entry[hr]!=CCREG)
4617       {
4618         emit_loadreg(regs[t].regmap_entry[hr],hr);
4619       }
4620     }
4621   }
4622   // Load 64-bit regs
4623   for(hr=0;hr<HOST_REGS;hr++) {
4624     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4625       assert(regs[t].regmap_entry[hr]!=64);
4626       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4627         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4628         if(lr<0) {
4629           emit_loadreg(regs[t].regmap_entry[hr],hr);
4630         }
4631         else
4632         {
4633           emit_sarimm(lr,31,hr);
4634         }
4635       }
4636       else
4637       {
4638         emit_loadreg(regs[t].regmap_entry[hr],hr);
4639       }
4640     }
4641   }
4642 }
4643
4644 // Store dirty registers prior to branch
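// For internal branches a dirty register is only written back when the
// target block does not expect to find it (still dirty) in the same host
// register, and only if the target actually needs it; external branches
// fall through to wb_dirtys and write out everything.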
4645 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4646 {
4647   if(internal_branch(i_is32,addr))
4648   {
4649     int t=(addr-start)>>2;
4650     int hr;
4651     for(hr=0;hr<HOST_REGS;hr++) {
4652       if(hr!=EXCLUDE_REG) {
4653         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4654           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4655             if((i_dirty>>hr)&1) {
4656               if(i_regmap[hr]<64) {
4657                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4658                   emit_storereg(i_regmap[hr],hr);
4659                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4660                     #ifdef DESTRUCTIVE_WRITEBACK
4661                     emit_sarimm(hr,31,hr);
4662                     emit_storereg(i_regmap[hr]|64,hr);
4663                     #else
4664                     emit_sarimm(hr,31,HOST_TEMPREG);
4665                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4666                     #endif
4667                   }
4668                 }
4669               }else{
4670                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4671                   emit_storereg(i_regmap[hr],hr);
4672                 }
4673               }
4674             }
4675           }
4676         }
4677       }
4678     }
4679   }
4680   else
4681   {
4682     // Branch out of this block, write out all dirty regs
4683     wb_dirtys(i_regmap,i_is32,i_dirty);
4684   }
4685 }
4686
4687 // Load all needed registers for branch target
4688 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4689 {
4690   //if(addr>=start && addr<(start+slen*4))
4691   if(internal_branch(i_is32,addr))
4692   {
4693     int t=(addr-start)>>2;
4694     int hr;
4695     // Store the cycle count before loading something else
4696     if(i_regmap[HOST_CCREG]!=CCREG) {
4697       assert(i_regmap[HOST_CCREG]==-1);
4698     }
4699     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4700       emit_storereg(CCREG,HOST_CCREG);
4701     }
4702     // Load 32-bit regs
4703     for(hr=0;hr<HOST_REGS;hr++) {
4704       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4705         #ifdef DESTRUCTIVE_WRITEBACK
4706         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4707         #else
4708         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4709         #endif
4710           if(regs[t].regmap_entry[hr]==0) {
4711             emit_zeroreg(hr);
4712           }
4713           else if(regs[t].regmap_entry[hr]!=CCREG)
4714           {
4715             emit_loadreg(regs[t].regmap_entry[hr],hr);
4716           }
4717         }
4718       }
4719     }
4720     // Load 64-bit regs
4721     for(hr=0;hr<HOST_REGS;hr++) {
4722       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4723         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4724           assert(regs[t].regmap_entry[hr]!=64);
4725           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4726             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4727             if(lr<0) {
4728               emit_loadreg(regs[t].regmap_entry[hr],hr);
4729             }
4730             else
4731             {
4732               emit_sarimm(lr,31,hr);
4733             }
4734           }
4735           else
4736           {
4737             emit_loadreg(regs[t].regmap_entry[hr],hr);
4738           }
4739         }
4740         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4741           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4742           assert(lr>=0);
4743           emit_sarimm(lr,31,hr);
4744         }
4745       }
4746     }
4747   }
4748 }
4749
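// match_bt: does the current register state (mapping, dirty bits, 32/64-bit
// status) agree with what the block at addr expects on entry?  Returns 1
// when they are compatible, presumably so the branch can be linked directly
// without extra writeback or reload code; delay-slot targets never match,
// and external targets only match when no mapped register (apart from the
// cycle count) is dirty.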
4750 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4751 {
4752   if(addr>=start && addr<start+slen*4-4)
4753   {
4754     int t=(addr-start)>>2;
4755     int hr;
4756     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4757     for(hr=0;hr<HOST_REGS;hr++)
4758     {
4759       if(hr!=EXCLUDE_REG)
4760       {
4761         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4762         {
4763           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4764           {
4765             return 0;
4766           }
4767           else
4768           if((i_dirty>>hr)&1)
4769           {
4770             if(i_regmap[hr]<TEMPREG)
4771             {
4772               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4773                 return 0;
4774             }
4775             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4776             {
4777               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4778                 return 0;
4779             }
4780           }
4781         }
4782         else // Same register but is it 32-bit or dirty?
4783         if(i_regmap[hr]>=0)
4784         {
4785           if(!((regs[t].dirty>>hr)&1))
4786           {
4787             if((i_dirty>>hr)&1)
4788             {
4789               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4790               {
4791                 //printf("%x: dirty no match\n",addr);
4792                 return 0;
4793               }
4794             }
4795           }
4796           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4797           {
4798             //printf("%x: is32 no match\n",addr);
4799             return 0;
4800           }
4801         }
4802       }
4803     }
4804     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4805 #ifndef FORCE32
4806     if(requires_32bit[t]&~i_is32) return 0;
4807 #endif
4808     // Delay slots are not valid branch targets
4809     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4810     // Delay slots require additional processing, so do not match
4811     if(is_ds[t]) return 0;
4812   }
4813   else
4814   {
4815     int hr;
4816     for(hr=0;hr<HOST_REGS;hr++)
4817     {
4818       if(hr!=EXCLUDE_REG)
4819       {
4820         if(i_regmap[hr]>=0)
4821         {
4822           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4823           {
4824             if((i_dirty>>hr)&1)
4825             {
4826               return 0;
4827             }
4828           }
4829         }
4830       }
4831     }
4832   }
4833   return 1;
4834 }
4835
4836 // Used when a branch jumps into the delay slot of another branch
4837 void ds_assemble_entry(int i)
4838 {
4839   int t=(ba[i]-start)>>2;
4840   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4841   assem_debug("Assemble delay slot at %x\n",ba[i]);
4842   assem_debug("<->\n");
4843   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4844     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4845   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4846   address_generation(t,&regs[t],regs[t].regmap_entry);
4847   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4848     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4849   cop1_usable=0;
4850   is_delayslot=0;
4851   switch(itype[t]) {
4852     case ALU:
4853       alu_assemble(t,&regs[t]);break;
4854     case IMM16:
4855       imm16_assemble(t,&regs[t]);break;
4856     case SHIFT:
4857       shift_assemble(t,&regs[t]);break;
4858     case SHIFTIMM:
4859       shiftimm_assemble(t,&regs[t]);break;
4860     case LOAD:
4861       load_assemble(t,&regs[t]);break;
4862     case LOADLR:
4863       loadlr_assemble(t,&regs[t]);break;
4864     case STORE:
4865       store_assemble(t,&regs[t]);break;
4866     case STORELR:
4867       storelr_assemble(t,&regs[t]);break;
4868     case COP0:
4869       cop0_assemble(t,&regs[t]);break;
4870     case COP1:
4871       cop1_assemble(t,&regs[t]);break;
4872     case C1LS:
4873       c1ls_assemble(t,&regs[t]);break;
4874     case COP2:
4875       cop2_assemble(t,&regs[t]);break;
4876     case C2LS:
4877       c2ls_assemble(t,&regs[t]);break;
4878     case C2OP:
4879       c2op_assemble(t,&regs[t]);break;
4880     case FCONV:
4881       fconv_assemble(t,&regs[t]);break;
4882     case FLOAT:
4883       float_assemble(t,&regs[t]);break;
4884     case FCOMP:
4885       fcomp_assemble(t,&regs[t]);break;
4886     case MULTDIV:
4887       multdiv_assemble(t,&regs[t]);break;
4888     case MOV:
4889       mov_assemble(t,&regs[t]);break;
4890     case SYSCALL:
4891     case HLECALL:
4892     case INTCALL:
4893     case SPAN:
4894     case UJUMP:
4895     case RJUMP:
4896     case CJUMP:
4897     case SJUMP:
4898     case FJUMP:
4899       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
4900   }
4901   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4902   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4903   if(internal_branch(regs[t].is32,ba[i]+4))
4904     assem_debug("branch: internal\n");
4905   else
4906     assem_debug("branch: external\n");
4907   assert(internal_branch(regs[t].is32,ba[i]+4));
4908   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4909   emit_jmp(0);
4910 }
4911
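// do_cc: emit the cycle-counter check in front of a branch.  *adj is set
// from the target's ccadj for internal branches (with special cases for
// RJUMP and delay-slot targets, zero otherwise); a branch back to itself
// with a NOP delay slot (source[i+1]==0) is detected as an idle loop and
// handled specially; otherwise the elapsed cycles are added (or compared)
// and a CC_STUB is registered for the out-of-cycles case.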
4912 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4913 {
4914   int count;
4915   int jaddr;
4916   int idle=0;
4917   int t=0;
4918   if(itype[i]==RJUMP)
4919   {
4920     *adj=0;
4921   }
4922   //if(ba[i]>=start && ba[i]<(start+slen*4))
4923   if(internal_branch(branch_regs[i].is32,ba[i]))
4924   {
4925     t=(ba[i]-start)>>2;
4926     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4927     else *adj=ccadj[t];
4928   }
4929   else
4930   {
4931     *adj=0;
4932   }
4933   count=ccadj[i];
4934   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4935     // Idle loop
4936     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4937     idle=(int)out;
4938     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4939     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4940     jaddr=(int)out;
4941     emit_jmp(0);
4942   }
4943   else if(*adj==0||invert) {
4944     int cycles=CLOCK_ADJUST(count+2);
4945     // faster loop HACK
4946     if (t&&*adj) {
4947       int rel=t-i;
4948       if(-NO_CYCLE_PENALTY_THR<rel&&rel<0)
4949         cycles=CLOCK_ADJUST(*adj)+count+2-*adj;
4950     }
4951     emit_addimm_and_set_flags(cycles,HOST_CCREG);
4952     jaddr=(int)out;
4953     emit_jns(0);
4954   }
4955   else
4956   {
4957     emit_cmpimm(HOST_CCREG,-CLOCK_ADJUST(count+2));
4958     jaddr=(int)out;
4959     emit_jns(0);
4960   }
4961   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4962 }
4963
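// do_ccstub: out-of-line code for a CC_STUB.  Dirty registers are written
// back according to how the branch was resolved (NULLDS / not TAKEN /
// TAKEN), and the PC to resume at is either taken directly from the stub
// (stubs[n][5]) or, for conditional branches, recomputed with the compare
// and cmov sequences below.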
4964 void do_ccstub(int n)
4965 {
4966   literal_pool(256);
4967   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4968   set_jump_target(stubs[n][1],(int)out);
4969   int i=stubs[n][4];
4970   if(stubs[n][6]==NULLDS) {
4971     // Delay slot instruction is nullified ("likely" branch)
4972     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4973   }
4974   else if(stubs[n][6]!=TAKEN) {
4975     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4976   }
4977   else {
4978     if(internal_branch(branch_regs[i].is32,ba[i]))
4979       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4980   }
4981   if(stubs[n][5]!=-1)
4982   {
4983     // Save PC as return address
4984     emit_movimm(stubs[n][5],EAX);
4985     emit_writeword(EAX,(int)&pcaddr);
4986   }
4987   else
4988   {
4989     // Return address depends on which way the branch goes
4990     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4991     {
4992       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4993       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4994       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4995       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4996       if(rs1[i]==0)
4997       {
4998         s1l=s2l;s1h=s2h;
4999         s2l=s2h=-1;
5000       }
5001       else if(rs2[i]==0)
5002       {
5003         s2l=s2h=-1;
5004       }
5005       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
5006         s1h=s2h=-1;
5007       }
5008       assert(s1l>=0);
5009       #ifdef DESTRUCTIVE_WRITEBACK
5010       if(rs1[i]) {
5011         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
5012           emit_loadreg(rs1[i],s1l);
5013       }
5014       else {
5015         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
5016           emit_loadreg(rs2[i],s1l);
5017       }
5018       if(s2l>=0)
5019         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
5020           emit_loadreg(rs2[i],s2l);
5021       #endif
5022       int hr=0;
5023       int addr=-1,alt=-1,ntaddr=-1;
5024       while(hr<HOST_REGS)
5025       {
5026         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5027            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5028            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5029         {
5030           addr=hr++;break;
5031         }
5032         hr++;
5033       }
5034       while(hr<HOST_REGS)
5035       {
5036         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5037            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5038            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5039         {
5040           alt=hr++;break;
5041         }
5042         hr++;
5043       }
5044       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
5045       {
5046         while(hr<HOST_REGS)
5047         {
5048           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5049              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5050              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5051           {
5052             ntaddr=hr;break;
5053           }
5054           hr++;
5055         }
5056         assert(hr<HOST_REGS);
5057       }
5058       if((opcode[i]&0x2f)==4) // BEQ
5059       {
5060         #ifdef HAVE_CMOV_IMM
5061         if(s1h<0) {
5062           if(s2l>=0) emit_cmp(s1l,s2l);
5063           else emit_test(s1l,s1l);
5064           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5065         }
5066         else
5067         #endif
5068         {
5069           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5070           if(s1h>=0) {
5071             if(s2h>=0) emit_cmp(s1h,s2h);
5072             else emit_test(s1h,s1h);
5073             emit_cmovne_reg(alt,addr);
5074           }
5075           if(s2l>=0) emit_cmp(s1l,s2l);
5076           else emit_test(s1l,s1l);
5077           emit_cmovne_reg(alt,addr);
5078         }
5079       }
5080       if((opcode[i]&0x2f)==5) // BNE
5081       {
5082         #ifdef HAVE_CMOV_IMM
5083         if(s1h<0) {
5084           if(s2l>=0) emit_cmp(s1l,s2l);
5085           else emit_test(s1l,s1l);
5086           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5087         }
5088         else
5089         #endif
5090         {
5091           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5092           if(s1h>=0) {
5093             if(s2h>=0) emit_cmp(s1h,s2h);
5094             else emit_test(s1h,s1h);
5095             emit_cmovne_reg(alt,addr);
5096           }
5097           if(s2l>=0) emit_cmp(s1l,s2l);
5098           else emit_test(s1l,s1l);
5099           emit_cmovne_reg(alt,addr);
5100         }
5101       }
5102       if((opcode[i]&0x2f)==6) // BLEZ
5103       {
5104         //emit_movimm(ba[i],alt);
5105         //emit_movimm(start+i*4+8,addr);
5106         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5107         emit_cmpimm(s1l,1);
5108         if(s1h>=0) emit_mov(addr,ntaddr);
5109         emit_cmovl_reg(alt,addr);
5110         if(s1h>=0) {
5111           emit_test(s1h,s1h);
5112           emit_cmovne_reg(ntaddr,addr);
5113           emit_cmovs_reg(alt,addr);
5114         }
5115       }
5116       if((opcode[i]&0x2f)==7) // BGTZ
5117       {
5118         //emit_movimm(ba[i],addr);
5119         //emit_movimm(start+i*4+8,ntaddr);
5120         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5121         emit_cmpimm(s1l,1);
5122         if(s1h>=0) emit_mov(addr,alt);
5123         emit_cmovl_reg(ntaddr,addr);
5124         if(s1h>=0) {
5125           emit_test(s1h,s1h);
5126           emit_cmovne_reg(alt,addr);
5127           emit_cmovs_reg(ntaddr,addr);
5128         }
5129       }
5130       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
5131       {
5132         //emit_movimm(ba[i],alt);
5133         //emit_movimm(start+i*4+8,addr);
5134         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5135         if(s1h>=0) emit_test(s1h,s1h);
5136         else emit_test(s1l,s1l);
5137         emit_cmovs_reg(alt,addr);
5138       }
5139       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
5140       {
5141         //emit_movimm(ba[i],addr);
5142         //emit_movimm(start+i*4+8,alt);
5143         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5144         if(s1h>=0) emit_test(s1h,s1h);
5145         else emit_test(s1l,s1l);
5146         emit_cmovs_reg(alt,addr);
5147       }
5148       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5149         if(source[i]&0x10000) // BC1T
5150         {
5151           //emit_movimm(ba[i],alt);
5152           //emit_movimm(start+i*4+8,addr);
5153           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5154           emit_testimm(s1l,0x800000);
5155           emit_cmovne_reg(alt,addr);
5156         }
5157         else // BC1F
5158         {
5159           //emit_movimm(ba[i],addr);
5160           //emit_movimm(start+i*4+8,alt);
5161           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5162           emit_testimm(s1l,0x800000);
5163           emit_cmovne_reg(alt,addr);
5164         }
5165       }
5166       emit_writeword(addr,(int)&pcaddr);
5167     }
5168     else
5169     if(itype[i]==RJUMP)
5170     {
5171       int r=get_reg(branch_regs[i].regmap,rs1[i]);
5172       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5173         r=get_reg(branch_regs[i].regmap,RTEMP);
5174       }
5175       emit_writeword(r,(int)&pcaddr);
5176     }
5177     else {SysPrintf("Unknown branch type in do_ccstub\n");exit(1);}
5178   }
5179   // Update cycle count
5180   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
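  // stubs[n][3] holds cycles the inline code has not yet charged to the
  // counter: add them so cc_interrupt sees the right count, then take them
  // off again since the code at the return address accounts for them itself.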
5181   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
5182   emit_call((int)cc_interrupt);
5183   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_ADJUST((int)stubs[n][3]),HOST_CCREG);
5184   if(stubs[n][6]==TAKEN) {
5185     if(internal_branch(branch_regs[i].is32,ba[i]))
5186       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5187     else if(itype[i]==RJUMP) {
5188       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5189         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5190       else
5191         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5192     }
5193   }else if(stubs[n][6]==NOTTAKEN) {
5194     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5195     else load_all_regs(branch_regs[i].regmap);
5196   }else if(stubs[n][6]==NULLDS) {
5197     // Delay slot instruction is nullified ("likely" branch)
5198     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5199     else load_all_regs(regs[i].regmap);
5200   }else{
5201     load_all_regs(branch_regs[i].regmap);
5202   }
5203   emit_jmp(stubs[n][2]); // return address
5204
5205   /* This works but uses a lot of memory...
5206   emit_readword((int)&last_count,ECX);
5207   emit_add(HOST_CCREG,ECX,EAX);
5208   emit_writeword(EAX,(int)&Count);
5209   emit_call((int)gen_interupt);
5210   emit_readword((int)&Count,HOST_CCREG);
5211   emit_readword((int)&next_interupt,EAX);
5212   emit_readword((int)&pending_exception,EBX);
5213   emit_writeword(EAX,(int)&last_count);
5214   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5215   emit_test(EBX,EBX);
5216   int jne_instr=(int)out;
5217   emit_jne(0);
5218   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5219   load_all_regs(branch_regs[i].regmap);
5220   emit_jmp(stubs[n][2]); // return address
5221   set_jump_target(jne_instr,(int)out);
5222   emit_readword((int)&pcaddr,EAX);
5223   // Call get_addr_ht instead of doing the hash table here.
5224   // This code is executed infrequently and takes up a lot of space
5225   // so smaller is better.
5226   emit_storereg(CCREG,HOST_CCREG);
5227   emit_pushreg(EAX);
5228   emit_call((int)get_addr_ht);
5229   emit_loadreg(CCREG,HOST_CCREG);
5230   emit_addimm(ESP,4,ESP);
5231   emit_jmpreg(EAX);*/
5232 }
5233
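// Queue a jump emitted at 'addr' that still needs to be pointed at the code
// for 'target'; the list is resolved once the whole block has been
// assembled.  Note that despite the parameter name, the call sites in this
// file pass the internal-branch flag as 'ext'.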
5234 void add_to_linker(int addr,int target,int ext)
5235 {
5236   link_addr[linkcount][0]=addr;
5237   link_addr[linkcount][1]=target;
5238   link_addr[linkcount][2]=ext;
5239   linkcount++;
5240 }
5241
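// Write the return address (address of the jump + 8) into whatever host
// register is allocated for $ra.  This runs before the delay slot when the
// slot reads $31, and after it otherwise, so JAL's link value is visible
// wherever it is needed.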
5242 static void ujump_assemble_write_ra(int i)
5243 {
5244   int rt;
5245   unsigned int return_address;
5246   rt=get_reg(branch_regs[i].regmap,31);
5247   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5248   //assert(rt>=0);
5249   return_address=start+i*4+8;
5250   if(rt>=0) {
5251     #ifdef USE_MINI_HT
5252     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
5253       int temp=-1; // note: must be ds-safe
5254       #ifdef HOST_TEMPREG
5255       temp=HOST_TEMPREG;
5256       #endif
5257       if(temp>=0) do_miniht_insert(return_address,rt,temp);
5258       else emit_movimm(return_address,rt);
5259     }
5260     else
5261     #endif
5262     {
5263       #ifdef REG_PREFETCH
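      // note: 'temp' and 'i_regmap' are not defined in this helper, so a
      // REG_PREFETCH build would need them passed in; the option is left
      // disabled here.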
5264       if(temp>=0)
5265       {
5266         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5267       }
5268       #endif
5269       emit_movimm(return_address,rt); // PC into link register
5270       #ifdef IMM_PREFETCH
5271       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5272       #endif
5273     }
5274   }
5275 }
5276
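// Assemble J/JAL.  The delay slot is emitted first, registers are written
// back or invalidated as needed, the link register is set for JAL, the
// cycle count is checked, and control transfers to the target: directly
// for an internal branch, or through the linker for another block.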
5277 void ujump_assemble(int i,struct regstat *i_regs)
5278 {
5279   signed char *i_regmap=i_regs->regmap;
5280   int ra_done=0;
5281   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5282   address_generation(i+1,i_regs,regs[i].regmap_entry);
5283   #ifdef REG_PREFETCH
5284   int temp=get_reg(branch_regs[i].regmap,PTEMP);
5285   if(rt1[i]==31&&temp>=0)
5286   {
5287     int return_address=start+i*4+8;
5288     if(get_reg(branch_regs[i].regmap,31)>0)
5289     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5290   }
5291   #endif
5292   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5293     ujump_assemble_write_ra(i); // writeback ra for DS
5294     ra_done=1;
5295   }
5296   ds_assemble(i+1,i_regs);
5297   uint64_t bc_unneeded=branch_regs[i].u;
5298   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5299   bc_unneeded|=1|(1LL<<rt1[i]);
5300   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5301   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5302                 bc_unneeded,bc_unneeded_upper);
5303   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5304   if(!ra_done&&rt1[i]==31)
5305     ujump_assemble_write_ra(i);
5306   int cc,adj;
5307   cc=get_reg(branch_regs[i].regmap,CCREG);
5308   assert(cc==HOST_CCREG);
5309   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5310   #ifdef REG_PREFETCH
5311   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5312   #endif
5313   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5314   if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5315   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5316   if(internal_branch(branch_regs[i].is32,ba[i]))
5317     assem_debug("branch: internal\n");
5318   else
5319     assem_debug("branch: external\n");
5320   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5321     ds_assemble_entry(i);
5322   }
5323   else {
5324     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5325     emit_jmp(0);
5326   }
5327 }
5328
5329 static void rjump_assemble_write_ra(int i)
5330 {
5331   int rt,return_address;
5332   assert(rt1[i+1]!=rt1[i]);
5333   assert(rt2[i+1]!=rt1[i]);
5334   rt=get_reg(branch_regs[i].regmap,rt1[i]);
5335   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5336   assert(rt>=0);
5337   return_address=start+i*4+8;
5338   #ifdef REG_PREFETCH
5339   if(temp>=0)
5340   {
5341     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5342   }
5343   #endif
5344   emit_movimm(return_address,rt); // PC into link register
5345   #ifdef IMM_PREFETCH
5346   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5347   #endif
5348 }
5349
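// Assemble JR/JALR.  The target comes from a register, so after the delay
// slot and the cycle check the code jumps through jump_vaddr_reg[rs], which
// resolves the virtual address (compiling the target first if it is not
// present), or through the mini hash table for 'jr $ra' when USE_MINI_HT
// is enabled.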
5350 void rjump_assemble(int i,struct regstat *i_regs)
5351 {
5352   signed char *i_regmap=i_regs->regmap;
5353   int temp;
5354   int rs,cc,adj;
5355   int ra_done=0;
5356   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5357   assert(rs>=0);
5358   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5359     // Delay slot abuse, make a copy of the branch address register
5360     temp=get_reg(branch_regs[i].regmap,RTEMP);
5361     assert(temp>=0);
5362     assert(regs[i].regmap[temp]==RTEMP);
5363     emit_mov(rs,temp);
5364     rs=temp;
5365   }
5366   address_generation(i+1,i_regs,regs[i].regmap_entry);
5367   #ifdef REG_PREFETCH
5368   if(rt1[i]==31)
5369   {
5370     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5371       int return_address=start+i*4+8;
5372       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5373     }
5374   }
5375   #endif
5376   #ifdef USE_MINI_HT
5377   if(rs1[i]==31) {
5378     int rh=get_reg(regs[i].regmap,RHASH);
5379     if(rh>=0) do_preload_rhash(rh);
5380   }
5381   #endif
5382   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5383     rjump_assemble_write_ra(i);
5384     ra_done=1;
5385   }
5386   ds_assemble(i+1,i_regs);
5387   uint64_t bc_unneeded=branch_regs[i].u;
5388   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5389   bc_unneeded|=1|(1LL<<rt1[i]);
5390   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5391   bc_unneeded&=~(1LL<<rs1[i]);
5392   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5393                 bc_unneeded,bc_unneeded_upper);
5394   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5395   if(!ra_done&&rt1[i]!=0)
5396     rjump_assemble_write_ra(i);
5397   cc=get_reg(branch_regs[i].regmap,CCREG);
5398   assert(cc==HOST_CCREG);
5399   #ifdef USE_MINI_HT
5400   int rh=get_reg(branch_regs[i].regmap,RHASH);
5401   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5402   if(rs1[i]==31) {
5403     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5404     do_preload_rhtbl(ht);
5405     do_rhash(rs,rh);
5406   }
5407   #endif
5408   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5409   #ifdef DESTRUCTIVE_WRITEBACK
5410   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5411     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5412       emit_loadreg(rs1[i],rs);
5413     }
5414   }
5415   #endif
5416   #ifdef REG_PREFETCH
5417   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5418   #endif
5419   #ifdef USE_MINI_HT
5420   if(rs1[i]==31) {
5421     do_miniht_load(ht,rh);
5422   }
5423   #endif
5424   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5425   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5426   //assert(adj==0);
5427   emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5428   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
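  // PCSX special case: when the delay slot is RFE, always drop into the CC
  // stub, presumably so that interrupts unmasked by RFE are checked before
  // execution continues at the jump target.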
5429 #ifdef PCSX
5430   if(itype[i+1]==COP0&&(source[i+1]&0x3f)==0x10)
5431     // special case for RFE
5432     emit_jmp(0);
5433   else
5434 #endif
5435   emit_jns(0);
5436   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5437   #ifdef USE_MINI_HT
5438   if(rs1[i]==31) {
5439     do_miniht_jump(rs,rh,ht);
5440   }
5441   else
5442   #endif
5443   {
5444     //if(rs!=EAX) emit_mov(rs,EAX);
5445     //emit_jmp((int)jump_vaddr_eax);
5446     emit_jmp(jump_vaddr_reg[rs]);
5447   }
5448   /* Check hash table
5449   temp=!rs;
5450   emit_mov(rs,temp);
5451   emit_shrimm(rs,16,rs);
5452   emit_xor(temp,rs,rs);
5453   emit_movzwl_reg(rs,rs);
5454   emit_shlimm(rs,4,rs);
5455   emit_cmpmem_indexed((int)hash_table,rs,temp);
5456   emit_jne((int)out+14);
5457   emit_readword_indexed((int)hash_table+4,rs,rs);
5458   emit_jmpreg(rs);
5459   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5460   emit_addimm_no_flags(8,rs);
5461   emit_jeq((int)out-17);
5462   // No hit on hash table, call compiler
5463   emit_pushreg(temp);
5464 //DEBUG >
5465 #ifdef DEBUG_CYCLE_COUNT
5466   emit_readword((int)&last_count,ECX);
5467   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5468   emit_readword((int)&next_interupt,ECX);
5469   emit_writeword(HOST_CCREG,(int)&Count);
5470   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5471   emit_writeword(ECX,(int)&last_count);
5472 #endif
5473 //DEBUG <
5474   emit_storereg(CCREG,HOST_CCREG);
5475   emit_call((int)get_addr);
5476   emit_loadreg(CCREG,HOST_CCREG);
5477   emit_addimm(ESP,4,ESP);
5478   emit_jmpreg(EAX);*/
5479   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5480   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5481   #endif
5482 }
5483
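// Assemble BEQ/BNE/BLEZ/BGTZ and their "likely" forms.  With ooo[i] set the
// delay slot is emitted before the compare; otherwise the compare comes
// first and the delay slot is assembled separately on the taken and
// not-taken paths.  'invert' flips the emitted condition (used when
// match_bt says the target's register mapping differs, or for the
// Cortex-A8 workaround) so the fall-through side needs no fixup and the
// taken side can shuffle registers before jumping.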
5484 void cjump_assemble(int i,struct regstat *i_regs)
5485 {
5486   signed char *i_regmap=i_regs->regmap;
5487   int cc;
5488   int match;
5489   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5490   assem_debug("match=%d\n",match);
5491   int s1h,s1l,s2h,s2l;
5492   int prev_cop1_usable=cop1_usable;
5493   int unconditional=0,nop=0;
5494   int only32=0;
5495   int invert=0;
5496   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5497   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5498   if(!match) invert=1;
5499   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5500   if(i>(ba[i]-start)>>2) invert=1;
5501   #endif
5502
5503   if(ooo[i]) {
5504     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5505     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5506     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5507     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5508   }
5509   else {
5510     s1l=get_reg(i_regmap,rs1[i]);
5511     s1h=get_reg(i_regmap,rs1[i]|64);
5512     s2l=get_reg(i_regmap,rs2[i]);
5513     s2h=get_reg(i_regmap,rs2[i]|64);
5514   }
5515   if(rs1[i]==0&&rs2[i]==0)
5516   {
5517     if(opcode[i]&1) nop=1;
5518     else unconditional=1;
5519     //assert(opcode[i]!=5);
5520     //assert(opcode[i]!=7);
5521     //assert(opcode[i]!=0x15);
5522     //assert(opcode[i]!=0x17);
5523   }
5524   else if(rs1[i]==0)
5525   {
5526     s1l=s2l;s1h=s2h;
5527     s2l=s2h=-1;
5528     only32=(regs[i].was32>>rs2[i])&1;
5529   }
5530   else if(rs2[i]==0)
5531   {
5532     s2l=s2h=-1;
5533     only32=(regs[i].was32>>rs1[i])&1;
5534   }
5535   else {
5536     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5537   }
5538
5539   if(ooo[i]) {
5540     // Out of order execution (delay slot first)
5541     //printf("OOOE\n");
5542     address_generation(i+1,i_regs,regs[i].regmap_entry);
5543     ds_assemble(i+1,i_regs);
5544     int adj;
5545     uint64_t bc_unneeded=branch_regs[i].u;
5546     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5547     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5548     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5549     bc_unneeded|=1;
5550     bc_unneeded_upper|=1;
5551     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5552                   bc_unneeded,bc_unneeded_upper);
5553     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5554     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5555     cc=get_reg(branch_regs[i].regmap,CCREG);
5556     assert(cc==HOST_CCREG);
5557     if(unconditional)
5558       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5559     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5560     //assem_debug("cycle count (adj)\n");
5561     if(unconditional) {
5562       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5563       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5564         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5565         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5566         if(internal)
5567           assem_debug("branch: internal\n");
5568         else
5569           assem_debug("branch: external\n");
5570         if(internal&&is_ds[(ba[i]-start)>>2]) {
5571           ds_assemble_entry(i);
5572         }
5573         else {
5574           add_to_linker((int)out,ba[i],internal);
5575           emit_jmp(0);
5576         }
5577         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5578         if(((u_int)out)&7) emit_addnop(0);
5579         #endif
5580       }
5581     }
5582     else if(nop) {
5583       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5584       int jaddr=(int)out;
5585       emit_jns(0);
5586       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5587     }
5588     else {
5589       int taken=0,nottaken=0,nottaken1=0;
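      // taken/nottaken/nottaken1 record the addresses of jumps whose targets
      // are patched once the other path has been emitted.  For 64-bit values
      // (N64 heritage) the upper halves are compared first; PSX code
      // normally runs with only32 set.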
5590       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5591       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5592       if(!only32)
5593       {
5594         assert(s1h>=0);
5595         if(opcode[i]==4) // BEQ
5596         {
5597           if(s2h>=0) emit_cmp(s1h,s2h);
5598           else emit_test(s1h,s1h);
5599           nottaken1=(int)out;
5600           emit_jne(1);
5601         }
5602         if(opcode[i]==5) // BNE
5603         {
5604           if(s2h>=0) emit_cmp(s1h,s2h);
5605           else emit_test(s1h,s1h);
5606           if(invert) taken=(int)out;
5607           else add_to_linker((int)out,ba[i],internal);
5608           emit_jne(0);
5609         }
5610         if(opcode[i]==6) // BLEZ
5611         {
5612           emit_test(s1h,s1h);
5613           if(invert) taken=(int)out;
5614           else add_to_linker((int)out,ba[i],internal);
5615           emit_js(0);
5616           nottaken1=(int)out;
5617           emit_jne(1);
5618         }
5619         if(opcode[i]==7) // BGTZ
5620         {
5621           emit_test(s1h,s1h);
5622           nottaken1=(int)out;
5623           emit_js(1);
5624           if(invert) taken=(int)out;
5625           else add_to_linker((int)out,ba[i],internal);
5626           emit_jne(0);
5627         }
5628       } // if(!only32)
5629
5630       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5631       assert(s1l>=0);
5632       if(opcode[i]==4) // BEQ
5633       {
5634         if(s2l>=0) emit_cmp(s1l,s2l);
5635         else emit_test(s1l,s1l);
5636         if(invert){
5637           nottaken=(int)out;
5638           emit_jne(1);
5639         }else{
5640           add_to_linker((int)out,ba[i],internal);
5641           emit_jeq(0);
5642         }
5643       }
5644       if(opcode[i]==5) // BNE
5645       {
5646         if(s2l>=0) emit_cmp(s1l,s2l);
5647         else emit_test(s1l,s1l);
5648         if(invert){
5649           nottaken=(int)out;
5650           emit_jeq(1);
5651         }else{
5652           add_to_linker((int)out,ba[i],internal);
5653           emit_jne(0);
5654         }
5655       }
5656       if(opcode[i]==6) // BLEZ
5657       {
5658         emit_cmpimm(s1l,1);
5659         if(invert){
5660           nottaken=(int)out;
5661           emit_jge(1);
5662         }else{
5663           add_to_linker((int)out,ba[i],internal);
5664           emit_jl(0);
5665         }
5666       }
5667       if(opcode[i]==7) // BGTZ
5668       {
5669         emit_cmpimm(s1l,1);
5670         if(invert){
5671           nottaken=(int)out;
5672           emit_jl(1);
5673         }else{
5674           add_to_linker((int)out,ba[i],internal);
5675           emit_jge(0);
5676         }
5677       }
5678       if(invert) {
5679         if(taken) set_jump_target(taken,(int)out);
5680         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5681         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5682           if(adj) {
5683             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5684             add_to_linker((int)out,ba[i],internal);
5685           }else{
5686             emit_addnop(13);
5687             add_to_linker((int)out,ba[i],internal*2);
5688           }
5689           emit_jmp(0);
5690         }else
5691         #endif
5692         {
5693           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
5694           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5695           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5696           if(internal)
5697             assem_debug("branch: internal\n");
5698           else
5699             assem_debug("branch: external\n");
5700           if(internal&&is_ds[(ba[i]-start)>>2]) {
5701             ds_assemble_entry(i);
5702           }
5703           else {
5704             add_to_linker((int)out,ba[i],internal);
5705             emit_jmp(0);
5706           }
5707         }
5708         set_jump_target(nottaken,(int)out);
5709       }
5710
5711       if(nottaken1) set_jump_target(nottaken1,(int)out);
5712       if(adj) {
5713         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
5714       }
5715     } // (!unconditional)
5716   } // if(ooo)
5717   else
5718   {
5719     // In-order execution (branch first)
5720     //if(likely[i]) printf("IOL\n");
5721     //else
5722     //printf("IOE\n");
5723     int taken=0,nottaken=0,nottaken1=0;
5724     if(!unconditional&&!nop) {
5725       if(!only32)
5726       {
5727         assert(s1h>=0);
5728         if((opcode[i]&0x2f)==4) // BEQ
5729         {
5730           if(s2h>=0) emit_cmp(s1h,s2h);
5731           else emit_test(s1h,s1h);
5732           nottaken1=(int)out;
5733           emit_jne(2);
5734         }
5735         if((opcode[i]&0x2f)==5) // BNE
5736         {
5737           if(s2h>=0) emit_cmp(s1h,s2h);
5738           else emit_test(s1h,s1h);
5739           taken=(int)out;
5740           emit_jne(1);
5741         }
5742         if((opcode[i]&0x2f)==6) // BLEZ
5743         {
5744           emit_test(s1h,s1h);
5745           taken=(int)out;
5746           emit_js(1);
5747           nottaken1=(int)out;
5748           emit_jne(2);
5749         }
5750         if((opcode[i]&0x2f)==7) // BGTZ
5751         {
5752           emit_test(s1h,s1h);
5753           nottaken1=(int)out;
5754           emit_js(2);
5755           taken=(int)out;
5756           emit_jne(1);
5757         }
5758       } // if(!only32)
5759
5760       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5761       assert(s1l>=0);
5762       if((opcode[i]&0x2f)==4) // BEQ
5763       {
5764         if(s2l>=0) emit_cmp(s1l,s2l);
5765         else emit_test(s1l,s1l);
5766         nottaken=(int)out;
5767         emit_jne(2);
5768       }
5769       if((opcode[i]&0x2f)==5) // BNE
5770       {
5771         if(s2l>=0) emit_cmp(s1l,s2l);
5772         else emit_test(s1l,s1l);
5773         nottaken=(int)out;
5774         emit_jeq(2);
5775       }
5776       if((opcode[i]&0x2f)==6) // BLEZ
5777       {
5778         emit_cmpimm(s1l,1);
5779         nottaken=(int)out;
5780         emit_jge(2);
5781       }
5782       if((opcode[i]&0x2f)==7) // BGTZ
5783       {
5784         emit_cmpimm(s1l,1);
5785         nottaken=(int)out;
5786         emit_jl(2);
5787       }
5788     } // if(!unconditional)
5789     int adj;
5790     uint64_t ds_unneeded=branch_regs[i].u;
5791     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5792     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5793     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5794     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5795     ds_unneeded|=1;
5796     ds_unneeded_upper|=1;
5797     // branch taken
5798     if(!nop) {
5799       if(taken) set_jump_target(taken,(int)out);
5800       assem_debug("1:\n");
5801       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5802                     ds_unneeded,ds_unneeded_upper);
5803       // load regs
5804       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5805       address_generation(i+1,&branch_regs[i],0);
5806       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5807       ds_assemble(i+1,&branch_regs[i]);
5808       cc=get_reg(branch_regs[i].regmap,CCREG);
5809       if(cc==-1) {
5810         emit_loadreg(CCREG,cc=HOST_CCREG);
5811         // CHECK: Is the following instruction (fall thru) allocated ok?
5812       }
5813       assert(cc==HOST_CCREG);
5814       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5815       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5816       assem_debug("cycle count (adj)\n");
5817       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5818       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5819       if(internal)
5820         assem_debug("branch: internal\n");
5821       else
5822         assem_debug("branch: external\n");
5823       if(internal&&is_ds[(ba[i]-start)>>2]) {
5824         ds_assemble_entry(i);
5825       }
5826       else {
5827         add_to_linker((int)out,ba[i],internal);
5828         emit_jmp(0);
5829       }
5830     }
5831     // branch not taken
5832     cop1_usable=prev_cop1_usable;
5833     if(!unconditional) {
5834       if(nottaken1) set_jump_target(nottaken1,(int)out);
5835       set_jump_target(nottaken,(int)out);
5836       assem_debug("2:\n");
5837       if(!likely[i]) {
5838         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5839                       ds_unneeded,ds_unneeded_upper);
5840         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5841         address_generation(i+1,&branch_regs[i],0);
5842         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5843         ds_assemble(i+1,&branch_regs[i]);
5844       }
5845       cc=get_reg(branch_regs[i].regmap,CCREG);
5846       if(cc==-1&&!likely[i]) {
5847         // Cycle count isn't in a register, temporarily load it then write it out
5848         emit_loadreg(CCREG,HOST_CCREG);
5849         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
5850         int jaddr=(int)out;
5851         emit_jns(0);
5852         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5853         emit_storereg(CCREG,HOST_CCREG);
5854       }
5855       else{
5856         cc=get_reg(i_regmap,CCREG);
5857         assert(cc==HOST_CCREG);
5858         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5859         int jaddr=(int)out;
5860         emit_jns(0);
5861         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5862       }
5863     }
5864   }
5865 }
5866
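// Assemble the REGIMM branches BLTZ/BGEZ/BLTZAL/BGEZAL and their likely
// forms.  The AL variants write the return address to $31 whether or not
// the branch is taken; with rs1==$zero, BGEZ becomes unconditional and
// BLTZ is never taken.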
5867 void sjump_assemble(int i,struct regstat *i_regs)
5868 {
5869   signed char *i_regmap=i_regs->regmap;
5870   int cc;
5871   int match;
5872   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5873   assem_debug("smatch=%d\n",match);
5874   int s1h,s1l;
5875   int prev_cop1_usable=cop1_usable;
5876   int unconditional=0,nevertaken=0;
5877   int only32=0;
5878   int invert=0;
5879   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5880   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5881   if(!match) invert=1;
5882   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5883   if(i>(ba[i]-start)>>2) invert=1;
5884   #endif
5885
5886   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5887   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5888
5889   if(ooo[i]) {
5890     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5891     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5892   }
5893   else {
5894     s1l=get_reg(i_regmap,rs1[i]);
5895     s1h=get_reg(i_regmap,rs1[i]|64);
5896   }
5897   if(rs1[i]==0)
5898   {
5899     if(opcode2[i]&1) unconditional=1;
5900     else nevertaken=1;
5901     // These are never taken (r0 is never less than zero)
5902     //assert(opcode2[i]!=0);
5903     //assert(opcode2[i]!=2);
5904     //assert(opcode2[i]!=0x10);
5905     //assert(opcode2[i]!=0x12);
5906   }
5907   else {
5908     only32=(regs[i].was32>>rs1[i])&1;
5909   }
5910
5911   if(ooo[i]) {
5912     // Out of order execution (delay slot first)
5913     //printf("OOOE\n");
5914     address_generation(i+1,i_regs,regs[i].regmap_entry);
5915     ds_assemble(i+1,i_regs);
5916     int adj;
5917     uint64_t bc_unneeded=branch_regs[i].u;
5918     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5919     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5920     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5921     bc_unneeded|=1;
5922     bc_unneeded_upper|=1;
5923     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5924                   bc_unneeded,bc_unneeded_upper);
5925     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5926     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5927     if(rt1[i]==31) {
5928       int rt,return_address;
5929       rt=get_reg(branch_regs[i].regmap,31);
5930       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5931       if(rt>=0) {
5932         // Save the PC even if the branch is not taken
5933         return_address=start+i*4+8;
5934         emit_movimm(return_address,rt); // PC into link register
5935         #ifdef IMM_PREFETCH
5936         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5937         #endif
5938       }
5939     }
5940     cc=get_reg(branch_regs[i].regmap,CCREG);
5941     assert(cc==HOST_CCREG);
5942     if(unconditional)
5943       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5944     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5945     assem_debug("cycle count (adj)\n");
5946     if(unconditional) {
5947       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5948       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5949         if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5950         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5951         if(internal)
5952           assem_debug("branch: internal\n");
5953         else
5954           assem_debug("branch: external\n");
5955         if(internal&&is_ds[(ba[i]-start)>>2]) {
5956           ds_assemble_entry(i);
5957         }
5958         else {
5959           add_to_linker((int)out,ba[i],internal);
5960           emit_jmp(0);
5961         }
5962         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5963         if(((u_int)out)&7) emit_addnop(0);
5964         #endif
5965       }
5966     }
5967     else if(nevertaken) {
5968       emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
5969       int jaddr=(int)out;
5970       emit_jns(0);
5971       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5972     }
5973     else {
5974       int nottaken=0;
5975       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5976       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
5977       if(!only32)
5978       {
5979         assert(s1h>=0);
5980         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5981         {
5982           emit_test(s1h,s1h);
5983           if(invert){
5984             nottaken=(int)out;
5985             emit_jns(1);
5986           }else{
5987             add_to_linker((int)out,ba[i],internal);
5988             emit_js(0);
5989           }
5990         }
5991         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5992         {
5993           emit_test(s1h,s1h);
5994           if(invert){
5995             nottaken=(int)out;
5996             emit_js(1);
5997           }else{
5998             add_to_linker((int)out,ba[i],internal);
5999             emit_jns(0);
6000           }
6001         }
6002       } // if(!only32)
6003       else
6004       {
6005         assert(s1l>=0);
6006         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
6007         {
6008           emit_test(s1l,s1l);
6009           if(invert){
6010             nottaken=(int)out;
6011             emit_jns(1);
6012           }else{
6013             add_to_linker((int)out,ba[i],internal);
6014             emit_js(0);
6015           }
6016         }
6017         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
6018         {
6019           emit_test(s1l,s1l);
6020           if(invert){
6021             nottaken=(int)out;
6022             emit_js(1);
6023           }else{
6024             add_to_linker((int)out,ba[i],internal);
6025             emit_jns(0);
6026           }
6027         }
6028       } // if(!only32)
6029
6030       if(invert) {
6031         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6032         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
6033           if(adj) {
6034             emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
6035             add_to_linker((int)out,ba[i],internal);
6036           }else{
6037             emit_addnop(13);
6038             add_to_linker((int)out,ba[i],internal*2);
6039           }
6040           emit_jmp(0);
6041         }else
6042         #endif
6043         {
6044           if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
6045           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6046           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6047           if(internal)
6048             assem_debug("branch: internal\n");
6049           else
6050             assem_debug("branch: external\n");
6051           if(internal&&is_ds[(ba[i]-start)>>2]) {
6052             ds_assemble_entry(i);
6053           }
6054           else {
6055             add_to_linker((int)out,ba[i],internal);
6056             emit_jmp(0);
6057           }
6058         }
6059         set_jump_target(nottaken,(int)out);
6060       }
6061
6062       if(adj) {
6063         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
6064       }
6065     } // (!unconditional)
6066   } // if(ooo)
6067   else
6068   {
6069     // In-order execution (branch first)
6070     //printf("IOE\n");
6071     int nottaken=0;
6072     if(rt1[i]==31) {
6073       int rt,return_address;
6074       rt=get_reg(branch_regs[i].regmap,31);
6075       if(rt>=0) {
6076         // Save the PC even if the branch is not taken
6077         return_address=start+i*4+8;
6078         emit_movimm(return_address,rt); // PC into link register
6079         #ifdef IMM_PREFETCH
6080         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
6081         #endif
6082       }
6083     }
6084     if(!unconditional) {
6085       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6086       if(!only32)
6087       {
6088         assert(s1h>=0);
6089         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6090         {
6091           emit_test(s1h,s1h);
6092           nottaken=(int)out;
6093           emit_jns(1);
6094         }
6095         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6096         {
6097           emit_test(s1h,s1h);
6098           nottaken=(int)out;
6099           emit_js(1);
6100         }
6101       } // if(!only32)
6102       else
6103       {
6104         assert(s1l>=0);
6105         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6106         {
6107           emit_test(s1l,s1l);
6108           nottaken=(int)out;
6109           emit_jns(1);
6110         }
6111         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6112         {
6113           emit_test(s1l,s1l);
6114           nottaken=(int)out;
6115           emit_js(1);
6116         }
6117       }
6118     } // if(!unconditional)
6119     int adj;
6120     uint64_t ds_unneeded=branch_regs[i].u;
6121     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6122     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6123     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6124     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6125     ds_unneeded|=1;
6126     ds_unneeded_upper|=1;
6127     // branch taken
6128     if(!nevertaken) {
6129       //assem_debug("1:\n");
6130       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6131                     ds_unneeded,ds_unneeded_upper);
6132       // load regs
6133       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6134       address_generation(i+1,&branch_regs[i],0);
6135       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6136       ds_assemble(i+1,&branch_regs[i]);
6137       cc=get_reg(branch_regs[i].regmap,CCREG);
6138       if(cc==-1) {
6139         emit_loadreg(CCREG,cc=HOST_CCREG);
6140         // CHECK: Is the following instruction (fall thru) allocated ok?
6141       }
6142       assert(cc==HOST_CCREG);
6143       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6144       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6145       assem_debug("cycle count (adj)\n");
6146       if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
6147       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6148       if(internal)
6149         assem_debug("branch: internal\n");
6150       else
6151         assem_debug("branch: external\n");
6152       if(internal&&is_ds[(ba[i]-start)>>2]) {
6153         ds_assemble_entry(i);
6154       }
6155       else {
6156         add_to_linker((int)out,ba[i],internal);
6157         emit_jmp(0);
6158       }
6159     }
6160     // branch not taken
6161     cop1_usable=prev_cop1_usable;
6162     if(!unconditional) {
6163       set_jump_target(nottaken,(int)out);
6164       assem_debug("1:\n");
6165       if(!likely[i]) {
6166         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6167                       ds_unneeded,ds_unneeded_upper);
6168         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6169         address_generation(i+1,&branch_regs[i],0);
6170         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6171         ds_assemble(i+1,&branch_regs[i]);
6172       }
6173       cc=get_reg(branch_regs[i].regmap,CCREG);
6174       if(cc==-1&&!likely[i]) {
6175         // Cycle count isn't in a register, temporarily load it then write it out
6176         emit_loadreg(CCREG,HOST_CCREG);
6177         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
6178         int jaddr=(int)out;
6179         emit_jns(0);
6180         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6181         emit_storereg(CCREG,HOST_CCREG);
6182       }
6183       else{
6184         cc=get_reg(i_regmap,CCREG);
6185         assert(cc==HOST_CCREG);
6186         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
6187         int jaddr=(int)out;
6188         emit_jns(0);
6189         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6190       }
6191     }
6192   }
6193 }
6194
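// Assemble BC1F/BC1T.  If the coprocessor-usable bit (Status bit 29,
// 0x20000000) has not been checked yet, emit a test plus FP_STUB to raise
// the coprocessor unusable exception; the branch itself tests bit 23 of
// FCR31 (the FP condition flag) held in FSREG.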
6195 void fjump_assemble(int i,struct regstat *i_regs)
6196 {
6197   signed char *i_regmap=i_regs->regmap;
6198   int cc;
6199   int match;
6200   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6201   assem_debug("fmatch=%d\n",match);
6202   int fs,cs;
6203   int eaddr;
6204   int invert=0;
6205   int internal=internal_branch(branch_regs[i].is32,ba[i]);
6206   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
6207   if(!match) invert=1;
6208   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6209   if(i>(ba[i]-start)>>2) invert=1;
6210   #endif
6211
6212   if(ooo[i]) {
6213     fs=get_reg(branch_regs[i].regmap,FSREG);
6214     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6215   }
6216   else {
6217     fs=get_reg(i_regmap,FSREG);
6218   }
6219
6220   // Check cop1 unusable
6221   if(!cop1_usable) {
6222     cs=get_reg(i_regmap,CSREG);
6223     assert(cs>=0);
6224     emit_testimm(cs,0x20000000);
6225     eaddr=(int)out;
6226     emit_jeq(0);
6227     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6228     cop1_usable=1;
6229   }
6230
6231   if(ooo[i]) {
6232     // Out of order execution (delay slot first)
6233     //printf("OOOE\n");
6234     ds_assemble(i+1,i_regs);
6235     int adj;
6236     uint64_t bc_unneeded=branch_regs[i].u;
6237     uint64_t bc_unneeded_upper=branch_regs[i].uu;
6238     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6239     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6240     bc_unneeded|=1;
6241     bc_unneeded_upper|=1;
6242     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6243                   bc_unneeded,bc_unneeded_upper);
6244     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6245     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6246     cc=get_reg(branch_regs[i].regmap,CCREG);
6247     assert(cc==HOST_CCREG);
6248     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6249     assem_debug("cycle count (adj)\n");
6250     if(1) {
6251       int nottaken=0;
6252       if(adj&&!invert) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
6253       if(1) {
6254         assert(fs>=0);
6255         emit_testimm(fs,0x800000);
6256         if(source[i]&0x10000) // BC1T
6257         {
6258           if(invert){
6259             nottaken=(int)out;
6260             emit_jeq(1);
6261           }else{
6262             add_to_linker((int)out,ba[i],internal);
6263             emit_jne(0);
6264           }
6265         }
6266         else // BC1F
6267           if(invert){
6268             nottaken=(int)out;
6269             emit_jne(1);
6270           }else{
6271             add_to_linker((int)out,ba[i],internal);
6272             emit_jeq(0);
6273           }
6274         {
6275         }
6276       } // if(!only32)
6277
6278       if(invert) {
6279         if(adj) emit_addimm(cc,-CLOCK_ADJUST(adj),cc);
6280         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6281         else if(match) emit_addnop(13);
6282         #endif
6283         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6284         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6285         if(internal)
6286           assem_debug("branch: internal\n");
6287         else
6288           assem_debug("branch: external\n");
6289         if(internal&&is_ds[(ba[i]-start)>>2]) {
6290           ds_assemble_entry(i);
6291         }
6292         else {
6293           add_to_linker((int)out,ba[i],internal);
6294           emit_jmp(0);
6295         }
6296         set_jump_target(nottaken,(int)out);
6297       }
6298
6299       if(adj) {
6300         if(!invert) emit_addimm(cc,CLOCK_ADJUST(adj),cc);
6301       }
6302     } // (!unconditional)
6303   } // if(ooo)
6304   else
6305   {
6306     // In-order execution (branch first)
6307     //printf("IOE\n");
6308     int nottaken=0;
6309     if(1) {
6310       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6311       if(1) {
6312         assert(fs>=0);
6313         emit_testimm(fs,0x800000);
6314         if(source[i]&0x10000) // BC1T
6315         {
6316           nottaken=(int)out;
6317           emit_jeq(1);
6318         }
6319         else // BC1F
6320         {
6321           nottaken=(int)out;
6322           emit_jne(1);
6323         }
6324       }
6325     } // if(!unconditional)
6326     int adj;
6327     uint64_t ds_unneeded=branch_regs[i].u;
6328     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6329     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6330     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6331     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6332     ds_unneeded|=1;
6333     ds_unneeded_upper|=1;
6334     // branch taken
6335     //assem_debug("1:\n");
6336     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6337                   ds_unneeded,ds_unneeded_upper);
6338     // load regs
6339     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6340     address_generation(i+1,&branch_regs[i],0);
6341     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6342     ds_assemble(i+1,&branch_regs[i]);
6343     cc=get_reg(branch_regs[i].regmap,CCREG);
6344     if(cc==-1) {
6345       emit_loadreg(CCREG,cc=HOST_CCREG);
6346       // CHECK: Is the following instruction (fall thru) allocated ok?
6347     }
6348     assert(cc==HOST_CCREG);
6349     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6350     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6351     assem_debug("cycle count (adj)\n");
6352     if(adj) emit_addimm(cc,CLOCK_ADJUST(ccadj[i]+2-adj),cc);
6353     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6354     if(internal)
6355       assem_debug("branch: internal\n");
6356     else
6357       assem_debug("branch: external\n");
6358     if(internal&&is_ds[(ba[i]-start)>>2]) {
6359       ds_assemble_entry(i);
6360     }
6361     else {
6362       add_to_linker((int)out,ba[i],internal);
6363       emit_jmp(0);
6364     }
6365
6366     // branch not taken
6367     if(1) { // <- FIXME (don't need this)
6368       set_jump_target(nottaken,(int)out);
6369       assem_debug("1:\n");
6370       if(!likely[i]) {
6371         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6372                       ds_unneeded,ds_unneeded_upper);
6373         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6374         address_generation(i+1,&branch_regs[i],0);
6375         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6376         ds_assemble(i+1,&branch_regs[i]);
6377       }
6378       cc=get_reg(branch_regs[i].regmap,CCREG);
6379       if(cc==-1&&!likely[i]) {
6380         // Cycle count isn't in a register, temporarily load it then write it out
6381         emit_loadreg(CCREG,HOST_CCREG);
6382         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
6383         int jaddr=(int)out;
6384         emit_jns(0);
6385         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6386         emit_storereg(CCREG,HOST_CCREG);
6387       }
6388       else{
6389         cc=get_reg(i_regmap,CCREG);
6390         assert(cc==HOST_CCREG);
6391         emit_addimm_and_set_flags(CLOCK_ADJUST(ccadj[i]+2),cc);
6392         int jaddr=(int)out;
6393         emit_jns(0);
6394         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6395       }
6396     }
6397   }
6398 }
6399
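// Assemble a branch whose delay slot falls in the next virtual page.  The
// slot cannot be compiled into this block (the next page may be invalidated
// independently), so the computed destination is parked in HOST_BTREG /
// branch_target and execution continues through a delay-slot-only entry
// point generated by pagespan_ds() in the following page.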
6400 static void pagespan_assemble(int i,struct regstat *i_regs)
6401 {
6402   int s1l=get_reg(i_regs->regmap,rs1[i]);
6403   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6404   int s2l=get_reg(i_regs->regmap,rs2[i]);
6405   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6406   void *nt_branch=NULL;
6407   int taken=0;
6408   int nottaken=0;
6409   int unconditional=0;
6410   if(rs1[i]==0)
6411   {
6412     s1l=s2l;s1h=s2h;
6413     s2l=s2h=-1;
6414   }
6415   else if(rs2[i]==0)
6416   {
6417     s2l=s2h=-1;
6418   }
6419   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6420     s1h=s2h=-1;
6421   }
6422   int hr=0;
6423   int addr,alt,ntaddr;
6424   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6425   else {
6426     while(hr<HOST_REGS)
6427     {
6428       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6429          (i_regs->regmap[hr]&63)!=rs1[i] &&
6430          (i_regs->regmap[hr]&63)!=rs2[i] )
6431       {
6432         addr=hr++;break;
6433       }
6434       hr++;
6435     }
6436   }
6437   while(hr<HOST_REGS)
6438   {
6439     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6440        (i_regs->regmap[hr]&63)!=rs1[i] &&
6441        (i_regs->regmap[hr]&63)!=rs2[i] )
6442     {
6443       alt=hr++;break;
6444     }
6445     hr++;
6446   }
6447   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6448   {
6449     while(hr<HOST_REGS)
6450     {
6451       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6452          (i_regs->regmap[hr]&63)!=rs1[i] &&
6453          (i_regs->regmap[hr]&63)!=rs2[i] )
6454       {
6455         ntaddr=hr;break;
6456       }
6457       hr++;
6458     }
6459   }
6460   assert(hr<HOST_REGS);
6461   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6462     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6463   }
6464   emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i]+2),HOST_CCREG);
6465   if(opcode[i]==2) // J
6466   {
6467     unconditional=1;
6468   }
6469   if(opcode[i]==3) // JAL
6470   {
6471     // TODO: mini_ht
6472     int rt=get_reg(i_regs->regmap,31);
6473     emit_movimm(start+i*4+8,rt);
6474     unconditional=1;
6475   }
6476   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6477   {
6478     emit_mov(s1l,addr);
6479     if(opcode2[i]==9) // JALR
6480     {
6481       int rt=get_reg(i_regs->regmap,rt1[i]);
6482       emit_movimm(start+i*4+8,rt);
6483     }
6484   }
6485   if((opcode[i]&0x3f)==4) // BEQ
6486   {
6487     if(rs1[i]==rs2[i])
6488     {
6489       unconditional=1;
6490     }
6491     else
6492     #ifdef HAVE_CMOV_IMM
6493     if(s1h<0) {
6494       if(s2l>=0) emit_cmp(s1l,s2l);
6495       else emit_test(s1l,s1l);
6496       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6497     }
6498     else
6499     #endif
6500     {
6501       assert(s1l>=0);
6502       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6503       if(s1h>=0) {
6504         if(s2h>=0) emit_cmp(s1h,s2h);
6505         else emit_test(s1h,s1h);
6506         emit_cmovne_reg(alt,addr);
6507       }
6508       if(s2l>=0) emit_cmp(s1l,s2l);
6509       else emit_test(s1l,s1l);
6510       emit_cmovne_reg(alt,addr);
6511     }
6512   }
6513   if((opcode[i]&0x3f)==5) // BNE
6514   {
6515     #ifdef HAVE_CMOV_IMM
6516     if(s1h<0) {
6517       if(s2l>=0) emit_cmp(s1l,s2l);
6518       else emit_test(s1l,s1l);
6519       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6520     }
6521     else
6522     #endif
6523     {
6524       assert(s1l>=0);
6525       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6526       if(s1h>=0) {
6527         if(s2h>=0) emit_cmp(s1h,s2h);
6528         else emit_test(s1h,s1h);
6529         emit_cmovne_reg(alt,addr);
6530       }
6531       if(s2l>=0) emit_cmp(s1l,s2l);
6532       else emit_test(s1l,s1l);
6533       emit_cmovne_reg(alt,addr);
6534     }
6535   }
6536   if((opcode[i]&0x3f)==0x14) // BEQL
6537   {
6538     if(s1h>=0) {
6539       if(s2h>=0) emit_cmp(s1h,s2h);
6540       else emit_test(s1h,s1h);
6541       nottaken=(int)out;
6542       emit_jne(0);
6543     }
6544     if(s2l>=0) emit_cmp(s1l,s2l);
6545     else emit_test(s1l,s1l);
6546     if(nottaken) set_jump_target(nottaken,(int)out);
6547     nottaken=(int)out;
6548     emit_jne(0);
6549   }
6550   if((opcode[i]&0x3f)==0x15) // BNEL
6551   {
6552     if(s1h>=0) {
6553       if(s2h>=0) emit_cmp(s1h,s2h);
6554       else emit_test(s1h,s1h);
6555       taken=(int)out;
6556       emit_jne(0);
6557     }
6558     if(s2l>=0) emit_cmp(s1l,s2l);
6559     else emit_test(s1l,s1l);
6560     nottaken=(int)out;
6561     emit_jeq(0);
6562     if(taken) set_jump_target(taken,(int)out);
6563   }
6564   if((opcode[i]&0x3f)==6) // BLEZ
6565   {
6566     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6567     emit_cmpimm(s1l,1);
6568     if(s1h>=0) emit_mov(addr,ntaddr);
6569     emit_cmovl_reg(alt,addr);
6570     if(s1h>=0) {
6571       emit_test(s1h,s1h);
6572       emit_cmovne_reg(ntaddr,addr);
6573       emit_cmovs_reg(alt,addr);
6574     }
6575   }
6576   if((opcode[i]&0x3f)==7) // BGTZ
6577   {
6578     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6579     emit_cmpimm(s1l,1);
6580     if(s1h>=0) emit_mov(addr,alt);
6581     emit_cmovl_reg(ntaddr,addr);
6582     if(s1h>=0) {
6583       emit_test(s1h,s1h);
6584       emit_cmovne_reg(alt,addr);
6585       emit_cmovs_reg(ntaddr,addr);
6586     }
6587   }
6588   if((opcode[i]&0x3f)==0x16) // BLEZL
6589   {
6590     assert((opcode[i]&0x3f)!=0x16);
6591   }
6592   if((opcode[i]&0x3f)==0x17) // BGTZL
6593   {
6594     assert((opcode[i]&0x3f)!=0x17);
6595   }
6596   assert(opcode[i]!=1); // BLTZ/BGEZ
6597
6598   //FIXME: Check CSREG
6599   if(opcode[i]==0x11 && opcode2[i]==0x08) {
6600     if((source[i]&0x30000)==0) // BC1F
6601     {
6602       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6603       emit_testimm(s1l,0x800000);
6604       emit_cmovne_reg(alt,addr);
6605     }
6606     if((source[i]&0x30000)==0x10000) // BC1T
6607     {
6608       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6609       emit_testimm(s1l,0x800000);
6610       emit_cmovne_reg(alt,addr);
6611     }
6612     if((source[i]&0x30000)==0x20000) // BC1FL
6613     {
6614       emit_testimm(s1l,0x800000);
6615       nottaken=(int)out;
6616       emit_jne(0);
6617     }
6618     if((source[i]&0x30000)==0x30000) // BC1TL
6619     {
6620       emit_testimm(s1l,0x800000);
6621       nottaken=(int)out;
6622       emit_jeq(0);
6623     }
6624   }
6625
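  // Write back dirty registers, leave the chosen target in BTREG, then jump
  // to the stub for the delay-slot entry point.  target_addr below is
  // (start+i*4+4)|1; the set low bit appears to mark a delay-slot entry,
  // matching the start+1 vaddr that pagespan_ds() registers.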
6626   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6627   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6628   if(likely[i]||unconditional)
6629   {
6630     emit_movimm(ba[i],HOST_BTREG);
6631   }
6632   else if(addr!=HOST_BTREG)
6633   {
6634     emit_mov(addr,HOST_BTREG);
6635   }
6636   void *branch_addr=out;
6637   emit_jmp(0);
6638   int target_addr=start+i*4+5;
6639   void *stub=out;
6640   void *compiled_target_addr=check_addr(target_addr);
6641   emit_extjump_ds((int)branch_addr,target_addr);
6642   if(compiled_target_addr) {
6643     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6644     add_link(target_addr,stub);
6645   }
6646   else set_jump_target((int)branch_addr,(int)stub);
6647   if(likely[i]) {
6648     // Not-taken path
6649     set_jump_target((int)nottaken,(int)out);
6650     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6651     void *branch_addr=out;
6652     emit_jmp(0);
6653     int target_addr=start+i*4+8;
6654     void *stub=out;
6655     void *compiled_target_addr=check_addr(target_addr);
6656     emit_extjump_ds((int)branch_addr,target_addr);
6657     if(compiled_target_addr) {
6658       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6659       add_link(target_addr,stub);
6660     }
6661     else set_jump_target((int)branch_addr,(int)stub);
6662   }
6663 }
6664
6665 // Assemble the delay slot for the above
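// The delay slot of a page-spanning branch becomes its own block entry,
// registered under vaddr start+1 (low bit = delay-slot entry).  After the
// slot executes, the branch target kept in BTREG is compared with start+4:
// equal means the branch fell through to the next instruction, anything
// else is dispatched through jump_vaddr_reg.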
6666 static void pagespan_ds()
6667 {
6668   assem_debug("initial delay slot:\n");
6669   u_int vaddr=start+1;
6670   u_int page=get_page(vaddr);
6671   u_int vpage=get_vpage(vaddr);
6672   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6673   do_dirty_stub_ds();
6674   ll_add(jump_in+page,vaddr,(void *)out);
6675   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6676   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6677     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6678   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6679     emit_writeword(HOST_BTREG,(int)&branch_target);
6680   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6681   address_generation(0,&regs[0],regs[0].regmap_entry);
6682   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6683     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6684   cop1_usable=0;
6685   is_delayslot=0;
6686   switch(itype[0]) {
6687     case ALU:
6688       alu_assemble(0,&regs[0]);break;
6689     case IMM16:
6690       imm16_assemble(0,&regs[0]);break;
6691     case SHIFT:
6692       shift_assemble(0,&regs[0]);break;
6693     case SHIFTIMM:
6694       shiftimm_assemble(0,&regs[0]);break;
6695     case LOAD:
6696       load_assemble(0,&regs[0]);break;
6697     case LOADLR:
6698       loadlr_assemble(0,&regs[0]);break;
6699     case STORE:
6700       store_assemble(0,&regs[0]);break;
6701     case STORELR:
6702       storelr_assemble(0,&regs[0]);break;
6703     case COP0:
6704       cop0_assemble(0,&regs[0]);break;
6705     case COP1:
6706       cop1_assemble(0,&regs[0]);break;
6707     case C1LS:
6708       c1ls_assemble(0,&regs[0]);break;
6709     case COP2:
6710       cop2_assemble(0,&regs[0]);break;
6711     case C2LS:
6712       c2ls_assemble(0,&regs[0]);break;
6713     case C2OP:
6714       c2op_assemble(0,&regs[0]);break;
6715     case FCONV:
6716       fconv_assemble(0,&regs[0]);break;
6717     case FLOAT:
6718       float_assemble(0,&regs[0]);break;
6719     case FCOMP:
6720       fcomp_assemble(0,&regs[0]);break;
6721     case MULTDIV:
6722       multdiv_assemble(0,&regs[0]);break;
6723     case MOV:
6724       mov_assemble(0,&regs[0]);break;
6725     case SYSCALL:
6726     case HLECALL:
6727     case INTCALL:
6728     case SPAN:
6729     case UJUMP:
6730     case RJUMP:
6731     case CJUMP:
6732     case SJUMP:
6733     case FJUMP:
6734       SysPrintf("Jump in the delay slot.  This is probably a bug.\n");
6735   }
6736   int btaddr=get_reg(regs[0].regmap,BTREG);
6737   if(btaddr<0) {
6738     btaddr=get_reg(regs[0].regmap,-1);
6739     emit_readword((int)&branch_target,btaddr);
6740   }
6741   assert(btaddr!=HOST_CCREG);
6742   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6743 #ifdef HOST_IMM8
6744   emit_movimm(start+4,HOST_TEMPREG);
6745   emit_cmp(btaddr,HOST_TEMPREG);
6746 #else
6747   emit_cmpimm(btaddr,start+4);
6748 #endif
6749   int branch=(int)out;
6750   emit_jeq(0);
6751   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6752   emit_jmp(jump_vaddr_reg[btaddr]);
6753   set_jump_target(branch,(int)out);
6754   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6755   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6756 }
6757
6758 // Basic liveness analysis for MIPS registers
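// unneeded_reg[i] has bit r set when the value in MIPS register r is dead at
// instruction i, i.e. it will be overwritten before it is read again, so it
// need not be written back or preserved across a branch; the _upper arrays
// track the high 32 bits separately.  The scan runs backwards: a write makes
// a register unneeded, a read makes it needed again, roughly
//   u |=  1LL<<rt1[i];    // destination -> dead before this instruction
//   u &= ~(1LL<<rs1[i]);  // source      -> live before this instruction
// Bit 0 stays set because $zero never needs to be preserved.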
6759 void unneeded_registers(int istart,int iend,int r)
6760 {
6761   int i;
6762   uint64_t u,uu,gte_u,b,bu,gte_bu;
6763   uint64_t temp_u,temp_uu,temp_gte_u=0;
6764   uint64_t tdep;
6765   uint64_t gte_u_unknown=0;
6766   if(new_dynarec_hacks&NDHACK_GTE_UNNEEDED)
6767     gte_u_unknown=~0ll;
6768   if(iend==slen-1) {
6769     u=1;uu=1;
6770     gte_u=gte_u_unknown;
6771   }else{
6772     u=unneeded_reg[iend+1];
6773     uu=unneeded_reg_upper[iend+1];
6774     u=1;uu=1;
6775     gte_u=gte_unneeded[iend+1];
6776   }
6777
6778   for (i=iend;i>=istart;i--)
6779   {
6780     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6781     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6782     {
6783       // If subroutine call, flag return address as a possible branch target
6784       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6785
6786       if(ba[i]<start || ba[i]>=(start+slen*4))
6787       {
6788         // Branch out of this block, flush all regs
6789         u=1;
6790         uu=1;
6791         gte_u=gte_u_unknown;
6792         /* Hexagon hack
6793         if(itype[i]==UJUMP&&rt1[i]==31)
6794         {
6795           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6796         }
6797         if(itype[i]==RJUMP&&rs1[i]==31)
6798         {
6799           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6800         }
6801         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6802           if(itype[i]==UJUMP&&rt1[i]==31)
6803           {
6804             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6805             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6806           }
6807           if(itype[i]==RJUMP&&rs1[i]==31)
6808           {
6809             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6810             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6811           }
6812         }*/
6813         branch_unneeded_reg[i]=u;
6814         branch_unneeded_reg_upper[i]=uu;
6815         // Merge in delay slot
6816         tdep=(~uu>>rt1[i+1])&1;
6817         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6818         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6819         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6820         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6821         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6822         u|=1;uu|=1;
6823         gte_u|=gte_rt[i+1];
6824         gte_u&=~gte_rs[i+1];
6825         // If branch is "likely" (and conditional)
6826         // then we skip the delay slot on the fall-thru path
6827         if(likely[i]) {
6828           if(i<slen-1) {
6829             u&=unneeded_reg[i+2];
6830             uu&=unneeded_reg_upper[i+2];
6831             gte_u&=gte_unneeded[i+2];
6832           }
6833           else
6834           {
6835             u=1;
6836             uu=1;
6837             gte_u=gte_u_unknown;
6838           }
6839         }
6840       }
6841       else
6842       {
6843         // Internal branch, flag target
6844         bt[(ba[i]-start)>>2]=1;
6845         if(ba[i]<=start+i*4) {
6846           // Backward branch
6847           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6848           {
6849             // Unconditional branch
6850             temp_u=1;temp_uu=1;
6851             temp_gte_u=0;
6852           } else {
6853             // Conditional branch (not taken case)
6854             temp_u=unneeded_reg[i+2];
6855             temp_uu=unneeded_reg_upper[i+2];
6856             temp_gte_u&=gte_unneeded[i+2];
6857           }
6858           // Merge in delay slot
6859           tdep=(~temp_uu>>rt1[i+1])&1;
6860           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6861           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6862           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6863           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6864           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6865           temp_u|=1;temp_uu|=1;
6866           temp_gte_u|=gte_rt[i+1];
6867           temp_gte_u&=~gte_rs[i+1];
6868           // If branch is "likely" (and conditional)
6869           // then we skip the delay slot on the fall-thru path
6870           if(likely[i]) {
6871             if(i<slen-1) {
6872               temp_u&=unneeded_reg[i+2];
6873               temp_uu&=unneeded_reg_upper[i+2];
6874               temp_gte_u&=gte_unneeded[i+2];
6875             }
6876             else
6877             {
6878               temp_u=1;
6879               temp_uu=1;
6880               temp_gte_u=gte_u_unknown;
6881             }
6882           }
6883           tdep=(~temp_uu>>rt1[i])&1;
6884           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6885           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6886           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6887           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6888           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6889           temp_u|=1;temp_uu|=1;
6890           temp_gte_u|=gte_rt[i];
6891           temp_gte_u&=~gte_rs[i];
6892           unneeded_reg[i]=temp_u;
6893           unneeded_reg_upper[i]=temp_uu;
6894           gte_unneeded[i]=temp_gte_u;
6895           // Only go three levels deep.  This recursion can take an
6896           // excessive amount of time if there are a lot of nested loops.
6897           if(r<2) {
6898             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6899           }else{
6900             unneeded_reg[(ba[i]-start)>>2]=1;
6901             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6902             gte_unneeded[(ba[i]-start)>>2]=gte_u_unknown;
6903           }
6904         } /*else*/ if(1) {
6905           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6906           {
6907             // Unconditional branch
6908             u=unneeded_reg[(ba[i]-start)>>2];
6909             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6910             gte_u=gte_unneeded[(ba[i]-start)>>2];
6911             branch_unneeded_reg[i]=u;
6912             branch_unneeded_reg_upper[i]=uu;
6913         //u=1;
6914         //uu=1;
6915         //branch_unneeded_reg[i]=u;
6916         //branch_unneeded_reg_upper[i]=uu;
6917             // Merge in delay slot
6918             tdep=(~uu>>rt1[i+1])&1;
6919             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6920             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6921             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6922             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6923             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6924             u|=1;uu|=1;
6925             gte_u|=gte_rt[i+1];
6926             gte_u&=~gte_rs[i+1];
6927           } else {
6928             // Conditional branch
6929             b=unneeded_reg[(ba[i]-start)>>2];
6930             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6931             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6932             branch_unneeded_reg[i]=b;
6933             branch_unneeded_reg_upper[i]=bu;
6934         //b=1;
6935         //bu=1;
6936         //branch_unneeded_reg[i]=b;
6937         //branch_unneeded_reg_upper[i]=bu;
6938             // Branch delay slot
6939             tdep=(~uu>>rt1[i+1])&1;
6940             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6941             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6942             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6943             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6944             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6945             b|=1;bu|=1;
6946             gte_bu|=gte_rt[i+1];
6947             gte_bu&=~gte_rs[i+1];
6948             // If branch is "likely" then we skip the
6949             // delay slot on the fall-thru path
6950             if(likely[i]) {
6951               u=b;
6952               uu=bu;
6953               gte_u=gte_bu;
6954               if(i<slen-1) {
6955                 u&=unneeded_reg[i+2];
6956                 uu&=unneeded_reg_upper[i+2];
6957                 gte_u&=gte_unneeded[i+2];
6958         //u=1;
6959         //uu=1;
6960               }
6961             } else {
6962               u&=b;
6963               uu&=bu;
6964               gte_u&=gte_bu;
6965         //u=1;
6966         //uu=1;
6967             }
6968             if(i<slen-1) {
6969               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6970               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6971         //branch_unneeded_reg[i]=1;
6972         //branch_unneeded_reg_upper[i]=1;
6973             } else {
6974               branch_unneeded_reg[i]=1;
6975               branch_unneeded_reg_upper[i]=1;
6976             }
6977           }
6978         }
6979       }
6980     }
6981     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6982     {
6983       // SYSCALL instruction (software interrupt)
6984       u=1;
6985       uu=1;
6986     }
6987     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6988     {
6989       // ERET instruction (return from interrupt)
6990       u=1;
6991       uu=1;
6992     }
6993     //u=uu=1; // DEBUG
6994     tdep=(~uu>>rt1[i])&1;
6995     // Written registers are unneeded
6996     u|=1LL<<rt1[i];
6997     u|=1LL<<rt2[i];
6998     uu|=1LL<<rt1[i];
6999     uu|=1LL<<rt2[i];
7000     gte_u|=gte_rt[i];
7001     // Accessed registers are needed
7002     u&=~(1LL<<rs1[i]);
7003     u&=~(1LL<<rs2[i]);
7004     uu&=~(1LL<<us1[i]);
7005     uu&=~(1LL<<us2[i]);
7006     gte_u&=~gte_rs[i];
7007     if(gte_rs[i]&&rt1[i]&&(unneeded_reg[i+1]&(1ll<<rt1[i])))
7008       gte_u|=gte_rs[i]&gte_unneeded[i+1]; // MFC2/CFC2 to dead register, unneeded
7009     // Source-target dependencies
7010     uu&=~(tdep<<dep1[i]);
7011     uu&=~(tdep<<dep2[i]);
7012     // R0 is always unneeded
7013     u|=1;uu|=1;
7014     // Save it
7015     unneeded_reg[i]=u;
7016     unneeded_reg_upper[i]=uu;
7017     gte_unneeded[i]=gte_u;
7018     /*
7019     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
7020     printf("U:");
7021     int r;
7022     for(r=1;r<=CCREG;r++) {
7023       if((unneeded_reg[i]>>r)&1) {
7024         if(r==HIREG) printf(" HI");
7025         else if(r==LOREG) printf(" LO");
7026         else printf(" r%d",r);
7027       }
7028     }
7029     printf(" UU:");
7030     for(r=1;r<=CCREG;r++) {
7031       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
7032         if(r==HIREG) printf(" HI");
7033         else if(r==LOREG) printf(" LO");
7034         else printf(" r%d",r);
7035       }
7036     }
7037     printf("\n");*/
7038   }
7039 #ifdef FORCE32
7040   for (i=iend;i>=istart;i--)
7041   {
7042     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
7043   }
7044 #endif
7045 }
7046
7047 // Identify registers which are likely to contain 32-bit values
7048 // This is used to predict whether any branches will jump to a
7049 // location with 64-bit values in registers.
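// p32[i] is a bitmask over MIPS registers: bit r set means r is expected to
// hold a sign-extended 32-bit value after instruction i.  This is a forward
// pass: 32-bit ops such as ADDU, SLT or LUI set the bit, 64-bit ops such as
// DADDU, LD or DSLL clear it, and moves/logical ops propagate it from their
// source registers.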
7050 static void provisional_32bit()
7051 {
7052   int i,j;
7053   uint64_t is32=1;
7054   uint64_t lastbranch=1;
7055
7056   for(i=0;i<slen;i++)
7057   {
7058     if(i>0) {
7059       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
7060         if(i>1) is32=lastbranch;
7061         else is32=1;
7062       }
7063     }
7064     if(i>1)
7065     {
7066       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
7067         if(likely[i-2]) {
7068           if(i>2) is32=lastbranch;
7069           else is32=1;
7070         }
7071       }
7072       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
7073       {
7074         if(rs1[i-2]==0||rs2[i-2]==0)
7075         {
7076           if(rs1[i-2]) {
7077             is32|=1LL<<rs1[i-2];
7078           }
7079           if(rs2[i-2]) {
7080             is32|=1LL<<rs2[i-2];
7081           }
7082         }
7083       }
7084     }
7085     // If something jumps here with 64-bit values
7086     // then promote those registers to 64 bits
7087     if(bt[i])
7088     {
7089       uint64_t temp_is32=is32;
7090       for(j=i-1;j>=0;j--)
7091       {
7092         if(ba[j]==start+i*4)
7093           //temp_is32&=branch_regs[j].is32;
7094           temp_is32&=p32[j];
7095       }
7096       for(j=i;j<slen;j++)
7097       {
7098         if(ba[j]==start+i*4)
7099           temp_is32=1;
7100       }
7101       is32=temp_is32;
7102     }
7103     int type=itype[i];
7104     int op=opcode[i];
7105     int op2=opcode2[i];
7106     int rt=rt1[i];
7107     int s1=rs1[i];
7108     int s2=rs2[i];
7109     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7110       // Branches don't write registers, consider the delay slot instead.
7111       type=itype[i+1];
7112       op=opcode[i+1];
7113       op2=opcode2[i+1];
7114       rt=rt1[i+1];
7115       s1=rs1[i+1];
7116       s2=rs2[i+1];
7117       lastbranch=is32;
7118     }
7119     switch(type) {
7120       case LOAD:
7121         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
7122            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
7123           is32&=~(1LL<<rt);
7124         else
7125           is32|=1LL<<rt;
7126         break;
7127       case STORE:
7128       case STORELR:
7129         break;
7130       case LOADLR:
7131         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
7132         if(op==0x22) is32|=1LL<<rt; // LWL
7133         break;
7134       case IMM16:
7135         if (op==0x08||op==0x09|| // ADDI/ADDIU
7136             op==0x0a||op==0x0b|| // SLTI/SLTIU
7137             op==0x0c|| // ANDI
7138             op==0x0f)  // LUI
7139         {
7140           is32|=1LL<<rt;
7141         }
7142         if(op==0x18||op==0x19) { // DADDI/DADDIU
7143           is32&=~(1LL<<rt);
7144           //if(imm[i]==0)
7145           //  is32|=((is32>>s1)&1LL)<<rt;
7146         }
7147         if(op==0x0d||op==0x0e) { // ORI/XORI
7148           uint64_t sr=((is32>>s1)&1LL);
7149           is32&=~(1LL<<rt);
7150           is32|=sr<<rt;
7151         }
7152         break;
7153       case UJUMP:
7154         break;
7155       case RJUMP:
7156         break;
7157       case CJUMP:
7158         break;
7159       case SJUMP:
7160         break;
7161       case FJUMP:
7162         break;
7163       case ALU:
7164         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
7165           is32|=1LL<<rt;
7166         }
7167         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7168           is32|=1LL<<rt;
7169         }
7170         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7171           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
7172           is32&=~(1LL<<rt);
7173           is32|=sr<<rt;
7174         }
7175         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
7176           if(s1==0&&s2==0) {
7177             is32|=1LL<<rt;
7178           }
7179           else if(s2==0) {
7180             uint64_t sr=((is32>>s1)&1LL);
7181             is32&=~(1LL<<rt);
7182             is32|=sr<<rt;
7183           }
7184           else if(s1==0) {
7185             uint64_t sr=((is32>>s2)&1LL);
7186             is32&=~(1LL<<rt);
7187             is32|=sr<<rt;
7188           }
7189           else {
7190             is32&=~(1LL<<rt);
7191           }
7192         }
7193         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
7194           if(s1==0&&s2==0) {
7195             is32|=1LL<<rt;
7196           }
7197           else if(s2==0) {
7198             uint64_t sr=((is32>>s1)&1LL);
7199             is32&=~(1LL<<rt);
7200             is32|=sr<<rt;
7201           }
7202           else {
7203             is32&=~(1LL<<rt);
7204           }
7205         }
7206         break;
7207       case MULTDIV:
7208         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7209           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7210         }
7211         else {
7212           is32|=(1LL<<HIREG)|(1LL<<LOREG);
7213         }
7214         break;
7215       case MOV:
7216         {
7217           uint64_t sr=((is32>>s1)&1LL);
7218           is32&=~(1LL<<rt);
7219           is32|=sr<<rt;
7220         }
7221         break;
7222       case SHIFT:
7223         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7224         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7225         break;
7226       case SHIFTIMM:
7227         is32|=1LL<<rt;
7228         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7229         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7230         break;
7231       case COP0:
7232         if(op2==0) is32|=1LL<<rt; // MFC0
7233         break;
7234       case COP1:
7235       case COP2:
7236         if(op2==0) is32|=1LL<<rt; // MFC1
7237         if(op2==1) is32&=~(1LL<<rt); // DMFC1
7238         if(op2==2) is32|=1LL<<rt; // CFC1
7239         break;
7240       case C1LS:
7241       case C2LS:
7242         break;
7243       case FLOAT:
7244       case FCONV:
7245         break;
7246       case FCOMP:
7247         break;
7248       case C2OP:
7249       case SYSCALL:
7250       case HLECALL:
7251         break;
7252       default:
7253         break;
7254     }
7255     is32|=1;
7256     p32[i]=is32;
7257
7258     if(i>0)
7259     {
7260       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7261       {
7262         if(rt1[i-1]==31) // JAL/JALR
7263         {
7264           // Subroutine call will return here, don't alloc any registers
7265           is32=1;
7266         }
7267         else if(i+1<slen)
7268         {
7269           // Internal branch will jump here, match registers to caller
7270           is32=0x3FFFFFFFFLL;
7271         }
7272       }
7273     }
7274   }
7275 }
7276
7277 // Identify registers which may be assumed to contain 32-bit values
7278 // and where optimizations will rely on this.
7279 // This is used to determine whether backward branches can safely
7280 // jump to a location with 64-bit values in registers.
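// pr32[i] is the backward counterpart of p32[]: bit r set means a later use
// relies on register r actually holding a 32-bit value at instruction i, so
// it has to be kept (and written back) as 32-bit there.  Branch targets,
// delay-slot sources and dirty 32-bit host registers all feed into this
// requirement, as the code below shows.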
7281 static void provisional_r32()
7282 {
7283   u_int r32=0;
7284   int i;
7285
7286   for (i=slen-1;i>=0;i--)
7287   {
7288     int hr;
7289     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7290     {
7291       if(ba[i]<start || ba[i]>=(start+slen*4))
7292       {
7293         // Branch out of this block, don't need anything
7294         r32=0;
7295       }
7296       else
7297       {
7298         // Internal branch
7299         // Need whatever matches the target
7300         // (and doesn't get overwritten by the delay slot instruction)
7301         r32=0;
7302         int t=(ba[i]-start)>>2;
7303         if(ba[i]>start+i*4) {
7304           // Forward branch
7305           //if(!(requires_32bit[t]&~regs[i].was32))
7306           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7307           if(!(pr32[t]&~regs[i].was32))
7308             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7309         }else{
7310           // Backward branch
7311           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7312             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7313         }
7314       }
7315       // Conditional branch may need registers for following instructions
7316       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7317       {
7318         if(i<slen-2) {
7319           //r32|=requires_32bit[i+2];
7320           r32|=pr32[i+2];
7321           r32&=regs[i].was32;
7322           // Mark this address as a branch target since it may be called
7323           // upon return from interrupt
7324           //bt[i+2]=1;
7325         }
7326       }
7327       // Merge in delay slot
7328       if(!likely[i]) {
7329         // These are overwritten unless the branch is "likely"
7330         // and the delay slot is nullified if not taken
7331         r32&=~(1LL<<rt1[i+1]);
7332         r32&=~(1LL<<rt2[i+1]);
7333       }
7334       // Assume these are needed (delay slot)
7335       if(us1[i+1]>0)
7336       {
7337         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7338       }
7339       if(us2[i+1]>0)
7340       {
7341         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7342       }
7343       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7344       {
7345         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7346       }
7347       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7348       {
7349         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7350       }
7351     }
7352     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7353     {
7354       // SYSCALL instruction (software interrupt)
7355       r32=0;
7356     }
7357     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7358     {
7359       // ERET instruction (return from interrupt)
7360       r32=0;
7361     }
7362     // Check 32 bits
7363     r32&=~(1LL<<rt1[i]);
7364     r32&=~(1LL<<rt2[i]);
7365     if(us1[i]>0)
7366     {
7367       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7368     }
7369     if(us2[i]>0)
7370     {
7371       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7372     }
7373     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7374     {
7375       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7376     }
7377     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7378     {
7379       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7380     }
7381     //requires_32bit[i]=r32;
7382     pr32[i]=r32;
7383
7384     // Dirty registers which are 32-bit require 32-bit input,
7385     // as they will be written back as 32-bit values
7386     for(hr=0;hr<HOST_REGS;hr++)
7387     {
7388       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7389         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7390           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7391           pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7392           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7393         }
7394       }
7395     }
7396   }
7397 }
7398
7399 // Write back dirty registers as soon as we will no longer modify them,
7400 // so that we don't end up with lots of writes at the branches.
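// will_dirty[i] and wont_dirty[i] are bitmaps over *host* registers tracking
// which registers are certain to be dirtied, and which are certain not to be
// dirtied again, between instruction i and the end of the block or branch.
// They let the writeback of a dirty register be scheduled at the last point
// it is modified instead of being deferred to every exit.  When wr is
// nonzero the results are applied to the regs[]/branch_regs[] dirty bits;
// recursive calls into branch targets pass wr=0 to bound the work.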
7401 void clean_registers(int istart,int iend,int wr)
7402 {
7403   int i;
7404   int r;
7405   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7406   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7407   if(iend==slen-1) {
7408     will_dirty_i=will_dirty_next=0;
7409     wont_dirty_i=wont_dirty_next=0;
7410   }else{
7411     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7412     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7413   }
7414   for (i=iend;i>=istart;i--)
7415   {
7416     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7417     {
7418       if(ba[i]<start || ba[i]>=(start+slen*4))
7419       {
7420         // Branch out of this block, flush all regs
7421         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7422         {
7423           // Unconditional branch
7424           will_dirty_i=0;
7425           wont_dirty_i=0;
7426           // Merge in delay slot (will dirty)
7427           for(r=0;r<HOST_REGS;r++) {
7428             if(r!=EXCLUDE_REG) {
7429               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7430               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7431               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7432               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7433               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7434               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7435               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7436               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7437               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7438               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7439               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7440               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7441               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7442               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7443             }
7444           }
7445         }
7446         else
7447         {
7448           // Conditional branch
7449           will_dirty_i=0;
7450           wont_dirty_i=wont_dirty_next;
7451           // Merge in delay slot (will dirty)
7452           for(r=0;r<HOST_REGS;r++) {
7453             if(r!=EXCLUDE_REG) {
7454               if(!likely[i]) {
7455                 // Might not dirty if likely branch is not taken
7456                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7457                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7458                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7459                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7460                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7461                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7462                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7463                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7464                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7465                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7466                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7467                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7468                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7469                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7470               }
7471             }
7472           }
7473         }
7474         // Merge in delay slot (wont dirty)
7475         for(r=0;r<HOST_REGS;r++) {
7476           if(r!=EXCLUDE_REG) {
7477             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7478             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7479             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7480             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7481             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7482             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7483             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7484             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7485             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7486             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7487           }
7488         }
7489         if(wr) {
7490           #ifndef DESTRUCTIVE_WRITEBACK
7491           branch_regs[i].dirty&=wont_dirty_i;
7492           #endif
7493           branch_regs[i].dirty|=will_dirty_i;
7494         }
7495       }
7496       else
7497       {
7498         // Internal branch
7499         if(ba[i]<=start+i*4) {
7500           // Backward branch
7501           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7502           {
7503             // Unconditional branch
7504             temp_will_dirty=0;
7505             temp_wont_dirty=0;
7506             // Merge in delay slot (will dirty)
7507             for(r=0;r<HOST_REGS;r++) {
7508               if(r!=EXCLUDE_REG) {
7509                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7510                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7511                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7512                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7513                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7514                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7515                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7516                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7517                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7518                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7519                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7520                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7521                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7522                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7523               }
7524             }
7525           } else {
7526             // Conditional branch (not taken case)
7527             temp_will_dirty=will_dirty_next;
7528             temp_wont_dirty=wont_dirty_next;
7529             // Merge in delay slot (will dirty)
7530             for(r=0;r<HOST_REGS;r++) {
7531               if(r!=EXCLUDE_REG) {
7532                 if(!likely[i]) {
7533                   // Will not dirty if likely branch is not taken
7534                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7535                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7536                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7537                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7538                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7539                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7540                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7541                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7542                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7543                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7544                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7545                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7546                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7547                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7548                 }
7549               }
7550             }
7551           }
7552           // Merge in delay slot (wont dirty)
7553           for(r=0;r<HOST_REGS;r++) {
7554             if(r!=EXCLUDE_REG) {
7555               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7556               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7557               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7558               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7559               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7560               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7561               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7562               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7563               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7564               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7565             }
7566           }
7567           // Deal with changed mappings
7568           if(i<iend) {
7569             for(r=0;r<HOST_REGS;r++) {
7570               if(r!=EXCLUDE_REG) {
7571                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7572                   temp_will_dirty&=~(1<<r);
7573                   temp_wont_dirty&=~(1<<r);
7574                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7575                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7576                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7577                   } else {
7578                     temp_will_dirty|=1<<r;
7579                     temp_wont_dirty|=1<<r;
7580                   }
7581                 }
7582               }
7583             }
7584           }
7585           if(wr) {
7586             will_dirty[i]=temp_will_dirty;
7587             wont_dirty[i]=temp_wont_dirty;
7588             clean_registers((ba[i]-start)>>2,i-1,0);
7589           }else{
7590             // Limit recursion.  It can take an excessive amount
7591             // of time if there are a lot of nested loops.
7592             will_dirty[(ba[i]-start)>>2]=0;
7593             wont_dirty[(ba[i]-start)>>2]=-1;
7594           }
7595         }
7596         /*else*/ if(1)
7597         {
7598           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7599           {
7600             // Unconditional branch
7601             will_dirty_i=0;
7602             wont_dirty_i=0;
7603           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7604             for(r=0;r<HOST_REGS;r++) {
7605               if(r!=EXCLUDE_REG) {
7606                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7607                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7608                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7609                 }
7610                 if(branch_regs[i].regmap[r]>=0) {
7611                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7612                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7613                 }
7614               }
7615             }
7616           //}
7617             // Merge in delay slot
7618             for(r=0;r<HOST_REGS;r++) {
7619               if(r!=EXCLUDE_REG) {
7620                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7621                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7622                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7623                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7624                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7625                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7626                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7627                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7628                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7629                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7630                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7631                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7632                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7633                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7634               }
7635             }
7636           } else {
7637             // Conditional branch
7638             will_dirty_i=will_dirty_next;
7639             wont_dirty_i=wont_dirty_next;
7640           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7641             for(r=0;r<HOST_REGS;r++) {
7642               if(r!=EXCLUDE_REG) {
7643                 signed char target_reg=branch_regs[i].regmap[r];
7644                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7645                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7646                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7647                 }
7648                 else if(target_reg>=0) {
7649                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7650                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7651                 }
7652                 // Treat delay slot as part of branch too
7653                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7654                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7655                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7656                 }
7657                 else
7658                 {
7659                   will_dirty[i+1]&=~(1<<r);
7660                 }*/
7661               }
7662             }
7663           //}
7664             // Merge in delay slot
7665             for(r=0;r<HOST_REGS;r++) {
7666               if(r!=EXCLUDE_REG) {
7667                 if(!likely[i]) {
7668                   // Might not dirty if likely branch is not taken
7669                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7670                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7671                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7672                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7673                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7674                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7675                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7676                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7677                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7678                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7679                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7680                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7681                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7682                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7683                 }
7684               }
7685             }
7686           }
7687           // Merge in delay slot (won't dirty)
7688           for(r=0;r<HOST_REGS;r++) {
7689             if(r!=EXCLUDE_REG) {
7690               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7691               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7692               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7693               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7694               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7695               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7696               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7697               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7698               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7699               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7700             }
7701           }
7702           if(wr) {
7703             #ifndef DESTRUCTIVE_WRITEBACK
7704             branch_regs[i].dirty&=wont_dirty_i;
7705             #endif
7706             branch_regs[i].dirty|=will_dirty_i;
7707           }
7708         }
7709       }
7710     }
7711     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7712     {
7713       // SYSCALL instruction (software interrupt)
7714       will_dirty_i=0;
7715       wont_dirty_i=0;
7716     }
7717     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7718     {
7719       // ERET instruction (return from interrupt)
7720       will_dirty_i=0;
7721       wont_dirty_i=0;
7722     }
7723     will_dirty_next=will_dirty_i;
7724     wont_dirty_next=wont_dirty_i;
7725     for(r=0;r<HOST_REGS;r++) {
7726       if(r!=EXCLUDE_REG) {
7727         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7728         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7729         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7730         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7731         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7732         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7733         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7734         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7735         if(i>istart) {
7736           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP)
7737           {
7738             // Don't store a register immediately after writing it,
7739             // may prevent dual-issue.
7740             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7741             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7742           }
7743         }
7744       }
7745     }
7746     // Save it
7747     will_dirty[i]=will_dirty_i;
7748     wont_dirty[i]=wont_dirty_i;
7749     // Mark registers that won't be dirtied as not dirty
7750     if(wr) {
7751       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7752       for(r=0;r<HOST_REGS;r++) {
7753         if((will_dirty_i>>r)&1) {
7754           printf(" r%d",r);
7755         }
7756       }
7757       printf("\n");*/
7758
7759       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7760         regs[i].dirty|=will_dirty_i;
7761         #ifndef DESTRUCTIVE_WRITEBACK
7762         regs[i].dirty&=wont_dirty_i;
7763         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7764         {
7765           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7766             for(r=0;r<HOST_REGS;r++) {
7767               if(r!=EXCLUDE_REG) {
7768                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7769                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7770                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7771               }
7772             }
7773           }
7774         }
7775         else
7776         {
7777           if(i<iend) {
7778             for(r=0;r<HOST_REGS;r++) {
7779               if(r!=EXCLUDE_REG) {
7780                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7781                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7782                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7783               }
7784             }
7785           }
7786         }
7787         #endif
7788       //}
7789     }
7790     // Deal with changed mappings
7791     temp_will_dirty=will_dirty_i;
7792     temp_wont_dirty=wont_dirty_i;
7793     for(r=0;r<HOST_REGS;r++) {
7794       if(r!=EXCLUDE_REG) {
7795         int nr;
7796         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7797           if(wr) {
7798             #ifndef DESTRUCTIVE_WRITEBACK
7799             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7800             #endif
7801             regs[i].wasdirty|=will_dirty_i&(1<<r);
7802           }
7803         }
7804         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7805           // Register moved to a different register
7806           will_dirty_i&=~(1<<r);
7807           wont_dirty_i&=~(1<<r);
7808           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7809           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7810           if(wr) {
7811             #ifndef DESTRUCTIVE_WRITEBACK
7812             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7813             #endif
7814             regs[i].wasdirty|=will_dirty_i&(1<<r);
7815           }
7816         }
7817         else {
7818           will_dirty_i&=~(1<<r);
7819           wont_dirty_i&=~(1<<r);
7820           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7821             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7822             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7823           } else {
7824             wont_dirty_i|=1<<r;
7825             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7826           }
7827         }
7828       }
7829     }
7830   }
7831 }
7832
7833 #ifdef DISASM
7834   /* disassembly */
7835 void disassemble_inst(int i)
7836 {
7837     if (bt[i]) printf("*"); else printf(" ");
7838     switch(itype[i]) {
7839       case UJUMP:
7840         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7841       case CJUMP:
7842         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7843       case SJUMP:
7844         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7845       case FJUMP:
7846         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7847       case RJUMP:
7848         if (opcode[i]==0x9&&rt1[i]!=31)
7849           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7850         else
7851           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7852         break;
7853       case SPAN:
7854         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7855       case IMM16:
7856         if(opcode[i]==0xf) //LUI
7857           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7858         else
7859           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7860         break;
7861       case LOAD:
7862       case LOADLR:
7863         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7864         break;
7865       case STORE:
7866       case STORELR:
7867         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7868         break;
7869       case ALU:
7870       case SHIFT:
7871         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7872         break;
7873       case MULTDIV:
7874         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7875         break;
7876       case SHIFTIMM:
7877         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7878         break;
7879       case MOV:
7880         if((opcode2[i]&0x1d)==0x10)
7881           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7882         else if((opcode2[i]&0x1d)==0x11)
7883           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7884         else
7885           printf (" %x: %s\n",start+i*4,insn[i]);
7886         break;
7887       case COP0:
7888         if(opcode2[i]==0)
7889           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7890         else if(opcode2[i]==4)
7891           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7892         else printf (" %x: %s\n",start+i*4,insn[i]);
7893         break;
7894       case COP1:
7895         if(opcode2[i]<3)
7896           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7897         else if(opcode2[i]>3)
7898           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7899         else printf (" %x: %s\n",start+i*4,insn[i]);
7900         break;
7901       case COP2:
7902         if(opcode2[i]<3)
7903           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7904         else if(opcode2[i]>3)
7905           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7906         else printf (" %x: %s\n",start+i*4,insn[i]);
7907         break;
7908       case C1LS:
7909         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7910         break;
7911       case C2LS:
7912         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7913         break;
7914       case INTCALL:
7915         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7916         break;
7917       default:
7918         //printf (" %s %8x\n",insn[i],source[i]);
7919         printf (" %x: %s\n",start+i*4,insn[i]);
7920     }
7921 }
7922 #else
7923 static void disassemble_inst(int i) {}
7924 #endif // DISASM
7925
7926 #define DRC_TEST_VAL 0x74657374
7927
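// Sanity check for the translation cache: emit a tiny stub that loads
// DRC_TEST_VAL into the return register and jumps back through register 14
// (lr on the ARM target), flush the caches, and call it.  Getting the value
// back proves that freshly emitted code can actually be executed.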
7928 static int new_dynarec_test(void)
7929 {
7930   int (*testfunc)(void) = (void *)out;
7931   int ret;
7932   emit_movimm(DRC_TEST_VAL,0); // test
7933   emit_jmpreg(14);
7934   literal_pool(0);
7935 #ifdef __arm__
7936   __clear_cache((void *)testfunc, out);
7937 #endif
7938   SysPrintf("testing if we can run recompiled code..\n");
7939   ret = testfunc();
7940   if (ret == DRC_TEST_VAL)
7941     SysPrintf("test passed.\n");
7942   else
7943     SysPrintf("test failed: %08x\n", ret);
7944   out=(u_char *)BASE_ADDR;
7945   return ret == DRC_TEST_VAL;
7946 }
7947
7948 // clear the state completely, instead of just marking
7949 // things invalid like invalidate_all_pages() does
7950 void new_dynarec_clear_full()
7951 {
7952   int n;
7953   out=(u_char *)BASE_ADDR;
7954   memset(invalid_code,1,sizeof(invalid_code));
7955   memset(hash_table,0xff,sizeof(hash_table));
7956   memset(mini_ht,-1,sizeof(mini_ht));
7957   memset(restore_candidate,0,sizeof(restore_candidate));
7958   memset(shadow,0,sizeof(shadow));
7959   copy=shadow;
7960   expirep=16384; // Expiry pointer, +2 blocks
7961   pending_exception=0;
7962   literalcount=0;
7963   stop_after_jal=0;
7964   inv_code_start=inv_code_end=~0;
7965   // TLB
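  // memory_map[] has one entry per 4KB page: -1 marks a page with no direct
  // mapping, otherwise the entry holds (host_base-guest_base)>>2 so that,
  // roughly, host_address = guest_address + (entry<<2).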
7966 #ifndef DISABLE_TLB
7967   using_tlb=0;
7968   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7969     memory_map[n]=-1;
7970   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7971     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7972   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7973     memory_map[n]=-1;
7974 #endif
7975   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7976   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7977   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7978 }
7979
7980 void new_dynarec_init()
7981 {
7982   SysPrintf("Init new dynarec\n");
7983   out=(u_char *)BASE_ADDR;
7984 #if defined(VITA)
7985
7986   if (mmap (out, 1<<TARGET_SIZE_2,
7987             0,
7988             0,
7989             -1, 0) <= 0) {
7990     SysPrintf("mmap() failed: %s\n", strerror(errno));
7991   }
7992
7993 #else
7994   #if BASE_ADDR_FIXED
7995     if (mmap (out, 1<<TARGET_SIZE_2,
7996               PROT_READ | PROT_WRITE | PROT_EXEC,
7997               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7998               -1, 0) <= 0) {
7999       SysPrintf("mmap() failed: %s\n", strerror(errno));
8000     }
8001   #else
8002     // not all systems allow execute in data segment by default
8003     if (mprotect(out, 1<<TARGET_SIZE_2, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
8004       SysPrintf("mprotect() failed: %s\n", strerror(errno));
8005   #endif
8006 #endif
8007 #ifdef MUPEN64
8008   rdword=&readmem_dword;
8009   fake_pc.f.r.rs=&readmem_dword;
8010   fake_pc.f.r.rt=&readmem_dword;
8011   fake_pc.f.r.rd=&readmem_dword;
8012 #endif
8013   int n;
8014   cycle_multiplier=200;
8015   new_dynarec_clear_full();
8016 #ifdef HOST_IMM8
8017   // Copy this into local area so we don't have to put it in every literal pool
8018   invc_ptr=invalid_code;
8019 #endif
8020 #ifdef MUPEN64
8021   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
8022     writemem[n] = write_nomem_new;
8023     writememb[n] = write_nomemb_new;
8024     writememh[n] = write_nomemh_new;
8025 #ifndef FORCE32
8026     writememd[n] = write_nomemd_new;
8027 #endif
8028     readmem[n] = read_nomem_new;
8029     readmemb[n] = read_nomemb_new;
8030     readmemh[n] = read_nomemh_new;
8031 #ifndef FORCE32
8032     readmemd[n] = read_nomemd_new;
8033 #endif
8034   }
8035   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
8036     writemem[n] = write_rdram_new;
8037     writememb[n] = write_rdramb_new;
8038     writememh[n] = write_rdramh_new;
8039 #ifndef FORCE32
8040     writememd[n] = write_rdramd_new;
8041 #endif
8042   }
8043   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
8044     writemem[n] = write_nomem_new;
8045     writememb[n] = write_nomemb_new;
8046     writememh[n] = write_nomemh_new;
8047 #ifndef FORCE32
8048     writememd[n] = write_nomemd_new;
8049 #endif
8050     readmem[n] = read_nomem_new;
8051     readmemb[n] = read_nomemb_new;
8052     readmemh[n] = read_nomemh_new;
8053 #ifndef FORCE32
8054     readmemd[n] = read_nomemd_new;
8055 #endif
8056   }
8057 #endif
8058   tlb_hacks();
8059   arch_init();
8060   new_dynarec_test();
8061 #ifndef RAM_FIXED
8062   ram_offset=(u_int)rdram-0x80000000;
8063 #endif
8064   if (ram_offset!=0)
8065     SysPrintf("warning: RAM is not directly mapped, performance will suffer\n");
8066 }
8067
8068 void new_dynarec_cleanup()
8069 {
8070   int n;
8071   #if BASE_ADDR_FIXED
8072   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {SysPrintf("munmap() failed\n");}
8073   #endif
8074   for(n=0;n<4096;n++) ll_clear(jump_in+n);
8075   for(n=0;n<4096;n++) ll_clear(jump_out+n);
8076   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
8077   #ifdef ROM_COPY
8078   if (munmap (ROM_COPY, 67108864) < 0) {SysPrintf("munmap() failed\n");}
8079   #endif
8080 }
8081
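// Translate a guest code address to a host pointer into the backing
// memory (the 2MB RAM mirrors, or the BIOS ROM when HLE is disabled).
// *limit receives the end of the contiguous region; NULL is returned
// for addresses that cannot contain fetchable code.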
8082 static u_int *get_source_start(u_int addr, u_int *limit)
8083 {
8084   if (addr < 0x00200000 ||
8085     (0xa0000000 <= addr && addr < 0xa0200000)) {
8086     // used for BIOS calls mostly?
8087     *limit = (addr&0xa0000000)|0x00200000;
8088     return (u_int *)((u_int)rdram + (addr&0x1fffff));
8089   }
8090   else if (!Config.HLE && (
8091     /* (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
8092     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
8093     // BIOS
8094     *limit = (addr & 0xfff00000) | 0x80000;
8095     return (u_int *)((u_int)psxR + (addr&0x7ffff));
8096   }
8097   else if (addr >= 0x80000000 && addr < 0x80000000+RAM_SIZE) {
8098     *limit = (addr & 0x80600000) + 0x00200000;
8099     return (u_int *)((u_int)rdram + (addr&0x1fffff));
8100   }
  return NULL; // not a fetchable region
8101 }
8102
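// Rough block-length estimate used for savestate handling: scan forward
// (at most 4KB) for "jr $ra" and return the address just past its delay
// slot.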
8103 static u_int scan_for_ret(u_int addr)
8104 {
8105   u_int limit = 0;
8106   u_int *mem;
8107
8108   mem = get_source_start(addr, &limit);
8109   if (mem == NULL)
8110     return addr;
8111
8112   if (limit > addr + 0x1000)
8113     limit = addr + 0x1000;
8114   for (; addr < limit; addr += 4, mem++) {
8115     if (*mem == 0x03e00008) // jr $ra
8116       return addr + 8;
8117   }
  return addr; // no "jr $ra" found within the scan window
8118 }
8119
8120 struct savestate_block {
8121   uint32_t addr;
8122   uint32_t regflags;
8123 };
8124
8125 static int addr_cmp(const void *p1_, const void *p2_)
8126 {
8127   const struct savestate_block *p1 = p1_, *p2 = p2_;
8128   return p1->addr - p2->addr;
8129 }
8130
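// Serialize the entry points of currently compiled blocks (address plus
// register speculation flags) into 'save' so they can be precompiled
// again after a savestate is loaded.  Entries that fall inside an
// earlier block, as estimated by scan_for_ret(), are skipped.
// Returns the number of bytes written.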
8131 int new_dynarec_save_blocks(void *save, int size)
8132 {
8133   struct savestate_block *blocks = save;
8134   int maxcount = size / sizeof(blocks[0]);
8135   struct savestate_block tmp_blocks[1024];
8136   struct ll_entry *head;
8137   int p, s, d, o, bcnt;
8138   u_int addr;
8139
8140   o = 0;
8141   for (p = 0; p < sizeof(jump_in) / sizeof(jump_in[0]); p++) {
8142     bcnt = 0;
8143     for (head = jump_in[p]; head != NULL; head = head->next) {
8144       tmp_blocks[bcnt].addr = head->vaddr;
8145       tmp_blocks[bcnt].regflags = head->reg_sv_flags;
8146       bcnt++;
8147     }
8148     if (bcnt < 1)
8149       continue;
8150     qsort(tmp_blocks, bcnt, sizeof(tmp_blocks[0]), addr_cmp);
8151
8152     addr = tmp_blocks[0].addr;
8153     for (s = d = 0; s < bcnt; s++) {
8154       if (tmp_blocks[s].addr < addr)
8155         continue;
8156       if (d == 0 || tmp_blocks[d-1].addr != tmp_blocks[s].addr)
8157         tmp_blocks[d++] = tmp_blocks[s];
8158       addr = scan_for_ret(tmp_blocks[s].addr);
8159     }
8160
8161     if (o + d > maxcount)
8162       d = maxcount - o;
8163     memcpy(&blocks[o], tmp_blocks, d * sizeof(blocks[0]));
8164     o += d;
8165   }
8166
8167   return o * sizeof(blocks[0]);
8168 }
8169
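// Precompile the blocks recorded by new_dynarec_save_blocks().  The GPRs
// are temporarily seeded with plausible values (0x80000000 = RAM,
// 0x1f800000 = scratchpad, per the saved regflags) so that register
// speculation during recompilation matches the original run, then the
// real register contents are restored.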
8170 void new_dynarec_load_blocks(const void *save, int size)
8171 {
8172   const struct savestate_block *blocks = save;
8173   int count = size / sizeof(blocks[0]);
8174   u_int regs_save[32];
8175   uint32_t f;
8176   int i, b;
8177
8178   get_addr(psxRegs.pc);
8179
8180   // temporarily change the GPRs so that register speculation at least partially works
8181   memcpy(regs_save, &psxRegs.GPR, sizeof(regs_save));
8182   for (i = 1; i < 32; i++)
8183     psxRegs.GPR.r[i] = 0x80000000;
8184
8185   for (b = 0; b < count; b++) {
8186     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
8187       if (f & 1)
8188         psxRegs.GPR.r[i] = 0x1f800000;
8189     }
8190
8191     get_addr(blocks[b].addr);
8192
8193     for (f = blocks[b].regflags, i = 0; f; f >>= 1, i++) {
8194       if (f & 1)
8195         psxRegs.GPR.r[i] = 0x80000000;
8196     }
8197   }
8198
8199   memcpy(&psxRegs.GPR, regs_save, sizeof(regs_save));
8200 }
8201
8202 int new_recompile_block(int addr)
8203 {
8204   u_int pagelimit = 0;
8205   u_int state_rflags = 0;
8206   int i;
8207
8208   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8209   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8210   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
8211   //if(debug)
8212   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
8213   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
8214   /*if(Count>=312978186) {
8215     rlist();
8216   }*/
8217   //rlist();
8218
8219   // this is just for speculation
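  // record which GPRs currently hold scratchpad-looking (0x1f80xxxx)
  // pointers; these flags are stored with the compiled block
  // (reg_sv_flags) and used when speculating about memory access targets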
8220   for (i = 1; i < 32; i++) {
8221     if ((psxRegs.GPR.r[i] & 0xffff0000) == 0x1f800000)
8222       state_rflags |= 1 << i;
8223   }
8224
8225   start = (u_int)addr&~3;
8226   //assert(((u_int)addr&1)==0);
8227   new_dynarec_did_compile=1;
8228   if (Config.HLE && start == 0x80001000) // hlecall
8229   {
8230     // XXX: is this enough? Maybe check hleSoftCall?
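    // don't recompile the HLE BIOS entry point; emit a stub that stores
    // the PC and exits to C code via new_dyna_leave so the HLE handler
    // can run there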
8231     u_int beginning=(u_int)out;
8232     u_int page=get_page(start);
8233     invalid_code[start>>12]=0;
8234     emit_movimm(start,0);
8235     emit_writeword(0,(int)&pcaddr);
8236     emit_jmp((int)new_dyna_leave);
8237     literal_pool(0);
8238 #ifdef __arm__
8239     __clear_cache((void *)beginning,out);
8240 #endif
8241     ll_add_flags(jump_in+page,start,state_rflags,(void *)beginning);
8242     return 0;
8243   }
8244
8245   source = get_source_start(start, &pagelimit);
8246   if (source == NULL) {
8247     SysPrintf("Compile at bogus memory address: %08x\n", addr);
8248     exit(1);
8249   }
8250
8251   /* Pass 1: disassemble */
8252   /* Pass 2: register dependencies, branch targets */
8253   /* Pass 3: register allocation */
8254   /* Pass 4: branch dependencies */
8255   /* Pass 5: pre-alloc */
8256   /* Pass 6: optimize clean/dirty state */
8257   /* Pass 7: flag 32-bit registers */
8258   /* Pass 8: assembly */
8259   /* Pass 9: linker */
8260   /* Pass 10: garbage collection / free memory */
8261
8262   int j;
8263   int done=0;
8264   unsigned int type,op,op2;
8265
8266   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
8267
8268   /* Pass 1 disassembly */
8269
8270   for(i=0;!done;i++) {
8271     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
8272     minimum_free_regs[i]=0;
8273     opcode[i]=op=source[i]>>26;
8274     switch(op)
8275     {
8276       case 0x00: strcpy(insn[i],"special"); type=NI;
8277         op2=source[i]&0x3f;
8278         switch(op2)
8279         {
8280           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
8281           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
8282           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
8283           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
8284           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
8285           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
8286           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
8287           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
8288           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
8289           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
8290           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
8291           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
8292           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
8293           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
8294           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
8295           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
8296           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
8297           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
8298           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
8299           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
8300           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
8301           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
8302           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
8303           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
8304           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
8305           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
8306           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
8307           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
8308           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
8309           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
8310           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
8311           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
8312           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
8313           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
8314           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
8315 #ifndef FORCE32
8316           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
8317           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
8318           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
8319           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
8320           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
8321           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
8322           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
8323           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
8324           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
8325           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
8326           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
8327           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
8328           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
8329           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
8330           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
8331           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
8332           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
8333 #endif
8334         }
8335         break;
8336       case 0x01: strcpy(insn[i],"regimm"); type=NI;
8337         op2=(source[i]>>16)&0x1f;
8338         switch(op2)
8339         {
8340           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
8341           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
8342           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
8343           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
8344           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
8345           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
8346           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
8347           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
8348           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
8349           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
8350           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
8351           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8352           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8353           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8354         }
8355         break;
8356       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8357       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8358       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8359       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8360       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8361       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8362       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8363       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8364       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8365       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8366       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8367       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8368       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8369       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8370       case 0x10: strcpy(insn[i],"cop0"); type=NI;
8371         op2=(source[i]>>21)&0x1f;
8372         switch(op2)
8373         {
8374           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8375           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8376           case 0x10: strcpy(insn[i],"tlb"); type=NI;
8377           switch(source[i]&0x3f)
8378           {
8379             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8380             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8381             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8382             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
8383 #ifdef PCSX
8384             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8385 #else
8386             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
8387 #endif
8388           }
8389         }
8390         break;
8391       case 0x11: strcpy(insn[i],"cop1"); type=NI;
8392         op2=(source[i]>>21)&0x1f;
8393         switch(op2)
8394         {
8395           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8396           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8397           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8398           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8399           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8400           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8401           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8402           switch((source[i]>>16)&0x3)
8403           {
8404             case 0x00: strcpy(insn[i],"BC1F"); break;
8405             case 0x01: strcpy(insn[i],"BC1T"); break;
8406             case 0x02: strcpy(insn[i],"BC1FL"); break;
8407             case 0x03: strcpy(insn[i],"BC1TL"); break;
8408           }
8409           break;
8410           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8411           switch(source[i]&0x3f)
8412           {
8413             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8414             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8415             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8416             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8417             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8418             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8419             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8420             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8421             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8422             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8423             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8424             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8425             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8426             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8427             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8428             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8429             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8430             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8431             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8432             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8433             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8434             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8435             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8436             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8437             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8438             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8439             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8440             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8441             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8442             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8443             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8444             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8445             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8446             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8447             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8448           }
8449           break;
8450           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8451           switch(source[i]&0x3f)
8452           {
8453             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8454             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8455             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8456             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8457             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8458             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8459             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8460             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8461             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8462             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8463             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8464             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8465             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8466             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8467             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8468             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8469             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8470             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8471             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8472             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8473             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8474             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8475             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8476             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8477             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8478             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8479             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8480             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8481             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8482             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8483             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8484             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8485             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8486             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8487             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8488           }
8489           break;
8490           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8491           switch(source[i]&0x3f)
8492           {
8493             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8494             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8495           }
8496           break;
8497           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8498           switch(source[i]&0x3f)
8499           {
8500             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8501             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8502           }
8503           break;
8504         }
8505         break;
8506 #ifndef FORCE32
8507       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8508       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8509       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8510       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8511       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8512       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8513       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8514       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8515 #endif
8516       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8517       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8518       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8519       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8520       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8521       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8522       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8523 #ifndef FORCE32
8524       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8525 #endif
8526       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8527       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8528       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8529       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8530 #ifndef FORCE32
8531       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8532       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8533 #endif
8534       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8535       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8536       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8537       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8538 #ifndef FORCE32
8539       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8540       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8541       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8542 #endif
8543       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8544       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8545 #ifndef FORCE32
8546       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8547       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8548       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8549 #endif
8550 #ifdef PCSX
8551       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8552         op2=(source[i]>>21)&0x1f;
8553         //if (op2 & 0x10) {
8554         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
8555           if (gte_handlers[source[i]&0x3f]!=NULL) {
8556             if (gte_regnames[source[i]&0x3f]!=NULL)
8557               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
8558             else
8559               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8560             type=C2OP;
8561           }
8562         }
8563         else switch(op2)
8564         {
8565           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8566           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8567           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8568           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8569         }
8570         break;
8571       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8572       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8573       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8574 #endif
8575       default: strcpy(insn[i],"???"); type=NI;
8576         SysPrintf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8577         break;
8578     }
8579     itype[i]=type;
8580     opcode2[i]=op2;
8581     /* Get registers/immediates */
8582     lt1[i]=0;
8583     us1[i]=0;
8584     us2[i]=0;
8585     dep1[i]=0;
8586     dep2[i]=0;
8587     gte_rs[i]=gte_rt[i]=0;
8588     switch(type) {
8589       case LOAD:
8590         rs1[i]=(source[i]>>21)&0x1f;
8591         rs2[i]=0;
8592         rt1[i]=(source[i]>>16)&0x1f;
8593         rt2[i]=0;
8594         imm[i]=(short)source[i];
8595         break;
8596       case STORE:
8597       case STORELR:
8598         rs1[i]=(source[i]>>21)&0x1f;
8599         rs2[i]=(source[i]>>16)&0x1f;
8600         rt1[i]=0;
8601         rt2[i]=0;
8602         imm[i]=(short)source[i];
8603         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8604         break;
8605       case LOADLR:
8606         // LWL/LWR only load part of the register,
8607         // therefore the target register must be treated as a source too
8608         rs1[i]=(source[i]>>21)&0x1f;
8609         rs2[i]=(source[i]>>16)&0x1f;
8610         rt1[i]=(source[i]>>16)&0x1f;
8611         rt2[i]=0;
8612         imm[i]=(short)source[i];
8613         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDR/LDL
8614         if(op==0x26) dep1[i]=rt1[i]; // LWR
8615         break;
8616       case IMM16:
8617         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8618         else rs1[i]=(source[i]>>21)&0x1f;
8619         rs2[i]=0;
8620         rt1[i]=(source[i]>>16)&0x1f;
8621         rt2[i]=0;
8622         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8623           imm[i]=(unsigned short)source[i];
8624         }else{
8625           imm[i]=(short)source[i];
8626         }
8627         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8628         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8629         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8630         break;
8631       case UJUMP:
8632         rs1[i]=0;
8633         rs2[i]=0;
8634         rt1[i]=0;
8635         rt2[i]=0;
8636         // The JAL instruction writes to r31.
8637         if (op&1) {
8638           rt1[i]=31;
8639         }
8640         rs2[i]=CCREG;
8641         break;
8642       case RJUMP:
8643         rs1[i]=(source[i]>>21)&0x1f;
8644         rs2[i]=0;
8645         rt1[i]=0;
8646         rt2[i]=0;
8647         // The JALR instruction writes to rd.
8648         if (op2&1) {
8649           rt1[i]=(source[i]>>11)&0x1f;
8650         }
8651         rs2[i]=CCREG;
8652         break;
8653       case CJUMP:
8654         rs1[i]=(source[i]>>21)&0x1f;
8655         rs2[i]=(source[i]>>16)&0x1f;
8656         rt1[i]=0;
8657         rt2[i]=0;
8658         if(op&2) { // BGTZ/BLEZ
8659           rs2[i]=0;
8660         }
8661         us1[i]=rs1[i];
8662         us2[i]=rs2[i];
8663         likely[i]=op>>4;
8664         break;
8665       case SJUMP:
8666         rs1[i]=(source[i]>>21)&0x1f;
8667         rs2[i]=CCREG;
8668         rt1[i]=0;
8669         rt2[i]=0;
8670         us1[i]=rs1[i];
8671         if(op2&0x10) { // BxxAL
8672           rt1[i]=31;
8673           // NOTE: If the branch is not taken, r31 is still overwritten
8674         }
8675         likely[i]=(op2&2)>>1;
8676         break;
8677       case FJUMP:
8678         rs1[i]=FSREG;
8679         rs2[i]=CSREG;
8680         rt1[i]=0;
8681         rt2[i]=0;
8682         likely[i]=((source[i])>>17)&1;
8683         break;
8684       case ALU:
8685         rs1[i]=(source[i]>>21)&0x1f; // source
8686         rs2[i]=(source[i]>>16)&0x1f; // second operand
8687         rt1[i]=(source[i]>>11)&0x1f; // destination
8688         rt2[i]=0;
8689         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8690           us1[i]=rs1[i];us2[i]=rs2[i];
8691         }
8692         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8693           dep1[i]=rs1[i];dep2[i]=rs2[i];
8694         }
8695         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8696           dep1[i]=rs1[i];dep2[i]=rs2[i];
8697         }
8698         break;
8699       case MULTDIV:
8700         rs1[i]=(source[i]>>21)&0x1f; // source
8701         rs2[i]=(source[i]>>16)&0x1f; // divisor
8702         rt1[i]=HIREG;
8703         rt2[i]=LOREG;
8704         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8705           us1[i]=rs1[i];us2[i]=rs2[i];
8706         }
8707         break;
8708       case MOV:
8709         rs1[i]=0;
8710         rs2[i]=0;
8711         rt1[i]=0;
8712         rt2[i]=0;
8713         if(op2==0x10) rs1[i]=HIREG; // MFHI
8714         if(op2==0x11) rt1[i]=HIREG; // MTHI
8715         if(op2==0x12) rs1[i]=LOREG; // MFLO
8716         if(op2==0x13) rt1[i]=LOREG; // MTLO
8717         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8718         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8719         dep1[i]=rs1[i];
8720         break;
8721       case SHIFT:
8722         rs1[i]=(source[i]>>16)&0x1f; // target of shift
8723         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8724         rt1[i]=(source[i]>>11)&0x1f; // destination
8725         rt2[i]=0;
8726         // DSLLV/DSRLV/DSRAV are 64-bit
8727         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8728         break;
8729       case SHIFTIMM:
8730         rs1[i]=(source[i]>>16)&0x1f;
8731         rs2[i]=0;
8732         rt1[i]=(source[i]>>11)&0x1f;
8733         rt2[i]=0;
8734         imm[i]=(source[i]>>6)&0x1f;
8735         // DSxx32 instructions
8736         if(op2>=0x3c) imm[i]|=0x20;
8737         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8738         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8739         break;
8740       case COP0:
8741         rs1[i]=0;
8742         rs2[i]=0;
8743         rt1[i]=0;
8744         rt2[i]=0;
8745         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8746         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8747         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8748         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8749         break;
8750       case COP1:
8751         rs1[i]=0;
8752         rs2[i]=0;
8753         rt1[i]=0;
8754         rt2[i]=0;
8755         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8756         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8757         if(op2==5) us1[i]=rs1[i]; // DMTC1
8758         rs2[i]=CSREG;
8759         break;
8760       case COP2:
8761         rs1[i]=0;
8762         rs2[i]=0;
8763         rt1[i]=0;
8764         rt2[i]=0;
8765         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
8766         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
8767         rs2[i]=CSREG;
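        // gte_rs/gte_rt are 64-bit masks of GTE registers read/written:
        // bits 0-31 = data regs, bits 32-63 = control regs
        // (bit 63 = control reg 31, the FLAG register)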
8768         int gr=(source[i]>>11)&0x1F;
8769         switch(op2)
8770         {
8771           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
8772           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
8773           case 0x02: gte_rs[i]=1ll<<(gr+32); break; // CFC2
8774           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
8775         }
8776         break;
8777       case C1LS:
8778         rs1[i]=(source[i]>>21)&0x1F;
8779         rs2[i]=CSREG;
8780         rt1[i]=0;
8781         rt2[i]=0;
8782         imm[i]=(short)source[i];
8783         break;
8784       case C2LS:
8785         rs1[i]=(source[i]>>21)&0x1F;
8786         rs2[i]=0;
8787         rt1[i]=0;
8788         rt2[i]=0;
8789         imm[i]=(short)source[i];
8790         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
8791         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
8792         break;
8793       case C2OP:
8794         rs1[i]=0;
8795         rs2[i]=0;
8796         rt1[i]=0;
8797         rt2[i]=0;
8798         gte_rs[i]=gte_reg_reads[source[i]&0x3f];
8799         gte_rt[i]=gte_reg_writes[source[i]&0x3f];
8800         gte_rt[i]|=1ll<<63; // every op changes flags
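        // MVMVA selects its multiply vector with bits 15-16 of the opcode:
        // 0-2 pick V0/V1/V2 (data regs 0-5), 3 picks IR1-IR3 (data regs 9-11).
        // Narrow the default read mask (0xe3f) to the vector actually used.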
8801         if((source[i]&0x3f)==GTE_MVMVA) {
8802           int v = (source[i] >> 15) & 3;
8803           gte_rs[i]&=~0xe3fll;
8804           if(v==3) gte_rs[i]|=0xe00ll;
8805           else gte_rs[i]|=3ll<<(v*2);
8806         }
8807         break;
8808       case FLOAT:
8809       case FCONV:
8810         rs1[i]=0;
8811         rs2[i]=CSREG;
8812         rt1[i]=0;
8813         rt2[i]=0;
8814         break;
8815       case FCOMP:
8816         rs1[i]=FSREG;
8817         rs2[i]=CSREG;
8818         rt1[i]=FSREG;
8819         rt2[i]=0;
8820         break;
8821       case SYSCALL:
8822       case HLECALL:
8823       case INTCALL:
8824         rs1[i]=CCREG;
8825         rs2[i]=0;
8826         rt1[i]=0;
8827         rt2[i]=0;
8828         break;
8829       default:
8830         rs1[i]=0;
8831         rs2[i]=0;
8832         rt1[i]=0;
8833         rt2[i]=0;
8834     }
8835     /* Calculate branch target addresses */
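    // J/JAL: target = (address of delay slot & 0xF0000000) | (26-bit index << 2);
    //   (source<<6)>>4 keeps the low 26 bits and scales them by 4.
    // Conditional branches: target = PC+4 + sign_extended(imm16)*4;
    //   (signed)(source<<16)>>14 sign-extends the 16-bit offset and scales it by 4.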
8836     if(type==UJUMP)
8837       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8838     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8839       ba[i]=start+i*4+8; // Ignore never taken branch
8840     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8841       ba[i]=start+i*4+8; // Ignore never taken branch
8842     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8843       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8844     else ba[i]=-1;
8845 #ifdef PCSX
8846     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
8847       int do_in_intrp=0;
8848       // branch in delay slot?
8849       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8850         // don't compile the first branch; call the interpreter when it is hit
8851         SysPrintf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
8852         do_in_intrp=1;
8853       }
8854       // basic load delay detection
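      // on the R3000A a load does not update its target register until
      // after the next instruction; if the branch target immediately
      // reads the register loaded in the delay slot, recompiled code
      // would diverge from hardware, so the branch is handed to the
      // interpreter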
8855       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
8856         int t=(ba[i-1]-start)/4;
8857         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
8858           // jump target wants DS result - potential load delay effect
8859           SysPrintf("load delay @%08x (%08x)\n", addr + i*4, addr);
8860           do_in_intrp=1;
8861           bt[t+1]=1; // expected return from interpreter
8862         }
8863         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
8864               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
8865           // v0 overwrite like this is a sign of trouble, bail out
8866           SysPrintf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
8867           do_in_intrp=1;
8868         }
8869       }
8870       if(do_in_intrp) {
8871         rs1[i-1]=CCREG;
8872         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
8873         ba[i-1]=-1;
8874         itype[i-1]=INTCALL;
8875         done=2;
8876         i--; // don't compile the DS
8877       }
8878     }
8879 #endif
8880     /* Is this the end of the block? */
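    // (source[i-1]>>16)==0x1000 matches "beq $0,$0,..." - an unconditional
    // branch, which ends the block just like J/JR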
8881     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8882       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8883         done=2;
8884       }
8885       else {
8886         if(stop_after_jal) done=1;
8887         // Stop on BREAK
8888         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8889       }
8890       // Don't recompile stuff that's already compiled
8891       if(check_addr(start+i*4+4)) done=1;
8892       // Don't get too close to the limit
8893       if(i>MAXBLOCK/2) done=1;
8894     }
8895     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8896     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
8897     if(done==2) {
8898       // Does the block continue due to a branch?
8899       for(j=i-1;j>=0;j--)
8900       {
8901         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
8902         if(ba[j]==start+i*4+4) done=j=0;
8903         if(ba[j]==start+i*4+8) done=j=0;
8904       }
8905     }
8906     //assert(i<MAXBLOCK-1);
8907     if(start+i*4==pagelimit-4) done=1;
8908     assert(start+i*4<pagelimit);
8909     if (i==MAXBLOCK-1) done=1;
8910     // Stop if we're compiling junk
8911     if(itype[i]==NI&&opcode[i]==0x11) {
8912       done=stop_after_jal=1;
8913       SysPrintf("Disabled speculative precompilation\n");
8914     }
8915   }
8916   slen=i;
8917   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8918     if(start+i*4==pagelimit) {
8919       itype[i-1]=SPAN;
8920     }
8921   }
8922   assert(slen>0);
8923
8924   /* Pass 2 - Register dependencies and branch targets */
8925
8926   unneeded_registers(0,slen-1,0);
8927
8928   /* Pass 3 - Register allocation */
8929
8930   struct regstat current; // Current register allocations/status
8931   current.is32=1;
8932   current.dirty=0;
8933   current.u=unneeded_reg[0];
8934   current.uu=unneeded_reg_upper[0];
8935   clear_all_regs(current.regmap);
8936   alloc_reg(&current,0,CCREG);
8937   dirty_reg(&current,CCREG);
8938   current.isconst=0;
8939   current.wasconst=0;
8940   current.waswritten=0;
8941   int ds=0;
8942   int cc=0;
8943   int hr=-1;
8944
8945 #ifndef FORCE32
8946   provisional_32bit();
8947 #endif
8948   if((u_int)addr&1) {
8949     // First instruction is delay slot
8950     cc=-1;
8951     bt[1]=1;
8952     ds=1;
8953     unneeded_reg[0]=1;
8954     unneeded_reg_upper[0]=1;
8955     current.regmap[HOST_BTREG]=BTREG;
8956   }
8957
8958   for(i=0;i<slen;i++)
8959   {
8960     if(bt[i])
8961     {
8962       int hr;
8963       for(hr=0;hr<HOST_REGS;hr++)
8964       {
8965         // Is this really necessary?
8966         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8967       }
8968       current.isconst=0;
8969       current.waswritten=0;
8970     }
8971     if(i>1)
8972     {
8973       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8974       {
8975         if(rs1[i-2]==0||rs2[i-2]==0)
8976         {
8977           if(rs1[i-2]) {
8978             current.is32|=1LL<<rs1[i-2];
8979             int hr=get_reg(current.regmap,rs1[i-2]|64);
8980             if(hr>=0) current.regmap[hr]=-1;
8981           }
8982           if(rs2[i-2]) {
8983             current.is32|=1LL<<rs2[i-2];
8984             int hr=get_reg(current.regmap,rs2[i-2]|64);
8985             if(hr>=0) current.regmap[hr]=-1;
8986           }
8987         }
8988       }
8989     }
8990 #ifndef FORCE32
8991     // If something jumps here with 64-bit values
8992     // then promote those registers to 64 bits
8993     if(bt[i])
8994     {
8995       uint64_t temp_is32=current.is32;
8996       for(j=i-1;j>=0;j--)
8997       {
8998         if(ba[j]==start+i*4)
8999           temp_is32&=branch_regs[j].is32;
9000       }
9001       for(j=i;j<slen;j++)
9002       {
9003         if(ba[j]==start+i*4)
9004           //temp_is32=1;
9005           temp_is32&=p32[j];
9006       }
9007       if(temp_is32!=current.is32) {
9008         //printf("dumping 32-bit regs (%x)\n",start+i*4);
9009         #ifndef DESTRUCTIVE_WRITEBACK
9010         if(ds)
9011         #endif
9012         for(hr=0;hr<HOST_REGS;hr++)
9013         {
9014           int r=current.regmap[hr];
9015           if(r>0&&r<64)
9016           {
9017             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
9018               temp_is32|=1LL<<r;
9019               //printf("restore %d\n",r);
9020             }
9021           }
9022         }
9023         current.is32=temp_is32;
9024       }
9025     }
9026 #else
9027     current.is32=-1LL;
9028 #endif
9029
9030     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
9031     regs[i].wasconst=current.isconst;
9032     regs[i].was32=current.is32;
9033     regs[i].wasdirty=current.dirty;
9034     regs[i].loadedconst=0;
9035     #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
9036     // To change a dirty register from 32 to 64 bits, we must write
9037     // it out during the previous cycle (for branches, 2 cycles)
9038     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
9039     {
9040       uint64_t temp_is32=current.is32;
9041       for(j=i-1;j>=0;j--)
9042       {
9043         if(ba[j]==start+i*4+4)
9044           temp_is32&=branch_regs[j].is32;
9045       }
9046       for(j=i;j<slen;j++)
9047       {
9048         if(ba[j]==start+i*4+4)
9049           //temp_is32=1;
9050           temp_is32&=p32[j];
9051       }
9052       if(temp_is32!=current.is32) {
9053         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
9054         for(hr=0;hr<HOST_REGS;hr++)
9055         {
9056           int r=current.regmap[hr];
9057           if(r>0)
9058           {
9059             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
9060               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
9061               {
9062                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
9063                 {
9064                   //printf("dump %d/r%d\n",hr,r);
9065                   current.regmap[hr]=-1;
9066                   if(get_reg(current.regmap,r|64)>=0)
9067                     current.regmap[get_reg(current.regmap,r|64)]=-1;
9068                 }
9069               }
9070             }
9071           }
9072         }
9073       }
9074     }
9075     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
9076     {
9077       uint64_t temp_is32=current.is32;
9078       for(j=i-1;j>=0;j--)
9079       {
9080         if(ba[j]==start+i*4+8)
9081           temp_is32&=branch_regs[j].is32;
9082       }
9083       for(j=i;j<slen;j++)
9084       {
9085         if(ba[j]==start+i*4+8)
9086           //temp_is32=1;
9087           temp_is32&=p32[j];
9088       }
9089       if(temp_is32!=current.is32) {
9090         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
9091         for(hr=0;hr<HOST_REGS;hr++)
9092         {
9093           int r=current.regmap[hr];
9094           if(r>0)
9095           {
9096             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
9097               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
9098               {
9099                 //printf("dump %d/r%d\n",hr,r);
9100                 current.regmap[hr]=-1;
9101                 if(get_reg(current.regmap,r|64)>=0)
9102                   current.regmap[get_reg(current.regmap,r|64)]=-1;
9103               }
9104             }
9105           }
9106         }
9107       }
9108     }
9109     #endif
9110     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9111       if(i+1<slen) {
9112         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9113         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9114         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9115         current.u|=1;
9116         current.uu|=1;
9117       } else {
9118         current.u=1;
9119         current.uu=1;
9120       }
9121     } else {
9122       if(i+1<slen) {
9123         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
9124         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9125         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9126         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
9127         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9128         current.u|=1;
9129         current.uu|=1;
9130       } else { SysPrintf("oops, branch at end of block with no delay slot\n");exit(1); }
9131     }
9132     is_ds[i]=ds;
9133     if(ds) {
9134       ds=0; // Skip delay slot, already allocated as part of branch
9135       // ...but we need to alloc it in case something jumps here
9136       if(i+1<slen) {
9137         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
9138         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
9139       }else{
9140         current.u=branch_unneeded_reg[i-1];
9141         current.uu=branch_unneeded_reg_upper[i-1];
9142       }
9143       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
9144       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9145       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9146       current.u|=1;
9147       current.uu|=1;
9148       struct regstat temp;
9149       memcpy(&temp,&current,sizeof(current));
9150       temp.wasdirty=temp.dirty;
9151       temp.was32=temp.is32;
9152       // TODO: Take into account unconditional branches, as below
9153       delayslot_alloc(&temp,i);
9154       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
9155       regs[i].wasdirty=temp.wasdirty;
9156       regs[i].was32=temp.was32;
9157       regs[i].dirty=temp.dirty;
9158       regs[i].is32=temp.is32;
9159       regs[i].isconst=0;
9160       regs[i].wasconst=0;
9161       current.isconst=0;
9162       // Create entry (branch target) regmap
9163       for(hr=0;hr<HOST_REGS;hr++)
9164       {
9165         int r=temp.regmap[hr];
9166         if(r>=0) {
9167           if(r!=regmap_pre[i][hr]) {
9168             regs[i].regmap_entry[hr]=-1;
9169           }
9170           else
9171           {
9172             if(r<64){
9173               if((current.u>>r)&1) {
9174                 regs[i].regmap_entry[hr]=-1;
9175                 regs[i].regmap[hr]=-1;
9176                 // Don't clear regs in the delay slot as the branch might need them
9177                 //current.regmap[hr]=-1;
9178               }else
9179                 regs[i].regmap_entry[hr]=r;
9180             }
9181             else {
9182               if((current.uu>>(r&63))&1) {
9183                 regs[i].regmap_entry[hr]=-1;
9184                 regs[i].regmap[hr]=-1;
9185                 // Don't clear regs in the delay slot as the branch might need them
9186                 //current.regmap[hr]=-1;
9187               }else
9188                 regs[i].regmap_entry[hr]=r;
9189             }
9190           }
9191         } else {
9192           // First instruction expects CCREG to be allocated
9193           if(i==0&&hr==HOST_CCREG)
9194             regs[i].regmap_entry[hr]=CCREG;
9195           else
9196             regs[i].regmap_entry[hr]=-1;
9197         }
9198       }
9199     }
9200     else { // Not delay slot
9201       switch(itype[i]) {
9202         case UJUMP:
9203           //current.isconst=0; // DEBUG
9204           //current.wasconst=0; // DEBUG
9205           //regs[i].wasconst=0; // DEBUG
9206           clear_const(&current,rt1[i]);
9207           alloc_cc(&current,i);
9208           dirty_reg(&current,CCREG);
9209           if (rt1[i]==31) {
9210             alloc_reg(&current,i,31);
9211             dirty_reg(&current,31);
9212             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
9213             //assert(rt1[i+1]!=rt1[i]);
9214             #ifdef REG_PREFETCH
9215             alloc_reg(&current,i,PTEMP);
9216             #endif
9217             //current.is32|=1LL<<rt1[i];
9218           }
9219           ooo[i]=1;
9220           delayslot_alloc(&current,i+1);
9221           //current.isconst=0; // DEBUG
9222           ds=1;
9223           //printf("i=%d, isconst=%x\n",i,current.isconst);
9224           break;
9225         case RJUMP:
9226           //current.isconst=0;
9227           //current.wasconst=0;
9228           //regs[i].wasconst=0;
9229           clear_const(&current,rs1[i]);
9230           clear_const(&current,rt1[i]);
9231           alloc_cc(&current,i);
9232           dirty_reg(&current,CCREG);
9233           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
9234             alloc_reg(&current,i,rs1[i]);
9235             if (rt1[i]!=0) {
9236               alloc_reg(&current,i,rt1[i]);
9237               dirty_reg(&current,rt1[i]);
9238               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
9239               assert(rt1[i+1]!=rt1[i]);
9240               #ifdef REG_PREFETCH
9241               alloc_reg(&current,i,PTEMP);
9242               #endif
9243             }
9244             #ifdef USE_MINI_HT
9245             if(rs1[i]==31) { // JALR
9246               alloc_reg(&current,i,RHASH);
9247               #ifndef HOST_IMM_ADDR32
9248               alloc_reg(&current,i,RHTBL);
9249               #endif
9250             }
9251             #endif
9252             delayslot_alloc(&current,i+1);
9253           } else {
9254             // The delay slot overwrites our source register,
9255             // allocate a temporary register to hold the old value.
9256             current.isconst=0;
9257             current.wasconst=0;
9258             regs[i].wasconst=0;
9259             delayslot_alloc(&current,i+1);
9260             current.isconst=0;
9261             alloc_reg(&current,i,RTEMP);
9262           }
9263           //current.isconst=0; // DEBUG
9264           ooo[i]=1;
9265           ds=1;
9266           break;
9267         case CJUMP:
9268           //current.isconst=0;
9269           //current.wasconst=0;
9270           //regs[i].wasconst=0;
9271           clear_const(&current,rs1[i]);
9272           clear_const(&current,rs2[i]);
9273           if((opcode[i]&0x3E)==4) // BEQ/BNE
9274           {
9275             alloc_cc(&current,i);
9276             dirty_reg(&current,CCREG);
9277             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9278             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9279             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9280             {
9281               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9282               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9283             }
9284             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
9285                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
9286               // The delay slot overwrites one of our conditions.
9287               // Allocate the branch condition registers instead.
9288               current.isconst=0;
9289               current.wasconst=0;
9290               regs[i].wasconst=0;
9291               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9292               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9293               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9294               {
9295                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9296                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9297               }
9298             }
9299             else
9300             {
9301               ooo[i]=1;
9302               delayslot_alloc(&current,i+1);
9303             }
9304           }
9305           else
9306           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
9307           {
9308             alloc_cc(&current,i);
9309             dirty_reg(&current,CCREG);
9310             alloc_reg(&current,i,rs1[i]);
9311             if(!(current.is32>>rs1[i]&1))
9312             {
9313               alloc_reg64(&current,i,rs1[i]);
9314             }
9315             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
9316               // The delay slot overwrites one of our conditions.
9317               // Allocate the branch condition registers instead.
9318               current.isconst=0;
9319               current.wasconst=0;
9320               regs[i].wasconst=0;
9321               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9322               if(!((current.is32>>rs1[i])&1))
9323               {
9324                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9325               }
9326             }
9327             else
9328             {
9329               ooo[i]=1;
9330               delayslot_alloc(&current,i+1);
9331             }
9332           }
9333           else
9334           // Don't alloc the delay slot yet because we might not execute it
9335           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
9336           {
9337             current.isconst=0;
9338             current.wasconst=0;
9339             regs[i].wasconst=0;
9340             alloc_cc(&current,i);
9341             dirty_reg(&current,CCREG);
9342             alloc_reg(&current,i,rs1[i]);
9343             alloc_reg(&current,i,rs2[i]);
9344             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9345             {
9346               alloc_reg64(&current,i,rs1[i]);
9347               alloc_reg64(&current,i,rs2[i]);
9348             }
9349           }
9350           else
9351           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
9352           {
9353             current.isconst=0;
9354             current.wasconst=0;
9355             regs[i].wasconst=0;
9356             alloc_cc(&current,i);
9357             dirty_reg(&current,CCREG);
9358             alloc_reg(&current,i,rs1[i]);
9359             if(!(current.is32>>rs1[i]&1))
9360             {
9361               alloc_reg64(&current,i,rs1[i]);
9362             }
9363           }
9364           ds=1;
9365           //current.isconst=0;
9366           break;
9367         case SJUMP:
9368           //current.isconst=0;
9369           //current.wasconst=0;
9370           //regs[i].wasconst=0;
9371           clear_const(&current,rs1[i]);
9372           clear_const(&current,rt1[i]);
9373           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
9374           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
9375           {
9376             alloc_cc(&current,i);
9377             dirty_reg(&current,CCREG);
9378             alloc_reg(&current,i,rs1[i]);
9379             if(!(current.is32>>rs1[i]&1))
9380             {
9381               alloc_reg64(&current,i,rs1[i]);
9382             }
9383             if (rt1[i]==31) { // BLTZAL/BGEZAL
9384               alloc_reg(&current,i,31);
9385               dirty_reg(&current,31);
9386               //#ifdef REG_PREFETCH
9387               //alloc_reg(&current,i,PTEMP);
9388               //#endif
9389               //current.is32|=1LL<<rt1[i];
9390             }
9391             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
9392                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
9393               // Allocate the branch condition registers instead.
9394               current.isconst=0;
9395               current.wasconst=0;
9396               regs[i].wasconst=0;
9397               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9398               if(!((current.is32>>rs1[i])&1))
9399               {
9400                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9401               }
9402             }
9403             else
9404             {
9405               ooo[i]=1;
9406               delayslot_alloc(&current,i+1);
9407             }
9408           }
9409           else
9410           // Don't alloc the delay slot yet because we might not execute it
9411           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
9412           {
9413             current.isconst=0;
9414             current.wasconst=0;
9415             regs[i].wasconst=0;
9416             alloc_cc(&current,i);
9417             dirty_reg(&current,CCREG);
9418             alloc_reg(&current,i,rs1[i]);
9419             if(!(current.is32>>rs1[i]&1))
9420             {
9421               alloc_reg64(&current,i,rs1[i]);
9422             }
9423           }
9424           ds=1;
9425           //current.isconst=0;
9426           break;
9427         case FJUMP:
9428           current.isconst=0;
9429           current.wasconst=0;
9430           regs[i].wasconst=0;
9431           if(likely[i]==0) // BC1F/BC1T
9432           {
9433             // TODO: Theoretically we can run out of registers here on x86.
9434             // The delay slot can allocate up to six, and we need to check
9435             // CSREG before executing the delay slot.  Possibly we can drop
9436             // the cycle count and then reload it after checking that the
9437             // FPU is in a usable state, or not do out-of-order execution at all.
9438             alloc_cc(&current,i);
9439             dirty_reg(&current,CCREG);
9440             alloc_reg(&current,i,FSREG);
9441             alloc_reg(&current,i,CSREG);
9442             if(itype[i+1]==FCOMP) {
9443               // The delay slot overwrites the branch condition.
9444               // Allocate the branch condition registers instead.
9445               alloc_cc(&current,i);
9446               dirty_reg(&current,CCREG);
9447               alloc_reg(&current,i,CSREG);
9448               alloc_reg(&current,i,FSREG);
9449             }
9450             else {
9451               ooo[i]=1;
9452               delayslot_alloc(&current,i+1);
9453               alloc_reg(&current,i+1,CSREG);
9454             }
9455           }
9456           else
9457           // Don't alloc the delay slot yet because we might not execute it
9458           if(likely[i]) // BC1FL/BC1TL
9459           {
9460             alloc_cc(&current,i);
9461             dirty_reg(&current,CCREG);
9462             alloc_reg(&current,i,CSREG);
9463             alloc_reg(&current,i,FSREG);
9464           }
9465           ds=1;
9466           current.isconst=0;
9467           break;
9468         case IMM16:
9469           imm16_alloc(&current,i);
9470           break;
9471         case LOAD:
9472         case LOADLR:
9473           load_alloc(&current,i);
9474           break;
9475         case STORE:
9476         case STORELR:
9477           store_alloc(&current,i);
9478           break;
9479         case ALU:
9480           alu_alloc(&current,i);
9481           break;
9482         case SHIFT:
9483           shift_alloc(&current,i);
9484           break;
9485         case MULTDIV:
9486           multdiv_alloc(&current,i);
9487           break;
9488         case SHIFTIMM:
9489           shiftimm_alloc(&current,i);
9490           break;
9491         case MOV:
9492           mov_alloc(&current,i);
9493           break;
9494         case COP0:
9495           cop0_alloc(&current,i);
9496           break;
9497         case COP1:
9498         case COP2:
9499           cop1_alloc(&current,i);
9500           break;
9501         case C1LS:
9502           c1ls_alloc(&current,i);
9503           break;
9504         case C2LS:
9505           c2ls_alloc(&current,i);
9506           break;
9507         case C2OP:
9508           c2op_alloc(&current,i);
9509           break;
9510         case FCONV:
9511           fconv_alloc(&current,i);
9512           break;
9513         case FLOAT:
9514           float_alloc(&current,i);
9515           break;
9516         case FCOMP:
9517           fcomp_alloc(&current,i);
9518           break;
9519         case SYSCALL:
9520         case HLECALL:
9521         case INTCALL:
9522           syscall_alloc(&current,i);
9523           break;
9524         case SPAN:
9525           pagespan_alloc(&current,i);
9526           break;
9527       }
9528
9529       // Drop the upper half of registers that have become 32-bit
9530       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9531       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9532         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9533         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9534         current.uu|=1;
9535       } else {
9536         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9537         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9538         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9539         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9540         current.uu|=1;
9541       }
9542
9543       // Create entry (branch target) regmap
9544       for(hr=0;hr<HOST_REGS;hr++)
9545       {
9546         int r,or,er;
9547         r=current.regmap[hr];
9548         if(r>=0) {
9549           if(r!=regmap_pre[i][hr]) {
9550             // TODO: delay slot (?)
9551             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9552             if(or<0||(r&63)>=TEMPREG){
9553               regs[i].regmap_entry[hr]=-1;
9554             }
9555             else
9556             {
9557               // Just move it to a different register
9558               regs[i].regmap_entry[hr]=r;
9559               // If it was dirty before, it's still dirty
9560               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9561             }
9562           }
9563           else
9564           {
9565             // Unneeded
9566             if(r==0){
9567               regs[i].regmap_entry[hr]=0;
9568             }
9569             else
9570             if(r<64){
9571               if((current.u>>r)&1) {
9572                 regs[i].regmap_entry[hr]=-1;
9573                 //regs[i].regmap[hr]=-1;
9574                 current.regmap[hr]=-1;
9575               }else
9576                 regs[i].regmap_entry[hr]=r;
9577             }
9578             else {
9579               if((current.uu>>(r&63))&1) {
9580                 regs[i].regmap_entry[hr]=-1;
9581                 //regs[i].regmap[hr]=-1;
9582                 current.regmap[hr]=-1;
9583               }else
9584                 regs[i].regmap_entry[hr]=r;
9585             }
9586           }
9587         } else {
9588           // Branches expect CCREG to be allocated at the target
9589           if(regmap_pre[i][hr]==CCREG)
9590             regs[i].regmap_entry[hr]=CCREG;
9591           else
9592             regs[i].regmap_entry[hr]=-1;
9593         }
9594       }
9595       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9596     }
9597
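         // Track MIPS registers recently used as a store base with a small
         // offset; writing to a register, or using it as a base with a large
         // offset, clears its bit again.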
9598     if(i>0&&(itype[i-1]==STORE||itype[i-1]==STORELR||(itype[i-1]==C2LS&&opcode[i-1]==0x3a))&&(u_int)imm[i-1]<0x800)
9599       current.waswritten|=1<<rs1[i-1];
9600     current.waswritten&=~(1<<rt1[i]);
9601     current.waswritten&=~(1<<rt2[i]);
9602     if((itype[i]==STORE||itype[i]==STORELR||(itype[i]==C2LS&&opcode[i]==0x3a))&&(u_int)imm[i]>=0x800)
9603       current.waswritten&=~(1<<rs1[i]);
9604
9605     /* Branch post-alloc */
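         // If the previous instruction was a branch, build branch_regs[i-1]:
         // the register state used on the taken path.  For "likely" branches
         // this includes the delay slot, which only runs when the branch is taken.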
9606     if(i>0)
9607     {
9608       current.was32=current.is32;
9609       current.wasdirty=current.dirty;
9610       switch(itype[i-1]) {
9611         case UJUMP:
9612           memcpy(&branch_regs[i-1],&current,sizeof(current));
9613           branch_regs[i-1].isconst=0;
9614           branch_regs[i-1].wasconst=0;
9615           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9616           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9617           alloc_cc(&branch_regs[i-1],i-1);
9618           dirty_reg(&branch_regs[i-1],CCREG);
9619           if(rt1[i-1]==31) { // JAL
9620             alloc_reg(&branch_regs[i-1],i-1,31);
9621             dirty_reg(&branch_regs[i-1],31);
9622             branch_regs[i-1].is32|=1LL<<31;
9623           }
9624           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9625           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
9626           break;
9627         case RJUMP:
9628           memcpy(&branch_regs[i-1],&current,sizeof(current));
9629           branch_regs[i-1].isconst=0;
9630           branch_regs[i-1].wasconst=0;
9631           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9632           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9633           alloc_cc(&branch_regs[i-1],i-1);
9634           dirty_reg(&branch_regs[i-1],CCREG);
9635           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9636           if(rt1[i-1]!=0) { // JALR
9637             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9638             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9639             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9640           }
9641           #ifdef USE_MINI_HT
9642           if(rs1[i-1]==31) { // JALR
9643             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9644             #ifndef HOST_IMM_ADDR32
9645             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9646             #endif
9647           }
9648           #endif
9649           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9650           memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
9651           break;
9652         case CJUMP:
9653           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9654           {
9655             alloc_cc(&current,i-1);
9656             dirty_reg(&current,CCREG);
9657             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9658                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9659               // The delay slot overwrote one of our conditions
9660               // Delay slot goes after the test (in order)
9661               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9662               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9663               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9664               current.u|=1;
9665               current.uu|=1;
9666               delayslot_alloc(&current,i);
9667               current.isconst=0;
9668             }
9669             else
9670             {
9671               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9672               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9673               // Alloc the branch condition registers
9674               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9675               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9676               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9677               {
9678                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9679                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9680               }
9681             }
9682             memcpy(&branch_regs[i-1],&current,sizeof(current));
9683             branch_regs[i-1].isconst=0;
9684             branch_regs[i-1].wasconst=0;
9685             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9686             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
9687           }
9688           else
9689           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9690           {
9691             alloc_cc(&current,i-1);
9692             dirty_reg(&current,CCREG);
9693             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9694               // The delay slot overwrote the branch condition
9695               // Delay slot goes after the test (in order)
9696               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9697               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9698               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9699               current.u|=1;
9700               current.uu|=1;
9701               delayslot_alloc(&current,i);
9702               current.isconst=0;
9703             }
9704             else
9705             {
9706               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9707               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9708               // Alloc the branch condition register
9709               alloc_reg(&current,i-1,rs1[i-1]);
9710               if(!(current.is32>>rs1[i-1]&1))
9711               {
9712                 alloc_reg64(&current,i-1,rs1[i-1]);
9713               }
9714             }
9715             memcpy(&branch_regs[i-1],&current,sizeof(current));
9716             branch_regs[i-1].isconst=0;
9717             branch_regs[i-1].wasconst=0;
9718             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9719             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
9720           }
9721           else
9722           // Alloc the delay slot in case the branch is taken
9723           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9724           {
9725             memcpy(&branch_regs[i-1],&current,sizeof(current));
9726             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9727             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9728             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9729             alloc_cc(&branch_regs[i-1],i);
9730             dirty_reg(&branch_regs[i-1],CCREG);
9731             delayslot_alloc(&branch_regs[i-1],i);
9732             branch_regs[i-1].isconst=0;
9733             alloc_reg(&current,i,CCREG); // Not taken path
9734             dirty_reg(&current,CCREG);
9735             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9736           }
9737           else
9738           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9739           {
9740             memcpy(&branch_regs[i-1],&current,sizeof(current));
9741             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9742             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9743             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9744             alloc_cc(&branch_regs[i-1],i);
9745             dirty_reg(&branch_regs[i-1],CCREG);
9746             delayslot_alloc(&branch_regs[i-1],i);
9747             branch_regs[i-1].isconst=0;
9748             alloc_reg(&current,i,CCREG); // Not taken path
9749             dirty_reg(&current,CCREG);
9750             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9751           }
9752           break;
9753         case SJUMP:
9754           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9755           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9756           {
9757             alloc_cc(&current,i-1);
9758             dirty_reg(&current,CCREG);
9759             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9760               // The delay slot overwrote the branch condition
9761               // Delay slot goes after the test (in order)
9762               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9763               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9764               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9765               current.u|=1;
9766               current.uu|=1;
9767               delayslot_alloc(&current,i);
9768               current.isconst=0;
9769             }
9770             else
9771             {
9772               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9773               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9774               // Alloc the branch condition register
9775               alloc_reg(&current,i-1,rs1[i-1]);
9776               if(!(current.is32>>rs1[i-1]&1))
9777               {
9778                 alloc_reg64(&current,i-1,rs1[i-1]);
9779               }
9780             }
9781             memcpy(&branch_regs[i-1],&current,sizeof(current));
9782             branch_regs[i-1].isconst=0;
9783             branch_regs[i-1].wasconst=0;
9784             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9785             memcpy(constmap[i],constmap[i-1],sizeof(current_constmap));
9786           }
9787           else
9788           // Alloc the delay slot in case the branch is taken
9789           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9790           {
9791             memcpy(&branch_regs[i-1],&current,sizeof(current));
9792             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9793             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9794             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9795             alloc_cc(&branch_regs[i-1],i);
9796             dirty_reg(&branch_regs[i-1],CCREG);
9797             delayslot_alloc(&branch_regs[i-1],i);
9798             branch_regs[i-1].isconst=0;
9799             alloc_reg(&current,i,CCREG); // Not taken path
9800             dirty_reg(&current,CCREG);
9801             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9802           }
9803           // FIXME: BLTZAL/BGEZAL
9804           if(opcode2[i-1]&0x10) { // BxxZAL
9805             alloc_reg(&branch_regs[i-1],i-1,31);
9806             dirty_reg(&branch_regs[i-1],31);
9807             branch_regs[i-1].is32|=1LL<<31;
9808           }
9809           break;
9810         case FJUMP:
9811           if(likely[i-1]==0) // BC1F/BC1T
9812           {
9813             alloc_cc(&current,i-1);
9814             dirty_reg(&current,CCREG);
9815             if(itype[i]==FCOMP) {
9816               // The delay slot overwrote the branch condition
9817               // Delay slot goes after the test (in order)
9818               delayslot_alloc(&current,i);
9819               current.isconst=0;
9820             }
9821             else
9822             {
9823               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9824               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9825               // Alloc the branch condition register
9826               alloc_reg(&current,i-1,FSREG);
9827             }
9828             memcpy(&branch_regs[i-1],&current,sizeof(current));
9829             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9830           }
9831           else // BC1FL/BC1TL
9832           {
9833             // Alloc the delay slot in case the branch is taken
9834             memcpy(&branch_regs[i-1],&current,sizeof(current));
9835             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9836             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9837             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9838             alloc_cc(&branch_regs[i-1],i);
9839             dirty_reg(&branch_regs[i-1],CCREG);
9840             delayslot_alloc(&branch_regs[i-1],i);
9841             branch_regs[i-1].isconst=0;
9842             alloc_reg(&current,i,CCREG); // Not taken path
9843             dirty_reg(&current,CCREG);
9844             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9845           }
9846           break;
9847       }
9848
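           // After an unconditional jump (or always-taken branch), this point is
           // only reached as a call return or an internal branch target, so set
           // up the register state expected here.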
9849       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9850       {
9851         if(rt1[i-1]==31) // JAL/JALR
9852         {
9853           // Subroutine call will return here, don't alloc any registers
9854           current.is32=1;
9855           current.dirty=0;
9856           clear_all_regs(current.regmap);
9857           alloc_reg(&current,i,CCREG);
9858           dirty_reg(&current,CCREG);
9859         }
9860         else if(i+1<slen)
9861         {
9862           // Internal branch will jump here, match registers to caller
9863           current.is32=0x3FFFFFFFFLL;
9864           current.dirty=0;
9865           clear_all_regs(current.regmap);
9866           alloc_reg(&current,i,CCREG);
9867           dirty_reg(&current,CCREG);
9868           for(j=i-1;j>=0;j--)
9869           {
9870             if(ba[j]==start+i*4+4) {
9871               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9872               current.is32=branch_regs[j].is32;
9873               current.dirty=branch_regs[j].dirty;
9874               break;
9875             }
9876           }
9877           while(j>=0) {
9878             if(ba[j]==start+i*4+4) {
9879               for(hr=0;hr<HOST_REGS;hr++) {
9880                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9881                   current.regmap[hr]=-1;
9882                 }
9883                 current.is32&=branch_regs[j].is32;
9884                 current.dirty&=branch_regs[j].dirty;
9885               }
9886             }
9887             j--;
9888           }
9889         }
9890       }
9891     }
9892
9893     // Count cycles in between branches
9894     ccadj[i]=cc;
9895     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9896     {
9897       cc=0;
9898     }
9899 #if defined(PCSX) && !defined(DRC_DBG)
9900     else if(itype[i]==C2OP&&gte_cycletab[source[i]&0x3f]>2)
9901     {
9902       // GTE runs in parallel until accessed, divide by 2 for a rough guess
9903       cc+=gte_cycletab[source[i]&0x3f]/2;
9904     }
9905     else if(/*itype[i]==LOAD||itype[i]==STORE||*/itype[i]==C1LS) // loads/stores cause weird timing issues
9906     {
9907       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
9908     }
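         // The penalties below are rough approximations (presumably store
         // buffering and GTE transfer stalls), not exact PSX timings.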
9909     else if(i>1&&itype[i]==STORE&&itype[i-1]==STORE&&itype[i-2]==STORE&&!bt[i])
9910     {
9911       cc+=4;
9912     }
9913     else if(itype[i]==C2LS)
9914     {
9915       cc+=4;
9916     }
9917 #endif
9918     else
9919     {
9920       cc++;
9921     }
9922
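         // Record the resulting register state for this instruction (delay
         // slots are skipped here).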
9923     flush_dirty_uppers(&current);
9924     if(!is_ds[i]) {
9925       regs[i].is32=current.is32;
9926       regs[i].dirty=current.dirty;
9927       regs[i].isconst=current.isconst;
9928       memcpy(constmap[i],current_constmap,sizeof(current_constmap));
9929     }
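         // If a host register was remapped at this instruction, the constant
         // it held before no longer applies, so clear its wasconst bit.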
9930     for(hr=0;hr<HOST_REGS;hr++) {
9931       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9932         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9933           regs[i].wasconst&=~(1<<hr);
9934         }
9935       }
9936     }
9937     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9938     regs[i].waswritten=current.waswritten;
9939   }
9940
9941   /* Pass 4 - Cull unused host registers */
9942
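       // Walk the block backwards, keeping a bitmask (nr) of host registers
       // that are still needed; anything not in the mask can be deallocated.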
9943   uint64_t nr=0;
9944
9945   for (i=slen-1;i>=0;i--)
9946   {
9947     int hr;
9948     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9949     {
9950       if(ba[i]<start || ba[i]>=(start+slen*4))
9951       {
9952         // Branch out of this block, don't need anything
9953         nr=0;
9954       }
9955       else
9956       {
9957         // Internal branch
9958         // Need whatever matches the target
9959         nr=0;
9960         int t=(ba[i]-start)>>2;
9961         for(hr=0;hr<HOST_REGS;hr++)
9962         {
9963           if(regs[i].regmap_entry[hr]>=0) {
9964             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9965           }
9966         }
9967       }
9968       // Conditional branch may need registers for following instructions
9969       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9970       {
9971         if(i<slen-2) {
9972           nr|=needed_reg[i+2];
9973           for(hr=0;hr<HOST_REGS;hr++)
9974           {
9975             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9976             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9977           }
9978         }
9979       }
9980       // Don't need stuff which is overwritten
9981       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9982       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9983       // Merge in delay slot
9984       for(hr=0;hr<HOST_REGS;hr++)
9985       {
9986         if(!likely[i]) {
9987           // These are overwritten unless the branch is "likely"
9988           // and the delay slot is nullified if not taken
9989           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9990           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9991         }
9992         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9993         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9994         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9995         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9996         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9997         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9998         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9999         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
10000         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
10001           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10002           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10003         }
10004         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
10005           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10006           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10007         }
10008         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
10009           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
10010           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
10011         }
10012       }
10013     }
10014     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
10015     {
10016       // SYSCALL instruction (software interrupt)
10017       nr=0;
10018     }
10019     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10020     {
10021       // ERET instruction (return from interrupt)
10022       nr=0;
10023     }
10024     else // Non-branch
10025     {
10026       if(i<slen-1) {
10027         for(hr=0;hr<HOST_REGS;hr++) {
10028           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
10029           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
10030           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
10031           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
10032         }
10033       }
10034     }
10035     for(hr=0;hr<HOST_REGS;hr++)
10036     {
10037       // Overwritten registers are not needed
10038       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
10039       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
10040       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
10041       // Source registers are needed
10042       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10043       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10044       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
10045       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
10046       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10047       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10048       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
10049       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
10050       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
10051         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10052         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10053       }
10054       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
10055         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10056         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10057       }
10058       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
10059         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
10060         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
10061       }
10062       // Don't store a register immediately after writing it,
10063       // as that may prevent dual-issue.
10064       // But do store it if this is a branch target; otherwise we
10065       // might have to load the register before the branch.
10066       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
10067         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
10068            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
10069           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10070           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
10071         }
10072         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
10073            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
10074           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10075           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
10076         }
10077       }
10078     }
10079     // Cycle count is needed at branches.  Assume it is needed at the target too.
10080     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
10081       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
10082       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
10083     }
10084     // Save it
10085     needed_reg[i]=nr;
10086
10087     // Deallocate unneeded registers
10088     for(hr=0;hr<HOST_REGS;hr++)
10089     {
10090       if(!((nr>>hr)&1)) {
10091         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
10092         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
10093            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10094            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
10095         {
10096           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10097           {
10098             if(likely[i]) {
10099               regs[i].regmap[hr]=-1;
10100               regs[i].isconst&=~(1<<hr);
10101               if(i<slen-2) {
10102                 regmap_pre[i+2][hr]=-1;
10103                 regs[i+2].wasconst&=~(1<<hr);
10104               }
10105             }
10106           }
10107         }
10108         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10109         {
10110           int d1=0,d2=0,map=0,temp=0;
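                // Work out which helper registers the delay slot may still use,
                // then only deallocate host registers that neither the branch
                // nor its delay slot (i+1) references.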
10111           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
10112           {
10113             d1=dep1[i+1];
10114             d2=dep2[i+1];
10115           }
10116           if(using_tlb) {
10117             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
10118                itype[i+1]==STORE || itype[i+1]==STORELR ||
10119                itype[i+1]==C1LS || itype[i+1]==C2LS)
10120             map=TLREG;
10121           } else
10122           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
10123              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
10124             map=INVCP;
10125           }
10126           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
10127              itype[i+1]==C1LS || itype[i+1]==C2LS)
10128             temp=FTEMP;
10129           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
10130              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10131              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
10132              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
10133              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
10134              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
10135              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
10136              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
10137              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
10138              regs[i].regmap[hr]!=map )
10139           {
10140             regs[i].regmap[hr]=-1;
10141             regs[i].isconst&=~(1<<hr);
10142             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
10143                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
10144                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
10145                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
10146                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
10147                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
10148                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
10149                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
10150                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
10151                branch_regs[i].regmap[hr]!=map)
10152             {
10153               branch_regs[i].regmap[hr]=-1;
10154               branch_regs[i].regmap_entry[hr]=-1;
10155               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10156               {
10157                 if(!likely[i]&&i<slen-2) {
10158                   regmap_pre[i+2][hr]=-1;
10159                   regs[i+2].wasconst&=~(1<<hr);
10160                 }
10161               }
10162             }
10163           }
10164         }
10165         else
10166         {
10167           // Non-branch
10168           if(i>0)
10169           {
10170             int d1=0,d2=0,map=-1,temp=-1;
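                  // Only deallocate host registers that this instruction itself
                  // does not reference (as a source, destination, or address/temp
                  // register).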
10171             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
10172             {
10173               d1=dep1[i];
10174               d2=dep2[i];
10175             }
10176             if(using_tlb) {
10177               if(itype[i]==LOAD || itype[i]==LOADLR ||
10178                  itype[i]==STORE || itype[i]==STORELR ||
10179                  itype[i]==C1LS || itype[i]==C2LS)
10180               map=TLREG;
10181             } else if(itype[i]==STORE || itype[i]==STORELR ||
10182                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
10183               map=INVCP;
10184             }
10185             if(itype[i]==LOADLR || itype[i]==STORELR ||
10186                itype[i]==C1LS || itype[i]==C2LS)
10187               temp=FTEMP;
10188             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10189                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
10190                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
10191                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
10192                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
10193                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
10194             {
10195               if(i<slen-1&&!is_ds[i]) {
10196                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
10197                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
10198                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
10199                 {
10200                   SysPrintf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
10201                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
10202                 }
10203                 regmap_pre[i+1][hr]=-1;
10204                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
10205                 regs[i+1].wasconst&=~(1<<hr);
10206               }
10207               regs[i].regmap[hr]=-1;
10208               regs[i].isconst&=~(1<<hr);
10209             }
10210           }
10211         }
10212       }
10213     }
10214   }
10215
10216   /* Pass 5 - Pre-allocate registers */
10217
10218   // If a register is allocated during a loop, try to keep it allocated for
10219   // the entire loop, if possible.  This avoids loading and storing registers
10220   // inside the loop.
10221
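        // f_regmap is the candidate host-to-MIPS mapping that we try to extend
        // across an entire loop body.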
10222   signed char f_regmap[HOST_REGS];
10223   clear_all_regs(f_regmap);
10224   for(i=0;i<slen-1;i++)
10225   {
10226     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10227     {
10228       if(ba[i]>=start && ba[i]<(start+i*4))
10229       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
10230       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
10231       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
10232       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
10233       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
10234       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
10235       {
10236         int t=(ba[i]-start)>>2;
10237         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
10238         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
10239         for(hr=0;hr<HOST_REGS;hr++)
10240         {
10241           if(regs[i].regmap[hr]>64) {
10242             if(!((regs[i].dirty>>hr)&1))
10243               f_regmap[hr]=regs[i].regmap[hr];
10244             else f_regmap[hr]=-1;
10245           }
10246           else if(regs[i].regmap[hr]>=0) {
10247             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10248               // dealloc old register
10249               int n;
10250               for(n=0;n<HOST_REGS;n++)
10251               {
10252                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10253               }
10254               // and alloc new one
10255               f_regmap[hr]=regs[i].regmap[hr];
10256             }
10257           }
10258           if(branch_regs[i].regmap[hr]>64) {
10259             if(!((branch_regs[i].dirty>>hr)&1))
10260               f_regmap[hr]=branch_regs[i].regmap[hr];
10261             else f_regmap[hr]=-1;
10262           }
10263           else if(branch_regs[i].regmap[hr]>=0) {
10264             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
10265               // dealloc old register
10266               int n;
10267               for(n=0;n<HOST_REGS;n++)
10268               {
10269                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
10270               }
10271               // and alloc new one
10272               f_regmap[hr]=branch_regs[i].regmap[hr];
10273             }
10274           }
10275           if(ooo[i]) {
10276             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1])
10277               f_regmap[hr]=branch_regs[i].regmap[hr];
10278           }else{
10279             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1])
10280               f_regmap[hr]=branch_regs[i].regmap[hr];
10281           }
10282           // Avoid dirty->clean transition
10283           #ifdef DESTRUCTIVE_WRITEBACK
10284           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
10285           #endif
10286           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
10287           // case above, but it's always a good idea.  We can't hoist the
10288           // load if the register was already allocated, so there's no point
10289           // wasting time analyzing most of these cases.  It only "succeeds"
10290           // when the mapping was different and the load can be replaced with
10291           // a mov, which is of negligible benefit.  So such cases are
10292           // skipped below.
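                // Scan from the branch target t up to the branch at i; if the
                // register can stay allocated over the whole range, extend the
                // allocation.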
10293           if(f_regmap[hr]>0) {
10294             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
10295               int r=f_regmap[hr];
10296               for(j=t;j<=i;j++)
10297               {
10298                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10299                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
10300                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
10301                 if(r>63) {
10302                   // NB This can exclude the case where the upper-half
10303                   // register is lower numbered than the lower-half
10304                   // register.  Not sure if it's worth fixing...
10305                   if(get_reg(regs[j].regmap,r&63)<0) break;
10306                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
10307                   if(regs[j].is32&(1LL<<(r&63))) break;
10308                 }
10309                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
10310                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10311                   int k;
10312                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
10313                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
10314                     if(r>63) {
10315                       if(get_reg(regs[i].regmap,r&63)<0) break;
10316                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
10317                     }
10318                     k=i;
10319                     while(k>1&&regs[k-1].regmap[hr]==-1) {
10320                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10321                         //printf("no free regs for store %x\n",start+(k-1)*4);
10322                         break;
10323                       }
10324                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
10325                         //printf("no-match due to different register\n");
10326                         break;
10327                       }
10328                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
10329                         //printf("no-match due to branch\n");
10330                         break;
10331                       }
10332                       // call/ret fast path assumes no registers allocated
10333                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
10334                         break;
10335                       }
10336                       if(r>63) {
10337                         // NB This can exclude the case where the upper-half
10338                         // register is lower numbered than the lower-half
10339                         // register.  Not sure if it's worth fixing...
10340                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
10341                         if(regs[k-1].is32&(1LL<<(r&63))) break;
10342                       }
10343                       k--;
10344                     }
10345                     if(i<slen-1) {
10346                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
10347                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
10348                         //printf("bad match after branch\n");
10349                         break;
10350                       }
10351                     }
10352                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
10353                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
10354                       while(k<i) {
10355                         regs[k].regmap_entry[hr]=f_regmap[hr];
10356                         regs[k].regmap[hr]=f_regmap[hr];
10357                         regmap_pre[k+1][hr]=f_regmap[hr];
10358                         regs[k].wasdirty&=~(1<<hr);
10359                         regs[k].dirty&=~(1<<hr);
10360                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
10361                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
10362                         regs[k].wasconst&=~(1<<hr);
10363                         regs[k].isconst&=~(1<<hr);
10364                         k++;
10365                       }
10366                     }
10367                     else {
10368                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
10369                       break;
10370                     }
10371                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
10372                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
10373                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
10374                       regs[i].regmap_entry[hr]=f_regmap[hr];
10375                       regs[i].regmap[hr]=f_regmap[hr];
10376                       regs[i].wasdirty&=~(1<<hr);
10377                       regs[i].dirty&=~(1<<hr);
10378                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
10379                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
10380                       regs[i].wasconst&=~(1<<hr);
10381                       regs[i].isconst&=~(1<<hr);
10382                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
10383                       branch_regs[i].wasdirty&=~(1<<hr);
10384                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
10385                       branch_regs[i].regmap[hr]=f_regmap[hr];
10386                       branch_regs[i].dirty&=~(1<<hr);
10387                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
10388                       branch_regs[i].wasconst&=~(1<<hr);
10389                       branch_regs[i].isconst&=~(1<<hr);
10390                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
10391                         regmap_pre[i+2][hr]=f_regmap[hr];
10392                         regs[i+2].wasdirty&=~(1<<hr);
10393                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
10394                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
10395                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
10396                       }
10397                     }
10398                   }
10399                   for(k=t;k<j;k++) {
10400                     // Alloc the register clean at the beginning of the loop;
10401                     // pass 6 may still dirty it
10402                     regs[k].regmap_entry[hr]=f_regmap[hr];
10403                     regs[k].regmap[hr]=f_regmap[hr];
10404                     regs[k].dirty&=~(1<<hr);
10405                     regs[k].wasconst&=~(1<<hr);
10406                     regs[k].isconst&=~(1<<hr);
10407                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
10408                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
10409                       branch_regs[k].regmap[hr]=f_regmap[hr];
10410                       branch_regs[k].dirty&=~(1<<hr);
10411                       branch_regs[k].wasconst&=~(1<<hr);
10412                       branch_regs[k].isconst&=~(1<<hr);
10413                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
10414                         regmap_pre[k+2][hr]=f_regmap[hr];
10415                         regs[k+2].wasdirty&=~(1<<hr);
10416                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
10417                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
10418                       }
10419                     }
10420                     else
10421                     {
10422                       regmap_pre[k+1][hr]=f_regmap[hr];
10423                       regs[k+1].wasdirty&=~(1<<hr);
10424                     }
10425                   }
10426                   if(regs[j].regmap[hr]==f_regmap[hr])
10427                     regs[j].regmap_entry[hr]=f_regmap[hr];
10428                   break;
10429                 }
10430                 if(j==i) break;
10431                 if(regs[j].regmap[hr]>=0)
10432                   break;
10433                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
10434                   //printf("no-match due to different register\n");
10435                   break;
10436                 }
10437                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
10438                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
10439                   break;
10440                 }
10441                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10442                 {
10443                   // Stop on unconditional branch
10444                   break;
10445                 }
10446                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
10447                 {
10448                   if(ooo[j]) {
10449                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1])
10450                       break;
10451                   }else{
10452                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1])
10453                       break;
10454                   }
10455                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
10456                     //printf("no-match due to different register (branch)\n");
10457                     break;
10458                   }
10459                 }
10460                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10461                   //printf("No free regs for store %x\n",start+j*4);
10462                   break;
10463                 }
10464                 if(f_regmap[hr]>=64) {
10465                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
10466                     break;
10467                   }
10468                   else
10469                   {
10470                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
10471                       break;
10472                     }
10473                   }
10474                 }
10475               }
10476             }
10477           }
10478         }
10479       }
10480     }else{
10481       // Non-branch or undetermined branch target
10482       for(hr=0;hr<HOST_REGS;hr++)
10483       {
10484         if(hr!=EXCLUDE_REG) {
10485           if(regs[i].regmap[hr]>64) {
10486             if(!((regs[i].dirty>>hr)&1))
10487               f_regmap[hr]=regs[i].regmap[hr];
10488           }
10489           else if(regs[i].regmap[hr]>=0) {
10490             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10491               // dealloc old register
10492               int n;
10493               for(n=0;n<HOST_REGS;n++)
10494               {
10495                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10496               }
10497               // and alloc new one
10498               f_regmap[hr]=regs[i].regmap[hr];
10499             }
10500           }
10501         }
10502       }
10503       // Try to restore cycle count at branch targets
10504       if(bt[i]) {
10505         for(j=i;j<slen-1;j++) {
10506           if(regs[j].regmap[HOST_CCREG]!=-1) break;
10507           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10508             //printf("no free regs for store %x\n",start+j*4);
10509             break;
10510           }
10511         }
10512         if(regs[j].regmap[HOST_CCREG]==CCREG) {
10513           int k=i;
10514           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10515           while(k<j) {
10516             regs[k].regmap_entry[HOST_CCREG]=CCREG;
10517             regs[k].regmap[HOST_CCREG]=CCREG;
10518             regmap_pre[k+1][HOST_CCREG]=CCREG;
10519             regs[k+1].wasdirty|=1<<HOST_CCREG;
10520             regs[k].dirty|=1<<HOST_CCREG;
10521             regs[k].wasconst&=~(1<<HOST_CCREG);
10522             regs[k].isconst&=~(1<<HOST_CCREG);
10523             k++;
10524           }
10525           regs[j].regmap_entry[HOST_CCREG]=CCREG;
10526         }
10527         // Work backwards from the branch target
10528         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10529         {
10530           //printf("Extend backwards\n");
10531           int k;
10532           k=i;
10533           while(regs[k-1].regmap[HOST_CCREG]==-1) {
10534             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10535               //printf("no free regs for store %x\n",start+(k-1)*4);
10536               break;
10537             }
10538             k--;
10539           }
10540           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10541             //printf("Extend CC, %x ->\n",start+k*4);
10542             while(k<=i) {
10543               regs[k].regmap_entry[HOST_CCREG]=CCREG;
10544               regs[k].regmap[HOST_CCREG]=CCREG;
10545               regmap_pre[k+1][HOST_CCREG]=CCREG;
10546               regs[k+1].wasdirty|=1<<HOST_CCREG;
10547               regs[k].dirty|=1<<HOST_CCREG;
10548               regs[k].wasconst&=~(1<<HOST_CCREG);
10549               regs[k].isconst&=~(1<<HOST_CCREG);
10550               k++;
10551             }
10552           }
10553           else {
10554             //printf("Fail Extend CC, %x ->\n",start+k*4);
10555           }
10556         }
10557       }
10558       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10559          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10560          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
10561          itype[i]!=FCONV&&itype[i]!=FCOMP)
10562       {
10563         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10564       }
10565     }
10566   }
10567
10568   // Cache the memory offset or TLB map pointer in a host register if one is available
10569   #ifndef HOST_IMM_ADDR32
10570   #ifndef RAM_OFFSET
10571   if(using_tlb)
10572   #endif
10573   {
10574     int earliest_available[HOST_REGS];
10575     int loop_start[HOST_REGS];
10576     int score[HOST_REGS];
10577     int end[HOST_REGS];
10578     int reg=using_tlb?MMREG:ROREG;
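          // Scoring: for each host register, count how many nearby memory
          // accesses could use a cached map/offset pointer held in it.
          // earliest_available is the first instruction where the register is
          // free; loop_start/end bound the range over which it would be allocated.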
10579
10580     // Init
10581     for(hr=0;hr<HOST_REGS;hr++) {
10582       score[hr]=0;earliest_available[hr]=0;
10583       loop_start[hr]=MAXBLOCK;
10584     }
10585     for(i=0;i<slen-1;i++)
10586     {
10587       // Can't do anything if no registers are available
10588       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
10589         for(hr=0;hr<HOST_REGS;hr++) {
10590           score[hr]=0;earliest_available[hr]=i+1;
10591           loop_start[hr]=MAXBLOCK;
10592         }
10593       }
10594       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10595         if(!ooo[i]) {
10596           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
10597             for(hr=0;hr<HOST_REGS;hr++) {
10598               score[hr]=0;earliest_available[hr]=i+1;
10599               loop_start[hr]=MAXBLOCK;
10600             }
10601           }
10602         }else{
10603           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
10604             for(hr=0;hr<HOST_REGS;hr++) {
10605               score[hr]=0;earliest_available[hr]=i+1;
10606               loop_start[hr]=MAXBLOCK;
10607             }
10608           }
10609         }
10610       }
10611       // Mark unavailable registers
10612       for(hr=0;hr<HOST_REGS;hr++) {
10613         if(regs[i].regmap[hr]>=0) {
10614           score[hr]=0;earliest_available[hr]=i+1;
10615           loop_start[hr]=MAXBLOCK;
10616         }
10617         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10618           if(branch_regs[i].regmap[hr]>=0) {
10619             score[hr]=0;earliest_available[hr]=i+2;
10620             loop_start[hr]=MAXBLOCK;
10621           }
10622         }
10623       }
10624       // No register allocations after unconditional jumps
10625       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10626       {
10627         for(hr=0;hr<HOST_REGS;hr++) {
10628           score[hr]=0;earliest_available[hr]=i+2;
10629           loop_start[hr]=MAXBLOCK;
10630         }
10631         i++; // Skip delay slot too
10632         //printf("skip delay slot: %x\n",start+i*4);
10633       }
10634       else
10635       // Possible match
10636       if(itype[i]==LOAD||itype[i]==LOADLR||
10637          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
10638         for(hr=0;hr<HOST_REGS;hr++) {
10639           if(hr!=EXCLUDE_REG) {
10640             end[hr]=i-1;
10641             for(j=i;j<slen-1;j++) {
10642               if(regs[j].regmap[hr]>=0) break;
10643               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10644                 if(branch_regs[j].regmap[hr]>=0) break;
10645                 if(ooo[j]) {
10646                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
10647                 }else{
10648                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
10649                 }
10650               }
10651               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
10652               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10653                 int t=(ba[j]-start)>>2;
10654                 if(t<j&&t>=earliest_available[hr]) {
10655                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
10656                     // Score a point for hoisting loop invariant
10657                     if(t<loop_start[hr]) loop_start[hr]=t;
10658                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
10659                     score[hr]++;
10660                     end[hr]=j;
10661                   }
10662                 }
10663                 else if(t<j) {
10664                   if(regs[t].regmap[hr]==reg) {
10665                     // Score a point if the branch target matches this register
10666                     score[hr]++;
10667                     end[hr]=j;
10668                   }
10669                 }
10670                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
10671                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
10672                   score[hr]++;
10673                   end[hr]=j;
10674                 }
10675               }
10676               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10677               {
10678                 // Stop on unconditional branch
10679                 break;
10680               }
10681               else
10682               if(itype[j]==LOAD||itype[j]==LOADLR||
10683                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
10684                 score[hr]++;
10685                 end[hr]=j;
10686               }
10687             }
10688           }
10689         }
10690         // Find highest score and allocate that register
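        // A score of 1 is presumably not worth it, since loading the cached
        // offset/map pointer costs an instruction itself; only ranges with
        // two or more uses pass the score>1 test below.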
10691         int maxscore=0;
10692         for(hr=0;hr<HOST_REGS;hr++) {
10693           if(hr!=EXCLUDE_REG) {
10694             if(score[hr]>score[maxscore]) {
10695               maxscore=hr;
10696               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
10697             }
10698           }
10699         }
10700         if(score[maxscore]>1)
10701         {
10702           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
10703           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
10704             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
10705             assert(regs[j].regmap[maxscore]<0);
10706             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
10707             regs[j].regmap[maxscore]=reg;
10708             regs[j].dirty&=~(1<<maxscore);
10709             regs[j].wasconst&=~(1<<maxscore);
10710             regs[j].isconst&=~(1<<maxscore);
10711             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10712               branch_regs[j].regmap[maxscore]=reg;
10713               branch_regs[j].wasdirty&=~(1<<maxscore);
10714               branch_regs[j].dirty&=~(1<<maxscore);
10715               branch_regs[j].wasconst&=~(1<<maxscore);
10716               branch_regs[j].isconst&=~(1<<maxscore);
10717               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
10718                 regmap_pre[j+2][maxscore]=reg;
10719                 regs[j+2].wasdirty&=~(1<<maxscore);
10720               }
10721               // loop optimization (loop_preload)
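              // If this branch jumps back to the start of the range we just
              // claimed, record the cached value in regmap_entry at the loop
              // head so loop_preload() can load it before the loop is entered
              // rather than on every iteration.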
10722               int t=(ba[j]-start)>>2;
10723               if(t==loop_start[maxscore]) {
10724                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
10725                   regs[t].regmap_entry[maxscore]=reg;
10726               }
10727             }
10728             else
10729             {
10730               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
10731                 regmap_pre[j+1][maxscore]=reg;
10732                 regs[j+1].wasdirty&=~(1<<maxscore);
10733               }
10734             }
10735           }
10736           i=j-1;
10737           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
10738           for(hr=0;hr<HOST_REGS;hr++) {
10739             score[hr]=0;earliest_available[hr]=i+1;
10740             loop_start[hr]=MAXBLOCK;
10741           }
10742         }
10743       }
10744     }
10745   }
10746   #endif
10747
10748   // This allocates registers (if possible) one instruction prior
10749   // to use, which can avoid a load-use penalty on certain CPUs.
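  // Illustrative (hypothetical) MIPS pair:
  //   addiu $t0, $t0, 4       <- while this one is assembled...
  //   addu  $t2, $t3, $t4     <- ...$t3/$t4 are already in their host regs
  // The checks below only do this when the host register is free at i and
  // the next instruction's entry for that register is still unassigned.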
10750   for(i=0;i<slen-1;i++)
10751   {
10752     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10753     {
10754       if(!bt[i+1])
10755       {
10756         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10757            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
10758         {
10759           if(rs1[i+1]) {
10760             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10761             {
10762               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10763               {
10764                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10765                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10766                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10767                 regs[i].isconst&=~(1<<hr);
10768                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10769                 constmap[i][hr]=constmap[i+1][hr];
10770                 regs[i+1].wasdirty&=~(1<<hr);
10771                 regs[i].dirty&=~(1<<hr);
10772               }
10773             }
10774           }
10775           if(rs2[i+1]) {
10776             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10777             {
10778               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10779               {
10780                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10781                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10782                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10783                 regs[i].isconst&=~(1<<hr);
10784                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10785                 constmap[i][hr]=constmap[i+1][hr];
10786                 regs[i+1].wasdirty&=~(1<<hr);
10787                 regs[i].dirty&=~(1<<hr);
10788               }
10789             }
10790           }
10791           // Preload target address for load instruction (non-constant)
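          // The load's destination is dead until the load completes, so its
          // host register can temporarily hold the base address (rs1) one
          // instruction early when rs1 has no host register of its own.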
10792           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10793             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10794             {
10795               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10796               {
10797                 regs[i].regmap[hr]=rs1[i+1];
10798                 regmap_pre[i+1][hr]=rs1[i+1];
10799                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10800                 regs[i].isconst&=~(1<<hr);
10801                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10802                 constmap[i][hr]=constmap[i+1][hr];
10803                 regs[i+1].wasdirty&=~(1<<hr);
10804                 regs[i].dirty&=~(1<<hr);
10805               }
10806             }
10807           }
10808           // Load source into target register
10809           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10810             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10811             {
10812               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10813               {
10814                 regs[i].regmap[hr]=rs1[i+1];
10815                 regmap_pre[i+1][hr]=rs1[i+1];
10816                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10817                 regs[i].isconst&=~(1<<hr);
10818                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10819                 constmap[i][hr]=constmap[i+1][hr];
10820                 regs[i+1].wasdirty&=~(1<<hr);
10821                 regs[i].dirty&=~(1<<hr);
10822               }
10823             }
10824           }
10825           // Preload map address
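          // Roughly: when the next memory op's base address was constant, the
          // host register that will hold the map pointer (TLREG) gets a
          // map-generation temp (MGEN1/MGEN2) mapped one instruction early;
          // if that register is busy, TLREG is first moved elsewhere below.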
10826           #ifndef HOST_IMM_ADDR32
10827           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10828             hr=get_reg(regs[i+1].regmap,TLREG);
10829             if(hr>=0) {
10830               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10831               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10832                 int nr;
10833                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10834                 {
10835                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10836                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10837                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10838                   regs[i].isconst&=~(1<<hr);
10839                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10840                   constmap[i][hr]=constmap[i+1][hr];
10841                   regs[i+1].wasdirty&=~(1<<hr);
10842                   regs[i].dirty&=~(1<<hr);
10843                 }
10844                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10845                 {
10846                   // move it to another register
10847                   regs[i+1].regmap[hr]=-1;
10848                   regmap_pre[i+2][hr]=-1;
10849                   regs[i+1].regmap[nr]=TLREG;
10850                   regmap_pre[i+2][nr]=TLREG;
10851                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10852                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10853                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10854                   regs[i].isconst&=~(1<<nr);
10855                   regs[i+1].isconst&=~(1<<nr);
10856                   regs[i].dirty&=~(1<<nr);
10857                   regs[i+1].wasdirty&=~(1<<nr);
10858                   regs[i+1].dirty&=~(1<<nr);
10859                   regs[i+2].wasdirty&=~(1<<nr);
10860                 }
10861               }
10862             }
10863           }
10864           #endif
10865           // Address for store instruction (non-constant)
10866           if(itype[i+1]==STORE||itype[i+1]==STORELR
10867              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10868             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10869               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10870               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10871               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10872               assert(hr>=0);
10873               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10874               {
10875                 regs[i].regmap[hr]=rs1[i+1];
10876                 regmap_pre[i+1][hr]=rs1[i+1];
10877                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10878                 regs[i].isconst&=~(1<<hr);
10879                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10880                 constmap[i][hr]=constmap[i+1][hr];
10881                 regs[i+1].wasdirty&=~(1<<hr);
10882                 regs[i].dirty&=~(1<<hr);
10883               }
10884             }
10885           }
10886           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10887             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10888               int nr;
10889               hr=get_reg(regs[i+1].regmap,FTEMP);
10890               assert(hr>=0);
10891               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10892               {
10893                 regs[i].regmap[hr]=rs1[i+1];
10894                 regmap_pre[i+1][hr]=rs1[i+1];
10895                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10896                 regs[i].isconst&=~(1<<hr);
10897                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10898                 constmap[i][hr]=constmap[i+1][hr];
10899                 regs[i+1].wasdirty&=~(1<<hr);
10900                 regs[i].dirty&=~(1<<hr);
10901               }
10902               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10903               {
10904                 // move it to another register
10905                 regs[i+1].regmap[hr]=-1;
10906                 regmap_pre[i+2][hr]=-1;
10907                 regs[i+1].regmap[nr]=FTEMP;
10908                 regmap_pre[i+2][nr]=FTEMP;
10909                 regs[i].regmap[nr]=rs1[i+1];
10910                 regmap_pre[i+1][nr]=rs1[i+1];
10911                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10912                 regs[i].isconst&=~(1<<nr);
10913                 regs[i+1].isconst&=~(1<<nr);
10914                 regs[i].dirty&=~(1<<nr);
10915                 regs[i+1].wasdirty&=~(1<<nr);
10916                 regs[i+1].dirty&=~(1<<nr);
10917                 regs[i+2].wasdirty&=~(1<<nr);
10918               }
10919             }
10920           }
10921           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10922             if(itype[i+1]==LOAD)
10923               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10924             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10925               hr=get_reg(regs[i+1].regmap,FTEMP);
10926             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10927               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10928               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10929             }
10930             if(hr>=0&&regs[i].regmap[hr]<0) {
10931               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10932               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10933                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10934                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10935                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10936                 regs[i].isconst&=~(1<<hr);
10937                 regs[i+1].wasdirty&=~(1<<hr);
10938                 regs[i].dirty&=~(1<<hr);
10939               }
10940             }
10941           }
10942         }
10943       }
10944     }
10945   }
10946
10947   /* Pass 6 - Optimize clean/dirty state */
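  // Rough summary: clean_registers() decides, for each point in the block,
  // which dirty host registers can stay dirty and which must be written back,
  // so redundant writebacks are dropped (see its definition for exact rules).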
10948   clean_registers(0,slen-1,1);
10949
10950   /* Pass 7 - Identify 32-bit registers */
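  // Backwards pass (64-bit builds only): requires_32bit[i] is a bitmap of
  // MIPS registers whose 32-bit status must be preserved at instruction i,
  // built from the needs of following instructions and branch targets.
  // FORCE32 builds only keep the branch-target marking in the #else branch.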
10951 #ifndef FORCE32
10952   provisional_r32();
10953
10954   u_int r32=0;
10955
10956   for (i=slen-1;i>=0;i--)
10957   {
10958     int hr;
10959     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10960     {
10961       if(ba[i]<start || ba[i]>=(start+slen*4))
10962       {
10963         // Branch out of this block, don't need anything
10964         r32=0;
10965       }
10966       else
10967       {
10968         // Internal branch
10969         // Need whatever matches the target
10970         // (and doesn't get overwritten by the delay slot instruction)
10971         r32=0;
10972         int t=(ba[i]-start)>>2;
10973         if(ba[i]>start+i*4) {
10974           // Forward branch
10975           if(!(requires_32bit[t]&~regs[i].was32))
10976             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10977         }else{
10978           // Backward branch
10979           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10980           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10981           if(!(pr32[t]&~regs[i].was32))
10982             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10983         }
10984       }
10985       // Conditional branch may need registers for following instructions
10986       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10987       {
10988         if(i<slen-2) {
10989           r32|=requires_32bit[i+2];
10990           r32&=regs[i].was32;
10991           // Mark this address as a branch target since it may be called
10992           // upon return from interrupt
10993           bt[i+2]=1;
10994         }
10995       }
10996       // Merge in delay slot
10997       if(!likely[i]) {
10998         // These are overwritten unless the branch is "likely"
10999         // and the delay slot is nullified if not taken
11000         r32&=~(1LL<<rt1[i+1]);
11001         r32&=~(1LL<<rt2[i+1]);
11002       }
11003       // Assume these are needed (delay slot)
11004       if(us1[i+1]>0)
11005       {
11006         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
11007       }
11008       if(us2[i+1]>0)
11009       {
11010         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
11011       }
11012       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
11013       {
11014         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
11015       }
11016       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
11017       {
11018         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
11019       }
11020     }
11021     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
11022     {
11023       // SYSCALL instruction (software interrupt)
11024       r32=0;
11025     }
11026     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
11027     {
11028       // ERET instruction (return from interrupt)
11029       r32=0;
11030     }
11031     // Check 32 bits
11032     r32&=~(1LL<<rt1[i]);
11033     r32&=~(1LL<<rt2[i]);
11034     if(us1[i]>0)
11035     {
11036       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
11037     }
11038     if(us2[i]>0)
11039     {
11040       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
11041     }
11042     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
11043     {
11044       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
11045     }
11046     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
11047     {
11048       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
11049     }
11050     requires_32bit[i]=r32;
11051
11052     // Dirty registers which are 32-bit require 32-bit input,
11053     // as they will be written back as 32-bit values
11054     for(hr=0;hr<HOST_REGS;hr++)
11055     {
11056       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
11057         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
11058           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
11059           requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
11060         }
11061       }
11062     }
11063     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
11064   }
11065 #else
11066   for (i=slen-1;i>=0;i--)
11067   {
11068     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
11069     {
11070       // Conditional branch
11071       if((source[i]>>16)!=0x1000&&i<slen-2) {
11072         // Mark this address as a branch target since it may be called
11073         // upon return from interrupt
11074         bt[i+2]=1;
11075       }
11076     }
11077   }
11078 #endif
11079
11080   if(itype[slen-1]==SPAN) {
11081     bt[slen-1]=1; // Mark as a branch target so the instruction can restart after an exception
11082   }
11083
11084 #ifdef DISASM
11085   /* Debug/disassembly */
11086   for(i=0;i<slen;i++)
11087   {
11088     printf("U:");
11089     int r;
11090     for(r=1;r<=CCREG;r++) {
11091       if((unneeded_reg[i]>>r)&1) {
11092         if(r==HIREG) printf(" HI");
11093         else if(r==LOREG) printf(" LO");
11094         else printf(" r%d",r);
11095       }
11096     }
11097 #ifndef FORCE32
11098     printf(" UU:");
11099     for(r=1;r<=CCREG;r++) {
11100       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
11101         if(r==HIREG) printf(" HI");
11102         else if(r==LOREG) printf(" LO");
11103         else printf(" r%d",r);
11104       }
11105     }
11106     printf(" 32:");
11107     for(r=0;r<=CCREG;r++) {
11108       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
11109       if((regs[i].was32>>r)&1) {
11110         if(r==CCREG) printf(" CC");
11111         else if(r==HIREG) printf(" HI");
11112         else if(r==LOREG) printf(" LO");
11113         else printf(" r%d",r);
11114       }
11115     }
11116 #endif
11117     printf("\n");
11118     #if defined(__i386__) || defined(__x86_64__)
11119     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
11120     #endif
11121     #ifdef __arm__
11122     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
11123     #endif
11124     printf("needs: ");
11125     if(needed_reg[i]&1) printf("eax ");
11126     if((needed_reg[i]>>1)&1) printf("ecx ");
11127     if((needed_reg[i]>>2)&1) printf("edx ");
11128     if((needed_reg[i]>>3)&1) printf("ebx ");
11129     if((needed_reg[i]>>5)&1) printf("ebp ");
11130     if((needed_reg[i]>>6)&1) printf("esi ");
11131     if((needed_reg[i]>>7)&1) printf("edi ");
11132     printf("r:");
11133     for(r=0;r<=CCREG;r++) {
11134       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
11135       if((requires_32bit[i]>>r)&1) {
11136         if(r==CCREG) printf(" CC");
11137         else if(r==HIREG) printf(" HI");
11138         else if(r==LOREG) printf(" LO");
11139         else printf(" r%d",r);
11140       }
11141     }
11142     printf("\n");
11143     /*printf("pr:");
11144     for(r=0;r<=CCREG;r++) {
11145       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
11146       if((pr32[i]>>r)&1) {
11147         if(r==CCREG) printf(" CC");
11148         else if(r==HIREG) printf(" HI");
11149         else if(r==LOREG) printf(" LO");
11150         else printf(" r%d",r);
11151       }
11152     }
11153     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
11154     printf("\n");*/
11155     #if defined(__i386__) || defined(__x86_64__)
11156     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
11157     printf("dirty: ");
11158     if(regs[i].wasdirty&1) printf("eax ");
11159     if((regs[i].wasdirty>>1)&1) printf("ecx ");
11160     if((regs[i].wasdirty>>2)&1) printf("edx ");
11161     if((regs[i].wasdirty>>3)&1) printf("ebx ");
11162     if((regs[i].wasdirty>>5)&1) printf("ebp ");
11163     if((regs[i].wasdirty>>6)&1) printf("esi ");
11164     if((regs[i].wasdirty>>7)&1) printf("edi ");
11165     #endif
11166     #ifdef __arm__
11167     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
11168     printf("dirty: ");
11169     if(regs[i].wasdirty&1) printf("r0 ");
11170     if((regs[i].wasdirty>>1)&1) printf("r1 ");
11171     if((regs[i].wasdirty>>2)&1) printf("r2 ");
11172     if((regs[i].wasdirty>>3)&1) printf("r3 ");
11173     if((regs[i].wasdirty>>4)&1) printf("r4 ");
11174     if((regs[i].wasdirty>>5)&1) printf("r5 ");
11175     if((regs[i].wasdirty>>6)&1) printf("r6 ");
11176     if((regs[i].wasdirty>>7)&1) printf("r7 ");
11177     if((regs[i].wasdirty>>8)&1) printf("r8 ");
11178     if((regs[i].wasdirty>>9)&1) printf("r9 ");
11179     if((regs[i].wasdirty>>10)&1) printf("r10 ");
11180     if((regs[i].wasdirty>>12)&1) printf("r12 ");
11181     #endif
11182     printf("\n");
11183     disassemble_inst(i);
11184     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
11185     #if defined(__i386__) || defined(__x86_64__)
11186     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
11187     if(regs[i].dirty&1) printf("eax ");
11188     if((regs[i].dirty>>1)&1) printf("ecx ");
11189     if((regs[i].dirty>>2)&1) printf("edx ");
11190     if((regs[i].dirty>>3)&1) printf("ebx ");
11191     if((regs[i].dirty>>5)&1) printf("ebp ");
11192     if((regs[i].dirty>>6)&1) printf("esi ");
11193     if((regs[i].dirty>>7)&1) printf("edi ");
11194     #endif
11195     #ifdef __arm__
11196     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
11197     if(regs[i].dirty&1) printf("r0 ");
11198     if((regs[i].dirty>>1)&1) printf("r1 ");
11199     if((regs[i].dirty>>2)&1) printf("r2 ");
11200     if((regs[i].dirty>>3)&1) printf("r3 ");
11201     if((regs[i].dirty>>4)&1) printf("r4 ");
11202     if((regs[i].dirty>>5)&1) printf("r5 ");
11203     if((regs[i].dirty>>6)&1) printf("r6 ");
11204     if((regs[i].dirty>>7)&1) printf("r7 ");
11205     if((regs[i].dirty>>8)&1) printf("r8 ");
11206     if((regs[i].dirty>>9)&1) printf("r9 ");
11207     if((regs[i].dirty>>10)&1) printf("r10 ");
11208     if((regs[i].dirty>>12)&1) printf("r12 ");
11209     #endif
11210     printf("\n");
11211     if(regs[i].isconst) {
11212       printf("constants: ");
11213       #if defined(__i386__) || defined(__x86_64__)
11214       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
11215       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
11216       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
11217       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
11218       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
11219       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
11220       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
11221       #endif
11222       #ifdef __arm__
11223       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
11224       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
11225       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
11226       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
11227       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
11228       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
11229       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
11230       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
11231       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
11232       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
11233       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
11234       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
11235       #endif
11236       printf("\n");
11237     }
11238 #ifndef FORCE32
11239     printf(" 32:");
11240     for(r=0;r<=CCREG;r++) {
11241       if((regs[i].is32>>r)&1) {
11242         if(r==CCREG) printf(" CC");
11243         else if(r==HIREG) printf(" HI");
11244         else if(r==LOREG) printf(" LO");
11245         else printf(" r%d",r);
11246       }
11247     }
11248     printf("\n");
11249 #endif
11250     /*printf(" p32:");
11251     for(r=0;r<=CCREG;r++) {
11252       if((p32[i]>>r)&1) {
11253         if(r==CCREG) printf(" CC");
11254         else if(r==HIREG) printf(" HI");
11255         else if(r==LOREG) printf(" LO");
11256         else printf(" r%d",r);
11257       }
11258     }
11259     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
11260     else printf("\n");*/
11261     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
11262       #if defined(__i386__) || defined(__x86_64__)
11263       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
11264       if(branch_regs[i].dirty&1) printf("eax ");
11265       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
11266       if((branch_regs[i].dirty>>2)&1) printf("edx ");
11267       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
11268       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
11269       if((branch_regs[i].dirty>>6)&1) printf("esi ");
11270       if((branch_regs[i].dirty>>7)&1) printf("edi ");
11271       #endif
11272       #ifdef __arm__
11273       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
11274       if(branch_regs[i].dirty&1) printf("r0 ");
11275       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
11276       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
11277       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
11278       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
11279       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
11280       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
11281       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
11282       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
11283       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
11284       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
11285       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
11286       #endif
11287 #ifndef FORCE32
11288       printf(" 32:");
11289       for(r=0;r<=CCREG;r++) {
11290         if((branch_regs[i].is32>>r)&1) {
11291           if(r==CCREG) printf(" CC");
11292           else if(r==HIREG) printf(" HI");
11293           else if(r==LOREG) printf(" LO");
11294           else printf(" r%d",r);
11295         }
11296       }
11297       printf("\n");
11298 #endif
11299     }
11300   }
11301 #endif // DISASM
11302
11303   /* Pass 8 - Assembly */
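  // Main emit loop: for each instruction, write back values that are stale on
  // entry, record instr_addr[i] as the branch-target entry point, load the
  // entry registers and constants, then dispatch to the per-itype *_assemble
  // routine.  Delay slots are emitted by the branch assembler, so ds skips
  // them here.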
11304   linkcount=0;stubcount=0;
11305   ds=0;is_delayslot=0;
11306   cop1_usable=0;
11307   uint64_t is32_pre=0;
11308   u_int dirty_pre=0;
11309   u_int beginning=(u_int)out;
11310   if((u_int)addr&1) {
11311     ds=1;
11312     pagespan_ds();
11313   }
11314   u_int instr_addr0_override=0;
11315
11316 #ifdef PCSX
11317   if (start == 0x80030000) {
11318     // nasty hack for the fast boot (BIOS skip) option
11319     // override block entry to this code
11320     instr_addr0_override=(u_int)out;
11321     emit_movimm(start,0);
11322     // abuse io address var as a flag that we
11323     // have already returned here once
11324     emit_readword((int)&address,1);
11325     emit_writeword(0,(int)&pcaddr);
11326     emit_writeword(0,(int)&address);
11327     emit_cmp(0,1);
11328     emit_jne((int)new_dyna_leave);
11329   }
11330 #endif
11331   for(i=0;i<slen;i++)
11332   {
11333     //if(ds) printf("ds: ");
11334     disassemble_inst(i);
11335     if(ds) {
11336       ds=0; // Skip delay slot
11337       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
11338       instr_addr[i]=0;
11339     } else {
11340       speculate_register_values(i);
11341       #ifndef DESTRUCTIVE_WRITEBACK
11342       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11343       {
11344         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
11345               unneeded_reg[i],unneeded_reg_upper[i]);
11346         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
11347               unneeded_reg[i],unneeded_reg_upper[i]);
11348       }
11349       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
11350         is32_pre=branch_regs[i].is32;
11351         dirty_pre=branch_regs[i].dirty;
11352       }else{
11353         is32_pre=regs[i].is32;
11354         dirty_pre=regs[i].dirty;
11355       }
11356       #endif
11357       // write back
11358       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11359       {
11360         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
11361                       unneeded_reg[i],unneeded_reg_upper[i]);
11362         loop_preload(regmap_pre[i],regs[i].regmap_entry);
11363       }
11364       // branch target entry point
11365       instr_addr[i]=(u_int)out;
11366       assem_debug("<->\n");
11367       // load regs
11368       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
11369         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
11370       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
11371       address_generation(i,&regs[i],regs[i].regmap_entry);
11372       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
11373       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
11374       {
11375         // Load the delay slot registers if necessary
11376         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
11377           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11378         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
11379           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11380         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
11381           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11382       }
11383       else if(i+1<slen)
11384       {
11385         // Preload registers for following instruction
11386         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
11387           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
11388             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11389         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
11390           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
11391             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11392       }
11393       // TODO: if(is_ooo(i)) address_generation(i+1);
11394       if(itype[i]==CJUMP||itype[i]==FJUMP)
11395         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
11396       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
11397         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11398       if(bt[i]) cop1_usable=0;
11399       // assemble
11400       switch(itype[i]) {
11401         case ALU:
11402           alu_assemble(i,&regs[i]);break;
11403         case IMM16:
11404           imm16_assemble(i,&regs[i]);break;
11405         case SHIFT:
11406           shift_assemble(i,&regs[i]);break;
11407         case SHIFTIMM:
11408           shiftimm_assemble(i,&regs[i]);break;
11409         case LOAD:
11410           load_assemble(i,&regs[i]);break;
11411         case LOADLR:
11412           loadlr_assemble(i,&regs[i]);break;
11413         case STORE:
11414           store_assemble(i,&regs[i]);break;
11415         case STORELR:
11416           storelr_assemble(i,&regs[i]);break;
11417         case COP0:
11418           cop0_assemble(i,&regs[i]);break;
11419         case COP1:
11420           cop1_assemble(i,&regs[i]);break;
11421         case C1LS:
11422           c1ls_assemble(i,&regs[i]);break;
11423         case COP2:
11424           cop2_assemble(i,&regs[i]);break;
11425         case C2LS:
11426           c2ls_assemble(i,&regs[i]);break;
11427         case C2OP:
11428           c2op_assemble(i,&regs[i]);break;
11429         case FCONV:
11430           fconv_assemble(i,&regs[i]);break;
11431         case FLOAT:
11432           float_assemble(i,&regs[i]);break;
11433         case FCOMP:
11434           fcomp_assemble(i,&regs[i]);break;
11435         case MULTDIV:
11436           multdiv_assemble(i,&regs[i]);break;
11437         case MOV:
11438           mov_assemble(i,&regs[i]);break;
11439         case SYSCALL:
11440           syscall_assemble(i,&regs[i]);break;
11441         case HLECALL:
11442           hlecall_assemble(i,&regs[i]);break;
11443         case INTCALL:
11444           intcall_assemble(i,&regs[i]);break;
11445         case UJUMP:
11446           ujump_assemble(i,&regs[i]);ds=1;break;
11447         case RJUMP:
11448           rjump_assemble(i,&regs[i]);ds=1;break;
11449         case CJUMP:
11450           cjump_assemble(i,&regs[i]);ds=1;break;
11451         case SJUMP:
11452           sjump_assemble(i,&regs[i]);ds=1;break;
11453         case FJUMP:
11454           fjump_assemble(i,&regs[i]);ds=1;break;
11455         case SPAN:
11456           pagespan_assemble(i,&regs[i]);break;
11457       }
11458       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
11459         literal_pool(1024);
11460       else
11461         literal_pool_jumpover(256);
11462     }
11463   }
11464   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
11465   // If the block did not end with an unconditional branch,
11466   // add a jump to the next instruction.
11467   if(i>1) {
11468     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
11469       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11470       assert(i==slen);
11471       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
11472         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11473         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11474           emit_loadreg(CCREG,HOST_CCREG);
11475         emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
11476       }
11477       else if(!likely[i-2])
11478       {
11479         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
11480         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
11481       }
11482       else
11483       {
11484         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
11485         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
11486       }
11487       add_to_linker((int)out,start+i*4,0);
11488       emit_jmp(0);
11489     }
11490   }
11491   else
11492   {
11493     assert(i>0);
11494     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11495     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11496     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11497       emit_loadreg(CCREG,HOST_CCREG);
11498     emit_addimm(HOST_CCREG,CLOCK_ADJUST(ccadj[i-1]+1),HOST_CCREG);
11499     add_to_linker((int)out,start+i*4,0);
11500     emit_jmp(0);
11501   }
11502
11503   // TODO: delay slot stubs?
11504   // Stubs
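  // Out-of-line stubs recorded during assembly: slow-path memory accesses,
  // cycle-count (CC) checks, code-invalidation checks, unusable-COP1 traps
  // and unaligned stores are all emitted here, after the main code stream.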
11505   for(i=0;i<stubcount;i++)
11506   {
11507     switch(stubs[i][0])
11508     {
11509       case LOADB_STUB:
11510       case LOADH_STUB:
11511       case LOADW_STUB:
11512       case LOADD_STUB:
11513       case LOADBU_STUB:
11514       case LOADHU_STUB:
11515         do_readstub(i);break;
11516       case STOREB_STUB:
11517       case STOREH_STUB:
11518       case STOREW_STUB:
11519       case STORED_STUB:
11520         do_writestub(i);break;
11521       case CC_STUB:
11522         do_ccstub(i);break;
11523       case INVCODE_STUB:
11524         do_invstub(i);break;
11525       case FP_STUB:
11526         do_cop1stub(i);break;
11527       case STORELR_STUB:
11528         do_unalignedwritestub(i);break;
11529     }
11530   }
11531
11532   if (instr_addr0_override)
11533     instr_addr[0] = instr_addr0_override;
11534
11535   /* Pass 9 - Linker */
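  // Fix up recorded branches: targets outside this block get an extjump stub;
  // if the destination is already compiled (check_addr), the branch is pointed
  // straight at it and the stub is registered via add_link, apparently so the
  // link can be undone later.  Targets inside the block are patched directly
  // to instr_addr[target].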
11536   for(i=0;i<linkcount;i++)
11537   {
11538     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
11539     literal_pool(64);
11540     if(!link_addr[i][2])
11541     {
11542       void *stub=out;
11543       void *addr=check_addr(link_addr[i][1]);
11544       emit_extjump(link_addr[i][0],link_addr[i][1]);
11545       if(addr) {
11546         set_jump_target(link_addr[i][0],(int)addr);
11547         add_link(link_addr[i][1],stub);
11548       }
11549       else set_jump_target(link_addr[i][0],(int)stub);
11550     }
11551     else
11552     {
11553       // Internal branch
11554       int target=(link_addr[i][1]-start)>>2;
11555       assert(target>=0&&target<slen);
11556       assert(instr_addr[target]);
11557       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11558       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
11559       //#else
11560       set_jump_target(link_addr[i][0],instr_addr[target]);
11561       //#endif
11562     }
11563   }
11564   // External Branch Targets (jump_in)
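  // Register entry points for each branch target: a dirty-check stub goes in
  // jump_dirty, its entry point in jump_in.  Each hash_table bin holds two
  // {vaddr, entry_point} pairs laid out as ht_bin[0..3]; only existing bins
  // for this vaddr are updated here.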
11565   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
11566   for(i=0;i<slen;i++)
11567   {
11568     if(bt[i]||i==0)
11569     {
11570       if(instr_addr[i]) // TODO - delay slots (=null)
11571       {
11572         u_int vaddr=start+i*4;
11573         u_int page=get_page(vaddr);
11574         u_int vpage=get_vpage(vaddr);
11575         literal_pool(256);
11576         {
11577           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11578           assem_debug("jump_in: %x\n",start+i*4);
11579           ll_add(jump_dirty+vpage,vaddr,(void *)out);
11580           int entry_point=do_dirty_stub(i);
11581           ll_add_flags(jump_in+page,vaddr,state_rflags,(void *)entry_point);
11582           // If there was an existing entry in the hash table,
11583           // replace it with the new address.
11584           // Don't add new entries.  We'll insert the
11585           // ones that actually get used in check_addr().
11586           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
11587           if(ht_bin[0]==vaddr) {
11588             ht_bin[1]=entry_point;
11589           }
11590           if(ht_bin[2]==vaddr) {
11591             ht_bin[3]=entry_point;
11592           }
11593         }
11594       }
11595     }
11596   }
11597   // Write out the literal pool if necessary
11598   literal_pool(0);
11599   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11600   // Align code
11601   if(((u_int)out)&7) emit_addnop(13);
11602   #endif
11603   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
11604   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
11605   memcpy(copy,source,slen*4);
11606   copy+=slen*4;
11607
11608   #ifdef __arm__
11609   __clear_cache((void *)beginning,out);
11610   #endif
11611
11612   // If we're within 256K of the end of the buffer,
11613   // start over from the beginning. (Is 256K enough?)
11614   if((u_int)out>(u_int)BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
11615
11616   // Trap writes to any of the pages we compiled
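  // Rough summary of the write-protect scheme: clearing invalid_code marks
  // these pages as holding compiled code; on builds with TLB support the
  // 0x40000000 bit in memory_map additionally routes stores through the
  // invalidation path so self-modifying code gets detected.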
11617   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
11618     invalid_code[i]=0;
11619 #ifndef DISABLE_TLB
11620     memory_map[i]|=0x40000000;
11621     if((signed int)start>=(signed int)0xC0000000) {
11622       assert(using_tlb);
11623       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
11624       invalid_code[j]=0;
11625       memory_map[j]|=0x40000000;
11626       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
11627     }
11628 #endif
11629   }
11630   inv_code_start=inv_code_end=~0;
11631 #ifdef PCSX
11632   // for PCSX we need to mark all mirrors too
11633   if(get_page(start)<(RAM_SIZE>>12))
11634     for(i=start>>12;i<=(start+slen*4)>>12;i++)
11635       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
11636       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
11637       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
11638 #endif
11639
11640   /* Pass 10 - Free memory by expiring oldest blocks */
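  // The translation cache is treated as a ring: expirep is a 16-bit cursor
  // that sweeps ahead of the output pointer, and each step drops jump_in /
  // jump_dirty entries, jump_out pointers, or hash-table entries that point
  // into the region about to be reused.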
11641
11642   int end=((((int)out-(int)BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
11643   while(expirep!=end)
11644   {
11645     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
11646     int base=(int)BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
11647     inv_debug("EXP: Phase %d\n",expirep);
11648     switch((expirep>>11)&3)
11649     {
11650       case 0:
11651         // Clear jump_in and jump_dirty
11652         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11653         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11654         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11655         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11656         break;
11657       case 1:
11658         // Clear pointers
11659         ll_kill_pointers(jump_out[expirep&2047],base,shift);
11660         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11661         break;
11662       case 2:
11663         // Clear hash table
11664         for(i=0;i<32;i++) {
11665           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11666           if((ht_bin[3]>>shift)==(base>>shift) ||
11667              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11668             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11669             ht_bin[2]=ht_bin[3]=-1;
11670           }
11671           if((ht_bin[1]>>shift)==(base>>shift) ||
11672              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11673             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11674             ht_bin[0]=ht_bin[2];
11675             ht_bin[1]=ht_bin[3];
11676             ht_bin[2]=ht_bin[3]=-1;
11677           }
11678         }
11679         break;
11680       case 3:
11681         // Clear jump_out
11682         #ifdef __arm__
11683         if((expirep&2047)==0)
11684           do_clear_cache();
11685         #endif
11686         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11687         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11688         break;
11689     }
11690     expirep=(expirep+1)&65535;
11691   }
11692   return 0;
11693 }
11694
11695 // vim:shiftwidth=2:expandtab