1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  *   Mupen64plus - new_dynarec.c                                           *
3  *   Copyright (C) 2009-2011 Ari64                                         *
4  *                                                                         *
5  *   This program is free software; you can redistribute it and/or modify  *
6  *   it under the terms of the GNU General Public License as published by  *
7  *   the Free Software Foundation; either version 2 of the License, or     *
8  *   (at your option) any later version.                                   *
9  *                                                                         *
10  *   This program is distributed in the hope that it will be useful,       *
11  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
13  *   GNU General Public License for more details.                          *
14  *                                                                         *
15  *   You should have received a copy of the GNU General Public License     *
16  *   along with this program; if not, write to the                         *
17  *   Free Software Foundation, Inc.,                                       *
18  *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.          *
19  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
20
21 #include <stdlib.h>
22 #include <stdint.h> //include for uint64_t
23 #include <assert.h>
24 #include <sys/mman.h>
25
26 #include "emu_if.h" //emulator interface
27
28 //#define DISASM
29 //#define assem_debug printf
30 //#define inv_debug printf
31 #define assem_debug(...)
32 #define inv_debug(...)
33
34 #ifdef __i386__
35 #include "assem_x86.h"
36 #endif
37 #ifdef __x86_64__
38 #include "assem_x64.h"
39 #endif
40 #ifdef __arm__
41 #include "assem_arm.h"
42 #endif
43
44 #define MAXBLOCK 4096
45 #define MAX_OUTPUT_BLOCK_SIZE 262144
46 #define CLOCK_DIVIDER 2
47
48 struct regstat
49 {
50   signed char regmap_entry[HOST_REGS];
51   signed char regmap[HOST_REGS];
52   uint64_t was32;
53   uint64_t is32;
54   uint64_t wasdirty;
55   uint64_t dirty;
56   uint64_t u;
57   uint64_t uu;
58   u_int wasconst;
59   u_int isconst;
60   uint64_t constmap[HOST_REGS];
61 };
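/* Field notes (inferred from how the struct is used below, not from any
   external documentation):
   - regmap_entry/regmap map each host register to the guest register it
     caches (-1 = free); values 1-31 are MIPS GPRs, 32+ are the pseudo
     registers defined further down (HIREG, CCREG, ...), and an entry of
     r|64 refers to the upper 32-bit half of guest register r.
   - wasdirty/dirty and wasconst/isconst are bitmasks indexed by host
     register: "dirty" means the cached value must be written back, "const"
     means the value is a known constant stored in constmap[hr].
   - was32/is32 and u/uu are bitmasks indexed by guest register: is32 marks
     registers whose 64-bit value is just a sign-extended 32-bit value, while
     u/uu mark registers whose lower/upper-half values are not needed later. */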
62
63 struct ll_entry
64 {
65   u_int vaddr;
66   u_int reg32;
67   void *addr;
68   struct ll_entry *next;
69 };
70
71   u_int start;
72   u_int *source;
73   u_int pagelimit;
74   char insn[MAXBLOCK][10];
75   u_char itype[MAXBLOCK];
76   u_char opcode[MAXBLOCK];
77   u_char opcode2[MAXBLOCK];
78   u_char bt[MAXBLOCK];
79   u_char rs1[MAXBLOCK];
80   u_char rs2[MAXBLOCK];
81   u_char rt1[MAXBLOCK];
82   u_char rt2[MAXBLOCK];
83   u_char us1[MAXBLOCK];
84   u_char us2[MAXBLOCK];
85   u_char dep1[MAXBLOCK];
86   u_char dep2[MAXBLOCK];
87   u_char lt1[MAXBLOCK];
88   static uint64_t gte_rs[MAXBLOCK]; // gte: 32 data and 32 ctl regs
89   static uint64_t gte_rt[MAXBLOCK];
90   static uint64_t gte_unneeded[MAXBLOCK];
91   static int gte_reads_flags; // gte flag read encountered
92   int imm[MAXBLOCK];
93   u_int ba[MAXBLOCK];
94   char likely[MAXBLOCK];
95   char is_ds[MAXBLOCK];
96   char ooo[MAXBLOCK];
97   uint64_t unneeded_reg[MAXBLOCK];
98   uint64_t unneeded_reg_upper[MAXBLOCK];
99   uint64_t branch_unneeded_reg[MAXBLOCK];
100   uint64_t branch_unneeded_reg_upper[MAXBLOCK];
101   uint64_t p32[MAXBLOCK];
102   uint64_t pr32[MAXBLOCK];
103   signed char regmap_pre[MAXBLOCK][HOST_REGS];
104   signed char regmap[MAXBLOCK][HOST_REGS];
105   signed char regmap_entry[MAXBLOCK][HOST_REGS];
106   uint64_t constmap[MAXBLOCK][HOST_REGS];
107   struct regstat regs[MAXBLOCK];
108   struct regstat branch_regs[MAXBLOCK];
109   signed char minimum_free_regs[MAXBLOCK];
110   u_int needed_reg[MAXBLOCK];
111   uint64_t requires_32bit[MAXBLOCK];
112   u_int wont_dirty[MAXBLOCK];
113   u_int will_dirty[MAXBLOCK];
114   int ccadj[MAXBLOCK];
115   int slen;
116   u_int instr_addr[MAXBLOCK];
117   u_int link_addr[MAXBLOCK][3];
118   int linkcount;
119   u_int stubs[MAXBLOCK*3][8];
120   int stubcount;
121   u_int literals[1024][2];
122   int literalcount;
123   int is_delayslot;
124   int cop1_usable;
125   u_char *out;
126   struct ll_entry *jump_in[4096];
127   struct ll_entry *jump_out[4096];
128   struct ll_entry *jump_dirty[4096];
129   u_int hash_table[65536][4]  __attribute__((aligned(16)));
130   char shadow[1048576]  __attribute__((aligned(16)));
131   void *copy;
132   int expirep;
133 #ifndef PCSX
134   u_int using_tlb;
135 #else
136   static const u_int using_tlb=0;
137 #endif
138   static u_int sp_in_mirror;
139   u_int stop_after_jal;
140   extern u_char restore_candidate[512];
141   extern int cycle_count;
142
143   /* registers that may be allocated */
144   /* 1-31 gpr */
145 #define HIREG 32 // hi
146 #define LOREG 33 // lo
147 #define FSREG 34 // FPU status (FCSR)
148 #define CSREG 35 // Coprocessor status
149 #define CCREG 36 // Cycle count
150 #define INVCP 37 // Pointer to invalid_code
151 #define MMREG 38 // Pointer to memory_map
152 #define ROREG 39 // ram offset (if rdram!=0x80000000)
153 #define TEMPREG 40
154 #define FTEMP 40 // FPU temporary register
155 #define PTEMP 41 // Prefetch temporary register
156 #define TLREG 42 // TLB mapping offset
157 #define RHASH 43 // Return address hash
158 #define RHTBL 44 // Return address hash table address
159 #define RTEMP 45 // JR/JALR address register
160 #define MAXREG 45
161 #define AGEN1 46 // Address generation temporary register
162 #define AGEN2 47 // Address generation temporary register
163 #define MGEN1 48 // Maptable address generation temporary register
164 #define MGEN2 49 // Maptable address generation temporary register
165 #define BTREG 50 // Branch target temporary register
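/* Illustrative note: these identifiers share the same number space as the
   guest GPRs, so regmap[] entries and helpers like get_reg() treat them
   uniformly, e.g. get_reg(regs[i].regmap, CCREG) returns the host register
   currently caching the cycle count, or -1 if it is not mapped. */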
166
167   /* instruction types */
168 #define NOP 0     // No operation
169 #define LOAD 1    // Load
170 #define STORE 2   // Store
171 #define LOADLR 3  // Unaligned load
172 #define STORELR 4 // Unaligned store
173 #define MOV 5     // Move 
174 #define ALU 6     // Arithmetic/logic
175 #define MULTDIV 7 // Multiply/divide
176 #define SHIFT 8   // Shift by register
177 #define SHIFTIMM 9// Shift by immediate
178 #define IMM16 10  // 16-bit immediate
179 #define RJUMP 11  // Unconditional jump to register
180 #define UJUMP 12  // Unconditional jump
181 #define CJUMP 13  // Conditional branch (BEQ/BNE/BGTZ/BLEZ)
182 #define SJUMP 14  // Conditional branch (regimm format)
183 #define COP0 15   // Coprocessor 0
184 #define COP1 16   // Coprocessor 1
185 #define C1LS 17   // Coprocessor 1 load/store
186 #define FJUMP 18  // Conditional branch (floating point)
187 #define FLOAT 19  // Floating point unit
188 #define FCONV 20  // Convert integer to float
189 #define FCOMP 21  // Floating point compare (sets FSREG)
190 #define SYSCALL 22// SYSCALL
191 #define OTHER 23  // Other
192 #define SPAN 24   // Branch/delay slot spans 2 pages
193 #define NI 25     // Not implemented
194 #define HLECALL 26// PCSX fake opcodes for HLE
195 #define COP2 27   // Coprocessor 2 move
196 #define C2LS 28   // Coprocessor 2 load/store
197 #define C2OP 29   // Coprocessor 2 operation
198 #define INTCALL 30// Call interpreter to handle rare corner cases
199
200   /* stubs */
201 #define CC_STUB 1
202 #define FP_STUB 2
203 #define LOADB_STUB 3
204 #define LOADH_STUB 4
205 #define LOADW_STUB 5
206 #define LOADD_STUB 6
207 #define LOADBU_STUB 7
208 #define LOADHU_STUB 8
209 #define STOREB_STUB 9
210 #define STOREH_STUB 10
211 #define STOREW_STUB 11
212 #define STORED_STUB 12
213 #define STORELR_STUB 13
214 #define INVCODE_STUB 14
215
216   /* branch codes */
217 #define TAKEN 1
218 #define NOTTAKEN 2
219 #define NULLDS 3
220
221 // asm linkage
222 int new_recompile_block(int addr);
223 void *get_addr_ht(u_int vaddr);
224 void invalidate_block(u_int block);
225 void invalidate_addr(u_int addr);
226 void remove_hash(int vaddr);
227 void jump_vaddr();
228 void dyna_linker();
229 void dyna_linker_ds();
230 void verify_code();
231 void verify_code_vm();
232 void verify_code_ds();
233 void cc_interrupt();
234 void fp_exception();
235 void fp_exception_ds();
236 void jump_syscall();
237 void jump_syscall_hle();
238 void jump_eret();
239 void jump_hlecall();
240 void jump_intcall();
241 void new_dyna_leave();
242
243 // TLB
244 void TLBWI_new();
245 void TLBWR_new();
246 void read_nomem_new();
247 void read_nomemb_new();
248 void read_nomemh_new();
249 void read_nomemd_new();
250 void write_nomem_new();
251 void write_nomemb_new();
252 void write_nomemh_new();
253 void write_nomemd_new();
254 void write_rdram_new();
255 void write_rdramb_new();
256 void write_rdramh_new();
257 void write_rdramd_new();
258 extern u_int memory_map[1048576];
259
260 // Needed by assembler
261 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32);
262 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty);
263 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr);
264 void load_all_regs(signed char i_regmap[]);
265 void load_needed_regs(signed char i_regmap[],signed char next_regmap[]);
266 void load_regs_entry(int t);
267 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i);
268
269 int tracedebug=0;
270
271 //#define DEBUG_CYCLE_COUNT 1
272
273 static void tlb_hacks()
274 {
275 #ifndef DISABLE_TLB
276   // Goldeneye hack
277   if (strncmp((char *) ROM_HEADER->nom, "GOLDENEYE",9) == 0)
278   {
279     u_int addr;
280     int n;
281     switch (ROM_HEADER->Country_code&0xFF) 
282     {
283       case 0x45: // U
284         addr=0x34b30;
285         break;                   
286       case 0x4A: // J 
287         addr=0x34b70;    
288         break;    
289       case 0x50: // E 
290         addr=0x329f0;
291         break;                        
292       default: 
293         // Unknown country code
294         addr=0;
295         break;
296     }
297     u_int rom_addr=(u_int)rom;
298     #ifdef ROM_COPY
299     // Since memory_map is 32-bit, on 64-bit systems the rom needs to be
300     // in the lower 4G of memory to use this hack.  Copy it if necessary.
301     if((void *)rom>(void *)0xffffffff) {
302       munmap(ROM_COPY, 67108864);
303       if(mmap(ROM_COPY, 12582912,
304               PROT_READ | PROT_WRITE,
305               MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
306             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
307       memcpy(ROM_COPY,rom,12582912);
308       rom_addr=(u_int)ROM_COPY;
309     }
310     #endif
311     if(addr) {
312       for(n=0x7F000;n<0x80000;n++) {
313         memory_map[n]=(((u_int)(rom_addr+addr-0x7F000000))>>2)|0x40000000;
314       }
315     }
316   }
317 #endif
318 }
319
320 static u_int get_page(u_int vaddr)
321 {
322 #ifndef PCSX
323   u_int page=(vaddr^0x80000000)>>12;
324 #else
325   u_int page=vaddr&~0xe0000000;
326   if (page < 0x1000000)
327     page &= ~0x0e00000; // RAM mirrors
328   page>>=12;
329 #endif
330 #ifndef DISABLE_TLB
331   if(page>262143&&tlb_LUT_r[vaddr>>12]) page=(tlb_LUT_r[vaddr>>12]^0x80000000)>>12;
332 #endif
333   if(page>2048) page=2048+(page&2047);
334   return page;
335 }
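/* Worked examples for the PCSX build (PCSX defined, TLB disabled):
     0x801f1234 -> 0x001f1234 -> page 0x1f1            (cached RAM)
     0x80601234 -> 0x00601234 -> 0x00001234 -> page 1  (RAM mirror folded
                                                        onto the first image)
     0xbfc00180 -> 0x1fc00180 -> 0x1fc00 -> 2048+(0x1fc00&2047) = 0xc00
                                                        (BIOS, folded into the
                                                        2048..4095 range) */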
336
337 static u_int get_vpage(u_int vaddr)
338 {
339   u_int vpage=(vaddr^0x80000000)>>12;
340 #ifndef DISABLE_TLB
341   if(vpage>262143&&tlb_LUT_r[vaddr>>12]) vpage&=2047; // jump_dirty uses a hash of the virtual address instead
342 #endif
343   if(vpage>2048) vpage=2048+(vpage&2047);
344   return vpage;
345 }
346
347 // Get address from virtual address
348 // This is called from the recompiled JR/JALR instructions
349 void *get_addr(u_int vaddr)
350 {
351   u_int page=get_page(vaddr);
352   u_int vpage=get_vpage(vaddr);
353   struct ll_entry *head;
354   //printf("TRACE: count=%d next=%d (get_addr %x,page %d)\n",Count,next_interupt,vaddr,page);
355   head=jump_in[page];
356   while(head!=NULL) {
357     if(head->vaddr==vaddr&&head->reg32==0) {
358   //printf("TRACE: count=%d next=%d (get_addr match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
359       int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
360       ht_bin[3]=ht_bin[1];
361       ht_bin[2]=ht_bin[0];
362       ht_bin[1]=(int)head->addr;
363       ht_bin[0]=vaddr;
364       return head->addr;
365     }
366     head=head->next;
367   }
368   head=jump_dirty[vpage];
369   while(head!=NULL) {
370     if(head->vaddr==vaddr&&head->reg32==0) {
371       //printf("TRACE: count=%d next=%d (get_addr match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
372       // Don't restore blocks which are about to expire from the cache
373       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
374       if(verify_dirty(head->addr)) {
375         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
376         invalid_code[vaddr>>12]=0;
377         inv_code_start=inv_code_end=~0;
378         memory_map[vaddr>>12]|=0x40000000;
379         if(vpage<2048) {
380 #ifndef DISABLE_TLB
381           if(tlb_LUT_r[vaddr>>12]) {
382             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
383             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
384           }
385 #endif
386           restore_candidate[vpage>>3]|=1<<(vpage&7);
387         }
388         else restore_candidate[page>>3]|=1<<(page&7);
389         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
390         if(ht_bin[0]==vaddr) {
391           ht_bin[1]=(int)head->addr; // Replace existing entry
392         }
393         else
394         {
395           ht_bin[3]=ht_bin[1];
396           ht_bin[2]=ht_bin[0];
397           ht_bin[1]=(int)head->addr;
398           ht_bin[0]=vaddr;
399         }
400         return head->addr;
401       }
402     }
403     head=head->next;
404   }
405   //printf("TRACE: count=%d next=%d (get_addr no-match %x)\n",Count,next_interupt,vaddr);
406   int r=new_recompile_block(vaddr);
407   if(r==0) return get_addr(vaddr);
408   // Execute in unmapped page, generate pagefault exception
409   Status|=2;
410   Cause=(vaddr<<31)|0x8;
411   EPC=(vaddr&1)?vaddr-5:vaddr;
412   BadVAddr=(vaddr&~1);
413   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
414   EntryHi=BadVAddr&0xFFFFE000;
415   return get_addr_ht(0x80000000);
416 }
417 // Look up address in hash table first
418 void *get_addr_ht(u_int vaddr)
419 {
420   //printf("TRACE: count=%d next=%d (get_addr_ht %x)\n",Count,next_interupt,vaddr);
421   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
422   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
423   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
424   return get_addr(vaddr);
425 }
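/* The hash table is 65536 bins of two {vaddr, native addr} pairs, probed by
   get_addr_ht() above; get_addr() inserts a new translation at slot 0 and
   shifts the previous pair to slot 1, so the older pair is what gets evicted.
   Illustrative view of one bin (a sketch only, nothing uses this type): */
#if 0
struct ht_bin_view {
  u_int vaddr0, addr0;  /* most recently inserted translation */
  u_int vaddr1, addr1;  /* older translation, dropped on the next insert */
};
#endif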
426
427 void *get_addr_32(u_int vaddr,u_int flags)
428 {
429 #ifdef FORCE32
430   return get_addr(vaddr);
431 #else
432   //printf("TRACE: count=%d next=%d (get_addr_32 %x,flags %x)\n",Count,next_interupt,vaddr,flags);
433   int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
434   if(ht_bin[0]==vaddr) return (void *)ht_bin[1];
435   if(ht_bin[2]==vaddr) return (void *)ht_bin[3];
436   u_int page=get_page(vaddr);
437   u_int vpage=get_vpage(vaddr);
438   struct ll_entry *head;
439   head=jump_in[page];
440   while(head!=NULL) {
441     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
442       //printf("TRACE: count=%d next=%d (get_addr_32 match %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
443       if(head->reg32==0) {
444         int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
445         if(ht_bin[0]==-1) {
446           ht_bin[1]=(int)head->addr;
447           ht_bin[0]=vaddr;
448         }else if(ht_bin[2]==-1) {
449           ht_bin[3]=(int)head->addr;
450           ht_bin[2]=vaddr;
451         }
452         //ht_bin[3]=ht_bin[1];
453         //ht_bin[2]=ht_bin[0];
454         //ht_bin[1]=(int)head->addr;
455         //ht_bin[0]=vaddr;
456       }
457       return head->addr;
458     }
459     head=head->next;
460   }
461   head=jump_dirty[vpage];
462   while(head!=NULL) {
463     if(head->vaddr==vaddr&&(head->reg32&flags)==0) {
464       //printf("TRACE: count=%d next=%d (get_addr_32 match dirty %x: %x)\n",Count,next_interupt,vaddr,(int)head->addr);
465       // Don't restore blocks which are about to expire from the cache
466       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
467       if(verify_dirty(head->addr)) {
468         //printf("restore candidate: %x (%d) d=%d\n",vaddr,page,invalid_code[vaddr>>12]);
469         invalid_code[vaddr>>12]=0;
470         inv_code_start=inv_code_end=~0;
471         memory_map[vaddr>>12]|=0x40000000;
472         if(vpage<2048) {
473 #ifndef DISABLE_TLB
474           if(tlb_LUT_r[vaddr>>12]) {
475             invalid_code[tlb_LUT_r[vaddr>>12]>>12]=0;
476             memory_map[tlb_LUT_r[vaddr>>12]>>12]|=0x40000000;
477           }
478 #endif
479           restore_candidate[vpage>>3]|=1<<(vpage&7);
480         }
481         else restore_candidate[page>>3]|=1<<(page&7);
482         if(head->reg32==0) {
483           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
484           if(ht_bin[0]==-1) {
485             ht_bin[1]=(int)head->addr;
486             ht_bin[0]=vaddr;
487           }else if(ht_bin[2]==-1) {
488             ht_bin[3]=(int)head->addr;
489             ht_bin[2]=vaddr;
490           }
491           //ht_bin[3]=ht_bin[1];
492           //ht_bin[2]=ht_bin[0];
493           //ht_bin[1]=(int)head->addr;
494           //ht_bin[0]=vaddr;
495         }
496         return head->addr;
497       }
498     }
499     head=head->next;
500   }
501   //printf("TRACE: count=%d next=%d (get_addr_32 no-match %x,flags %x)\n",Count,next_interupt,vaddr,flags);
502   int r=new_recompile_block(vaddr);
503   if(r==0) return get_addr(vaddr);
504   // Execute in unmapped page, generate pagefault exception
505   Status|=2;
506   Cause=(vaddr<<31)|0x8;
507   EPC=(vaddr&1)?vaddr-5:vaddr;
508   BadVAddr=(vaddr&~1);
509   Context=(Context&0xFF80000F)|((BadVAddr>>9)&0x007FFFF0);
510   EntryHi=BadVAddr&0xFFFFE000;
511   return get_addr_ht(0x80000000);
512 #endif
513 }
514
515 void clear_all_regs(signed char regmap[])
516 {
517   int hr;
518   for (hr=0;hr<HOST_REGS;hr++) regmap[hr]=-1;
519 }
520
521 signed char get_reg(signed char regmap[],int r)
522 {
523   int hr;
524   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap[hr]==r) return hr;
525   return -1;
526 }
527
528 // Find a register that is available for two consecutive cycles
529 signed char get_reg2(signed char regmap1[],signed char regmap2[],int r)
530 {
531   int hr;
532   for (hr=0;hr<HOST_REGS;hr++) if(hr!=EXCLUDE_REG&&regmap1[hr]==r&&regmap2[hr]==r) return hr;
533   return -1;
534 }
535
536 int count_free_regs(signed char regmap[])
537 {
538   int count=0;
539   int hr;
540   for(hr=0;hr<HOST_REGS;hr++)
541   {
542     if(hr!=EXCLUDE_REG) {
543       if(regmap[hr]<0) count++;
544     }
545   }
546   return count;
547 }
548
549 void dirty_reg(struct regstat *cur,signed char reg)
550 {
551   int hr;
552   if(!reg) return;
553   for (hr=0;hr<HOST_REGS;hr++) {
554     if((cur->regmap[hr]&63)==reg) {
555       cur->dirty|=1<<hr;
556     }
557   }
558 }
559
560 // If we dirty the lower half of a 64 bit register which is now being
561 // sign-extended, we need to dump the upper half.
562 // Note: Do this only after completion of the instruction, because
563 // some instructions may need to read the full 64-bit value even if
564 // overwriting it (eg SLTI, DSRA32).
565 static void flush_dirty_uppers(struct regstat *cur)
566 {
567   int hr,reg;
568   for (hr=0;hr<HOST_REGS;hr++) {
569     if((cur->dirty>>hr)&1) {
570       reg=cur->regmap[hr];
571       if(reg>=64) 
572         if((cur->is32>>(reg&63))&1) cur->regmap[hr]=-1;
573     }
574   }
575 }
576
577 void set_const(struct regstat *cur,signed char reg,uint64_t value)
578 {
579   int hr;
580   if(!reg) return;
581   for (hr=0;hr<HOST_REGS;hr++) {
582     if(cur->regmap[hr]==reg) {
583       cur->isconst|=1<<hr;
584       cur->constmap[hr]=value;
585     }
586     else if((cur->regmap[hr]^64)==reg) {
587       cur->isconst|=1<<hr;
588       cur->constmap[hr]=value>>32;
589     }
590   }
591 }
592
593 void clear_const(struct regstat *cur,signed char reg)
594 {
595   int hr;
596   if(!reg) return;
597   for (hr=0;hr<HOST_REGS;hr++) {
598     if((cur->regmap[hr]&63)==reg) {
599       cur->isconst&=~(1<<hr);
600     }
601   }
602 }
603
604 int is_const(struct regstat *cur,signed char reg)
605 {
606   int hr;
607   if(reg<0) return 0;
608   if(!reg) return 1;
609   for (hr=0;hr<HOST_REGS;hr++) {
610     if((cur->regmap[hr]&63)==reg) {
611       return (cur->isconst>>hr)&1;
612     }
613   }
614   return 0;
615 }
616 uint64_t get_const(struct regstat *cur,signed char reg)
617 {
618   int hr;
619   if(!reg) return 0;
620   for (hr=0;hr<HOST_REGS;hr++) {
621     if(cur->regmap[hr]==reg) {
622       return cur->constmap[hr];
623     }
624   }
625   printf("Unknown constant in r%d\n",reg);
626   exit(1);
627 }
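/* Sketch of how the const-tracking helpers above are meant to be combined,
   mirroring what imm16_alloc() does for ORI further down. Illustrative only:
   example_fold_ori() is a hypothetical helper and is not called anywhere. */
#if 0
static void example_fold_ori(struct regstat *cur, signed char rt, signed char rs, u_int imm16)
{
  if (is_const(cur, rs))
    set_const(cur, rt, get_const(cur, rs) | imm16); // result is known too
  else
    clear_const(cur, rt); // rt no longer holds a compile-time constant
}
#endif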
628
629 // Least soon needed registers
630 // Look at the next ten instructions and see which registers
631 // will be used.  Try not to reallocate these.
632 void lsn(u_char hsn[], int i, int *preferred_reg)
633 {
634   int j;
635   int b=-1;
636   for(j=0;j<9;j++)
637   {
638     if(i+j>=slen) {
639       j=slen-i-1;
640       break;
641     }
642     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
643     {
644       // Don't go past an unconditional jump
645       j++;
646       break;
647     }
648   }
649   for(;j>=0;j--)
650   {
651     if(rs1[i+j]) hsn[rs1[i+j]]=j;
652     if(rs2[i+j]) hsn[rs2[i+j]]=j;
653     if(rt1[i+j]) hsn[rt1[i+j]]=j;
654     if(rt2[i+j]) hsn[rt2[i+j]]=j;
655     if(itype[i+j]==STORE || itype[i+j]==STORELR) {
656       // Stores can allocate zero
657       hsn[rs1[i+j]]=j;
658       hsn[rs2[i+j]]=j;
659     }
660     // On some architectures stores need invc_ptr
661     #if defined(HOST_IMM8)
662     if(itype[i+j]==STORE || itype[i+j]==STORELR || (opcode[i+j]&0x3b)==0x39 || (opcode[i+j]&0x3b)==0x3a) {
663       hsn[INVCP]=j;
664     }
665     #endif
666     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
667     {
668       hsn[CCREG]=j;
669       b=j;
670     }
671   }
672   if(b>=0)
673   {
674     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
675     {
676       // Follow first branch
677       int t=(ba[i+b]-start)>>2;
678       j=7-b;if(t+j>=slen) j=slen-t-1;
679       for(;j>=0;j--)
680       {
681         if(rs1[t+j]) if(hsn[rs1[t+j]]>j+b+2) hsn[rs1[t+j]]=j+b+2;
682         if(rs2[t+j]) if(hsn[rs2[t+j]]>j+b+2) hsn[rs2[t+j]]=j+b+2;
683         //if(rt1[t+j]) if(hsn[rt1[t+j]]>j+b+2) hsn[rt1[t+j]]=j+b+2;
684         //if(rt2[t+j]) if(hsn[rt2[t+j]]>j+b+2) hsn[rt2[t+j]]=j+b+2;
685       }
686     }
687     // TODO: preferred register based on backward branch
688   }
689   // Delay slot should preferably not overwrite branch conditions or cycle count
690   if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
691     if(rs1[i-1]) if(hsn[rs1[i-1]]>1) hsn[rs1[i-1]]=1;
692     if(rs2[i-1]) if(hsn[rs2[i-1]]>1) hsn[rs2[i-1]]=1;
693     hsn[CCREG]=1;
694     // ...or hash tables
695     hsn[RHASH]=1;
696     hsn[RHTBL]=1;
697   }
698   // Coprocessor load/store needs FTEMP, even if not declared
699   if(itype[i]==C1LS||itype[i]==C2LS) {
700     hsn[FTEMP]=0;
701   }
702   // Load L/R also uses FTEMP as a temporary register
703   if(itype[i]==LOADLR) {
704     hsn[FTEMP]=0;
705   }
706   // Also SWL/SWR/SDL/SDR
707   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) {
708     hsn[FTEMP]=0;
709   }
710   // Don't remove the TLB registers either
711   if(itype[i]==LOAD || itype[i]==LOADLR || itype[i]==STORE || itype[i]==STORELR || itype[i]==C1LS || itype[i]==C2LS) {
712     hsn[TLREG]=0;
713   }
714   // Don't remove the miniht registers
715   if(itype[i]==UJUMP||itype[i]==RJUMP)
716   {
717     hsn[RHASH]=0;
718     hsn[RHTBL]=0;
719   }
720 }
721
722 // We only want to allocate registers if we're going to use them again soon
723 int needed_again(int r, int i)
724 {
725   int j;
726   int b=-1;
727   int rn=10;
728   
729   if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000))
730   {
731     if(ba[i-1]<start || ba[i-1]>start+slen*4-4)
732       return 0; // Don't need any registers if exiting the block
733   }
734   for(j=0;j<9;j++)
735   {
736     if(i+j>=slen) {
737       j=slen-i-1;
738       break;
739     }
740     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
741     {
742       // Don't go past an unconditional jump
743       j++;
744       break;
745     }
746     if(itype[i+j]==SYSCALL||itype[i+j]==HLECALL||itype[i+j]==INTCALL||((source[i+j]&0xfc00003f)==0x0d))
747     {
748       break;
749     }
750   }
751   for(;j>=1;j--)
752   {
753     if(rs1[i+j]==r) rn=j;
754     if(rs2[i+j]==r) rn=j;
755     if((unneeded_reg[i+j]>>r)&1) rn=10;
756     if(i+j>=0&&(itype[i+j]==UJUMP||itype[i+j]==CJUMP||itype[i+j]==SJUMP||itype[i+j]==FJUMP))
757     {
758       b=j;
759     }
760   }
761   /*
762   if(b>=0)
763   {
764     if(ba[i+b]>=start && ba[i+b]<(start+slen*4))
765     {
766       // Follow first branch
767       int o=rn;
768       int t=(ba[i+b]-start)>>2;
769       j=7-b;if(t+j>=slen) j=slen-t-1;
770       for(;j>=0;j--)
771       {
772         if(!((unneeded_reg[t+j]>>r)&1)) {
773           if(rs1[t+j]==r) if(rn>j+b+2) rn=j+b+2;
774           if(rs2[t+j]==r) if(rn>j+b+2) rn=j+b+2;
775         }
776         else rn=o;
777       }
778     }
779   }*/
780   if(rn<10) return 1;
781   return 0;
782 }
783
784 // Try to match register allocations at the end of a loop with those
785 // at the beginning
786 int loop_reg(int i, int r, int hr)
787 {
788   int j,k;
789   for(j=0;j<9;j++)
790   {
791     if(i+j>=slen) {
792       j=slen-i-1;
793       break;
794     }
795     if(itype[i+j]==UJUMP||itype[i+j]==RJUMP||(source[i+j]>>16)==0x1000)
796     {
797       // Don't go past an unconditional jump
798       j++;
799       break;
800     }
801   }
802   k=0;
803   if(i>0){
804     if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)
805       k--;
806   }
807   for(;k<j;k++)
808   {
809     if(r<64&&((unneeded_reg[i+k]>>r)&1)) return hr;
810     if(r>64&&((unneeded_reg_upper[i+k]>>r)&1)) return hr;
811     if(i+k>=0&&(itype[i+k]==UJUMP||itype[i+k]==CJUMP||itype[i+k]==SJUMP||itype[i+k]==FJUMP))
812     {
813       if(ba[i+k]>=start && ba[i+k]<(start+i*4))
814       {
815         int t=(ba[i+k]-start)>>2;
816         int reg=get_reg(regs[t].regmap_entry,r);
817         if(reg>=0) return reg;
818         //reg=get_reg(regs[t+1].regmap_entry,r);
819         //if(reg>=0) return reg;
820       }
821     }
822   }
823   return hr;
824 }
825
826
827 // Allocate every register, preserving source/target regs
828 void alloc_all(struct regstat *cur,int i)
829 {
830   int hr;
831   
832   for(hr=0;hr<HOST_REGS;hr++) {
833     if(hr!=EXCLUDE_REG) {
834       if(((cur->regmap[hr]&63)!=rs1[i])&&((cur->regmap[hr]&63)!=rs2[i])&&
835          ((cur->regmap[hr]&63)!=rt1[i])&&((cur->regmap[hr]&63)!=rt2[i]))
836       {
837         cur->regmap[hr]=-1;
838         cur->dirty&=~(1<<hr);
839       }
840       // Don't need zeros
841       if((cur->regmap[hr]&63)==0)
842       {
843         cur->regmap[hr]=-1;
844         cur->dirty&=~(1<<hr);
845       }
846     }
847   }
848 }
849
850 #ifndef FORCE32
851 void div64(int64_t dividend,int64_t divisor)
852 {
853   lo=dividend/divisor;
854   hi=dividend%divisor;
855   //printf("TRACE: ddiv %8x%8x %8x%8x\n" ,(int)reg[HIREG],(int)(reg[HIREG]>>32)
856   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
857 }
858 void divu64(uint64_t dividend,uint64_t divisor)
859 {
860   lo=dividend/divisor;
861   hi=dividend%divisor;
862   //printf("TRACE: ddivu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
863   //                                     ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
864 }
865
866 void mult64(uint64_t m1,uint64_t m2)
867 {
868    unsigned long long int op1, op2, op3, op4;
869    unsigned long long int result1, result2, result3, result4;
870    unsigned long long int temp1, temp2, temp3, temp4;
871    int sign = 0;
872    
873    if ((int64_t)m1 < 0)
874      {
875     op2 = -m1;
876     sign = 1 - sign;
877      }
878    else op2 = m1;
879    if ((int64_t)m2 < 0)
880      {
881     op4 = -m2;
882     sign = 1 - sign;
883      }
884    else op4 = m2;
885    
886    op1 = op2 & 0xFFFFFFFF;
887    op2 = (op2 >> 32) & 0xFFFFFFFF;
888    op3 = op4 & 0xFFFFFFFF;
889    op4 = (op4 >> 32) & 0xFFFFFFFF;
890    
891    temp1 = op1 * op3;
892    temp2 = (temp1 >> 32) + op1 * op4;
893    temp3 = op2 * op3;
894    temp4 = (temp3 >> 32) + op2 * op4;
895    
896    result1 = temp1 & 0xFFFFFFFF;
897    result2 = temp2 + (temp3 & 0xFFFFFFFF);
898    result3 = (result2 >> 32) + temp4;
899    result4 = (result3 >> 32);
900    
901    lo = result1 | (result2 << 32);
902    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
903    if (sign)
904      {
905     hi = ~hi;
906     if (!lo) hi++;
907     else lo = ~lo + 1;
908      }
909 }
910
911 void multu64(uint64_t m1,uint64_t m2)
912 {
913    unsigned long long int op1, op2, op3, op4;
914    unsigned long long int result1, result2, result3, result4;
915    unsigned long long int temp1, temp2, temp3, temp4;
916    
917    op1 = m1 & 0xFFFFFFFF;
918    op2 = (m1 >> 32) & 0xFFFFFFFF;
919    op3 = m2 & 0xFFFFFFFF;
920    op4 = (m2 >> 32) & 0xFFFFFFFF;
921    
922    temp1 = op1 * op3;
923    temp2 = (temp1 >> 32) + op1 * op4;
924    temp3 = op2 * op3;
925    temp4 = (temp3 >> 32) + op2 * op4;
926    
927    result1 = temp1 & 0xFFFFFFFF;
928    result2 = temp2 + (temp3 & 0xFFFFFFFF);
929    result3 = (result2 >> 32) + temp4;
930    result4 = (result3 >> 32);
931    
932    lo = result1 | (result2 << 32);
933    hi = (result3 & 0xFFFFFFFF) | (result4 << 32);
934    
935   //printf("TRACE: dmultu %8x%8x %8x%8x\n",(int)reg[HIREG],(int)(reg[HIREG]>>32)
936   //                                      ,(int)reg[LOREG],(int)(reg[LOREG]>>32));
937 }
938
939 uint64_t ldl_merge(uint64_t original,uint64_t loaded,u_int bits)
940 {
941   if(bits) {
942     original<<=64-bits;
943     original>>=64-bits;
944     loaded<<=bits;
945     original|=loaded;
946   }
947   else original=loaded;
948   return original;
949 }
950 uint64_t ldr_merge(uint64_t original,uint64_t loaded,u_int bits)
951 {
952   if(bits^56) {
953     original>>=64-(bits^56);
954     original<<=64-(bits^56);
955     loaded>>=bits^56;
956     original|=loaded;
957   }
958   else original=loaded;
959   return original;
960 }
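/* Worked example for ldl_merge(): with bits=8 only the low byte of the old
   value survives and the loaded value supplies the upper 56 bits, e.g.
     ldl_merge(0x1122334455667788, 0xAABBCCDDEEFF0011, 8)
       = (0xAABBCCDDEEFF0011 << 8) | 0x88 = 0xBBCCDDEEFF001188
   bits=0 returns the loaded value unchanged. ldr_merge() is the mirror case
   for the right-aligned part of the doubleword. */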
961 #endif
962
963 #ifdef __i386__
964 #include "assem_x86.c"
965 #endif
966 #ifdef __x86_64__
967 #include "assem_x64.c"
968 #endif
969 #ifdef __arm__
970 #include "assem_arm.c"
971 #endif
972
973 // Add virtual address mapping to linked list
974 void ll_add(struct ll_entry **head,int vaddr,void *addr)
975 {
976   struct ll_entry *new_entry;
977   new_entry=malloc(sizeof(struct ll_entry));
978   assert(new_entry!=NULL);
979   new_entry->vaddr=vaddr;
980   new_entry->reg32=0;
981   new_entry->addr=addr;
982   new_entry->next=*head;
983   *head=new_entry;
984 }
985
986 // Add virtual address mapping for 32-bit compiled block
987 void ll_add_32(struct ll_entry **head,int vaddr,u_int reg32,void *addr)
988 {
989   ll_add(head,vaddr,addr);
990 #ifndef FORCE32
991   (*head)->reg32=reg32;
992 #endif
993 }
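/* How the per-page lists are used (summary of the code below): jump_in[page]
   holds clean compiled blocks whose entry point lies in that page,
   jump_dirty[page] holds blocks whose source may have been overwritten
   (clean_blocks() can move them back once verified), and jump_out[page]
   records branches from other blocks into the page so invalidate_page()
   can unlink them. */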
994
995 // Check if an address is already compiled
996 // but don't return addresses which are about to expire from the cache
997 void *check_addr(u_int vaddr)
998 {
999   u_int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
1000   if(ht_bin[0]==vaddr) {
1001     if(((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1002       if(isclean(ht_bin[1])) return (void *)ht_bin[1];
1003   }
1004   if(ht_bin[2]==vaddr) {
1005     if(((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2)))
1006       if(isclean(ht_bin[3])) return (void *)ht_bin[3];
1007   }
1008   u_int page=get_page(vaddr);
1009   struct ll_entry *head;
1010   head=jump_in[page];
1011   while(head!=NULL) {
1012     if(head->vaddr==vaddr&&head->reg32==0) {
1013       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1014         // Update existing entry with current address
1015         if(ht_bin[0]==vaddr) {
1016           ht_bin[1]=(int)head->addr;
1017           return head->addr;
1018         }
1019         if(ht_bin[2]==vaddr) {
1020           ht_bin[3]=(int)head->addr;
1021           return head->addr;
1022         }
1023         // Insert into hash table with low priority.
1024         // Don't evict existing entries, as they are probably
1025         // addresses that are being accessed frequently.
1026         if(ht_bin[0]==-1) {
1027           ht_bin[1]=(int)head->addr;
1028           ht_bin[0]=vaddr;
1029         }else if(ht_bin[2]==-1) {
1030           ht_bin[3]=(int)head->addr;
1031           ht_bin[2]=vaddr;
1032         }
1033         return head->addr;
1034       }
1035     }
1036     head=head->next;
1037   }
1038   return 0;
1039 }
1040
1041 void remove_hash(int vaddr)
1042 {
1043   //printf("remove hash: %x\n",vaddr);
1044   int *ht_bin=hash_table[(((vaddr)>>16)^vaddr)&0xFFFF];
1045   if(ht_bin[2]==vaddr) {
1046     ht_bin[2]=ht_bin[3]=-1;
1047   }
1048   if(ht_bin[0]==vaddr) {
1049     ht_bin[0]=ht_bin[2];
1050     ht_bin[1]=ht_bin[3];
1051     ht_bin[2]=ht_bin[3]=-1;
1052   }
1053 }
1054
1055 void ll_remove_matching_addrs(struct ll_entry **head,int addr,int shift)
1056 {
1057   struct ll_entry *next;
1058   while(*head) {
1059     if(((u_int)((*head)->addr)>>shift)==(addr>>shift) || 
1060        ((u_int)((*head)->addr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift))
1061     {
1062       inv_debug("EXP: Remove pointer to %x (%x)\n",(int)(*head)->addr,(*head)->vaddr);
1063       remove_hash((*head)->vaddr);
1064       next=(*head)->next;
1065       free(*head);
1066       *head=next;
1067     }
1068     else
1069     {
1070       head=&((*head)->next);
1071     }
1072   }
1073 }
1074
1075 // Remove all entries from linked list
1076 void ll_clear(struct ll_entry **head)
1077 {
1078   struct ll_entry *cur;
1079   struct ll_entry *next;
1080   if((cur=*head)) {
1081     *head=0;
1082     while(cur) {
1083       next=cur->next;
1084       free(cur);
1085       cur=next;
1086     }
1087   }
1088 }
1089
1090 // Dereference the pointers and remove if it matches
1091 void ll_kill_pointers(struct ll_entry *head,int addr,int shift)
1092 {
1093   while(head) {
1094     int ptr=get_pointer(head->addr);
1095     inv_debug("EXP: Lookup pointer to %x at %x (%x)\n",(int)ptr,(int)head->addr,head->vaddr);
1096     if(((ptr>>shift)==(addr>>shift)) ||
1097        (((ptr-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(addr>>shift)))
1098     {
1099       inv_debug("EXP: Kill pointer at %x (%x)\n",(int)head->addr,head->vaddr);
1100       u_int host_addr=(u_int)kill_pointer(head->addr);
1101       #ifdef __arm__
1102         needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1103       #endif
1104     }
1105     head=head->next;
1106   }
1107 }
1108
1109 // This is called when we write to a compiled block (see do_invstub)
1110 void invalidate_page(u_int page)
1111 {
1112   struct ll_entry *head;
1113   struct ll_entry *next;
1114   head=jump_in[page];
1115   jump_in[page]=0;
1116   while(head!=NULL) {
1117     inv_debug("INVALIDATE: %x\n",head->vaddr);
1118     remove_hash(head->vaddr);
1119     next=head->next;
1120     free(head);
1121     head=next;
1122   }
1123   head=jump_out[page];
1124   jump_out[page]=0;
1125   while(head!=NULL) {
1126     inv_debug("INVALIDATE: kill pointer to %x (%x)\n",head->vaddr,(int)head->addr);
1127     u_int host_addr=(u_int)kill_pointer(head->addr);
1128     #ifdef __arm__
1129       needs_clear_cache[(host_addr-(u_int)BASE_ADDR)>>17]|=1<<(((host_addr-(u_int)BASE_ADDR)>>12)&31);
1130     #endif
1131     next=head->next;
1132     free(head);
1133     head=next;
1134   }
1135 }
1136
1137 static void invalidate_block_range(u_int block, u_int first, u_int last)
1138 {
1139   u_int page=get_page(block<<12);
1140   //printf("first=%d last=%d\n",first,last);
1141   invalidate_page(page);
1142   assert(first+5>page); // NB: this assumes MAXBLOCK<=4096 (4 pages)
1143   assert(last<page+5);
1144   // Invalidate the adjacent pages if a block crosses a 4K boundary
1145   while(first<page) {
1146     invalidate_page(first);
1147     first++;
1148   }
1149   for(first=page+1;first<last;first++) {
1150     invalidate_page(first);
1151   }
1152   #ifdef __arm__
1153     do_clear_cache();
1154   #endif
1155   
1156   // Don't trap writes
1157   invalid_code[block]=1;
1158 #ifndef DISABLE_TLB
1159   // If there is a valid TLB entry for this page, remove write protect
1160   if(tlb_LUT_w[block]) {
1161     assert(tlb_LUT_r[block]==tlb_LUT_w[block]);
1162     // CHECK: Is this right?
1163     memory_map[block]=((tlb_LUT_w[block]&0xFFFFF000)-(block<<12)+(unsigned int)rdram-0x80000000)>>2;
1164     u_int real_block=tlb_LUT_w[block]>>12;
1165     invalid_code[real_block]=1;
1166     if(real_block>=0x80000&&real_block<0x80800) memory_map[real_block]=((u_int)rdram-0x80000000)>>2;
1167   }
1168   else if(block>=0x80000&&block<0x80800) memory_map[block]=((u_int)rdram-0x80000000)>>2;
1169 #endif
1170
1171   #ifdef USE_MINI_HT
1172   memset(mini_ht,-1,sizeof(mini_ht));
1173   #endif
1174 }
1175
1176 void invalidate_block(u_int block)
1177 {
1178   u_int page=get_page(block<<12);
1179   u_int vpage=get_vpage(block<<12);
1180   inv_debug("INVALIDATE: %x (%d)\n",block<<12,page);
1181   //inv_debug("invalid_code[block]=%d\n",invalid_code[block]);
1182   u_int first,last;
1183   first=last=page;
1184   struct ll_entry *head;
1185   head=jump_dirty[vpage];
1186   //printf("page=%d vpage=%d\n",page,vpage);
1187   while(head!=NULL) {
1188     u_int start,end;
1189     if(vpage>2047||(head->vaddr>>12)==block) { // Ignore vaddr hash collision
1190       get_bounds((int)head->addr,&start,&end);
1191       //printf("start: %x end: %x\n",start,end);
1192       if(page<2048&&start>=0x80000000&&end<0x80000000+RAM_SIZE) {
1193         if(((start-(u_int)rdram)>>12)<=page&&((end-1-(u_int)rdram)>>12)>=page) {
1194           if((((start-(u_int)rdram)>>12)&2047)<first) first=((start-(u_int)rdram)>>12)&2047;
1195           if((((end-1-(u_int)rdram)>>12)&2047)>last) last=((end-1-(u_int)rdram)>>12)&2047;
1196         }
1197       }
1198 #ifndef DISABLE_TLB
1199       if(page<2048&&(signed int)start>=(signed int)0xC0000000&&(signed int)end>=(signed int)0xC0000000) {
1200         if(((start+memory_map[start>>12]-(u_int)rdram)>>12)<=page&&((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)>=page) {
1201           if((((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047)<first) first=((start+memory_map[start>>12]-(u_int)rdram)>>12)&2047;
1202           if((((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047)>last) last=((end-1+memory_map[(end-1)>>12]-(u_int)rdram)>>12)&2047;
1203         }
1204       }
1205 #endif
1206     }
1207     head=head->next;
1208   }
1209   invalidate_block_range(block,first,last);
1210 }
1211
1212 void invalidate_addr(u_int addr)
1213 {
1214 #ifdef PCSX
1215   //static int rhits;
1216   // this check is done by the caller
1217   //if (inv_code_start<=addr&&addr<=inv_code_end) { rhits++; return; }
1218   u_int page=get_page(addr);
1219   if(page<2048) { // RAM
1220     struct ll_entry *head;
1221     u_int addr_min=~0, addr_max=0;
1222     int mask=RAM_SIZE-1;
1223     int pg1;
1224     inv_code_start=addr&~0xfff;
1225     inv_code_end=addr|0xfff;
1226     pg1=page;
1227     if (pg1>0) {
1228       // must check previous page too because of spans..
1229       pg1--;
1230       inv_code_start-=0x1000;
1231     }
1232     for(;pg1<=page;pg1++) {
1233       for(head=jump_dirty[pg1];head!=NULL;head=head->next) {
1234         u_int start,end;
1235         get_bounds((int)head->addr,&start,&end);
1236         if((start&mask)<=(addr&mask)&&(addr&mask)<(end&mask)) {
1237           if(start<addr_min) addr_min=start;
1238           if(end>addr_max) addr_max=end;
1239         }
1240         else if(addr<start) {
1241           if(start<inv_code_end)
1242             inv_code_end=start-1;
1243         }
1244         else {
1245           if(end>inv_code_start)
1246             inv_code_start=end;
1247         }
1248       }
1249     }
1250     if (addr_min!=~0) {
1251       inv_debug("INV ADDR: %08x hit %08x-%08x\n", addr, addr_min, addr_max);
1252       inv_code_start=inv_code_end=~0;
1253       invalidate_block_range(addr>>12,(addr_min&mask)>>12,(addr_max&mask)>>12);
1254       return;
1255     }
1256     else {
1257       inv_debug("INV ADDR: %08x miss, inv %08x-%08x, sk %d\n", addr, inv_code_start, inv_code_end, 0);//rhits);
1258     }
1259     //rhits=0;
1260     if(page!=0) // FIXME: don't know what's up with page 0 (Klonoa)
1261       return;
1262   }
1263 #endif
1264   invalidate_block(addr>>12);
1265 }
1266
1267 // This is called when loading a save state.
1268 // Anything could have changed, so invalidate everything.
1269 void invalidate_all_pages()
1270 {
1271   u_int page,n;
1272   for(page=0;page<4096;page++)
1273     invalidate_page(page);
1274   for(page=0;page<1048576;page++)
1275     if(!invalid_code[page]) {
1276       restore_candidate[(page&2047)>>3]|=1<<(page&7);
1277       restore_candidate[((page&2047)>>3)+256]|=1<<(page&7);
1278     }
1279   #ifdef __arm__
1280   __clear_cache((void *)BASE_ADDR,(void *)BASE_ADDR+(1<<TARGET_SIZE_2));
1281   #endif
1282   #ifdef USE_MINI_HT
1283   memset(mini_ht,-1,sizeof(mini_ht));
1284   #endif
1285   #ifndef DISABLE_TLB
1286   // TLB
1287   for(page=0;page<0x100000;page++) {
1288     if(tlb_LUT_r[page]) {
1289       memory_map[page]=((tlb_LUT_r[page]&0xFFFFF000)-(page<<12)+(unsigned int)rdram-0x80000000)>>2;
1290       if(!tlb_LUT_w[page]||!invalid_code[page])
1291         memory_map[page]|=0x40000000; // Write protect
1292     }
1293     else memory_map[page]=-1;
1294     if(page==0x80000) page=0xC0000;
1295   }
1296   tlb_hacks();
1297   #endif
1298 }
1299
1300 // Add an entry to jump_out after making a link
1301 void add_link(u_int vaddr,void *src)
1302 {
1303   u_int page=get_page(vaddr);
1304   inv_debug("add_link: %x -> %x (%d)\n",(int)src,vaddr,page);
1305   int *ptr=(int *)(src+4);
1306   assert((*ptr&0x0fff0000)==0x059f0000);
1307   ll_add(jump_out+page,vaddr,src);
1308   //int ptr=get_pointer(src);
1309   //inv_debug("add_link: Pointer is to %x\n",(int)ptr);
1310 }
1311
1312 // If a code block was found to be unmodified (bit was set in
1313 // restore_candidate) and it remains unmodified (bit is clear
1314 // in invalid_code) then move the entries for that 4K page from
1315 // the dirty list to the clean list.
1316 void clean_blocks(u_int page)
1317 {
1318   struct ll_entry *head;
1319   inv_debug("INV: clean_blocks page=%d\n",page);
1320   head=jump_dirty[page];
1321   while(head!=NULL) {
1322     if(!invalid_code[head->vaddr>>12]) {
1323       // Don't restore blocks which are about to expire from the cache
1324       if((((u_int)head->addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1325         u_int start,end;
1326         if(verify_dirty((int)head->addr)) {
1327           //printf("Possibly Restore %x (%x)\n",head->vaddr, (int)head->addr);
1328           u_int i;
1329           u_int inv=0;
1330           get_bounds((int)head->addr,&start,&end);
1331           if(start-(u_int)rdram<RAM_SIZE) {
1332             for(i=(start-(u_int)rdram+0x80000000)>>12;i<=(end-1-(u_int)rdram+0x80000000)>>12;i++) {
1333               inv|=invalid_code[i];
1334             }
1335           }
1336           if((signed int)head->vaddr>=(signed int)0xC0000000) {
1337             u_int addr = (head->vaddr+(memory_map[head->vaddr>>12]<<2));
1338             //printf("addr=%x start=%x end=%x\n",addr,start,end);
1339             if(addr<start||addr>=end) inv=1;
1340           }
1341           else if((signed int)head->vaddr>=(signed int)0x80000000+RAM_SIZE) {
1342             inv=1;
1343           }
1344           if(!inv) {
1345             void * clean_addr=(void *)get_clean_addr((int)head->addr);
1346             if((((u_int)clean_addr-(u_int)out)<<(32-TARGET_SIZE_2))>0x60000000+(MAX_OUTPUT_BLOCK_SIZE<<(32-TARGET_SIZE_2))) {
1347               u_int ppage=page;
1348 #ifndef DISABLE_TLB
1349               if(page<2048&&tlb_LUT_r[head->vaddr>>12]) ppage=(tlb_LUT_r[head->vaddr>>12]^0x80000000)>>12;
1350 #endif
1351               inv_debug("INV: Restored %x (%x/%x)\n",head->vaddr, (int)head->addr, (int)clean_addr);
1352               //printf("page=%x, addr=%x\n",page,head->vaddr);
1353               //assert(head->vaddr>>12==(page|0x80000));
1354               ll_add_32(jump_in+ppage,head->vaddr,head->reg32,clean_addr);
1355               int *ht_bin=hash_table[((head->vaddr>>16)^head->vaddr)&0xFFFF];
1356               if(!head->reg32) {
1357                 if(ht_bin[0]==head->vaddr) {
1358                   ht_bin[1]=(int)clean_addr; // Replace existing entry
1359                 }
1360                 if(ht_bin[2]==head->vaddr) {
1361                   ht_bin[3]=(int)clean_addr; // Replace existing entry
1362                 }
1363               }
1364             }
1365           }
1366         }
1367       }
1368     }
1369     head=head->next;
1370   }
1371 }
1372
1373
1374 void mov_alloc(struct regstat *current,int i)
1375 {
1376   // Note: Don't need to actually alloc the source registers
1377   if((~current->is32>>rs1[i])&1) {
1378     //alloc_reg64(current,i,rs1[i]);
1379     alloc_reg64(current,i,rt1[i]);
1380     current->is32&=~(1LL<<rt1[i]);
1381   } else {
1382     //alloc_reg(current,i,rs1[i]);
1383     alloc_reg(current,i,rt1[i]);
1384     current->is32|=(1LL<<rt1[i]);
1385   }
1386   clear_const(current,rs1[i]);
1387   clear_const(current,rt1[i]);
1388   dirty_reg(current,rt1[i]);
1389 }
1390
1391 void shiftimm_alloc(struct regstat *current,int i)
1392 {
1393   clear_const(current,rs1[i]);
1394   clear_const(current,rt1[i]);
1395   if(opcode2[i]<=0x3) // SLL/SRL/SRA
1396   {
1397     if(rt1[i]) {
1398       if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1399       else lt1[i]=rs1[i];
1400       alloc_reg(current,i,rt1[i]);
1401       current->is32|=1LL<<rt1[i];
1402       dirty_reg(current,rt1[i]);
1403     }
1404   }
1405   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
1406   {
1407     if(rt1[i]) {
1408       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1409       alloc_reg64(current,i,rt1[i]);
1410       current->is32&=~(1LL<<rt1[i]);
1411       dirty_reg(current,rt1[i]);
1412     }
1413   }
1414   if(opcode2[i]==0x3c) // DSLL32
1415   {
1416     if(rt1[i]) {
1417       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1418       alloc_reg64(current,i,rt1[i]);
1419       current->is32&=~(1LL<<rt1[i]);
1420       dirty_reg(current,rt1[i]);
1421     }
1422   }
1423   if(opcode2[i]==0x3e) // DSRL32
1424   {
1425     if(rt1[i]) {
1426       alloc_reg64(current,i,rs1[i]);
1427       if(imm[i]==32) {
1428         alloc_reg64(current,i,rt1[i]);
1429         current->is32&=~(1LL<<rt1[i]);
1430       } else {
1431         alloc_reg(current,i,rt1[i]);
1432         current->is32|=1LL<<rt1[i];
1433       }
1434       dirty_reg(current,rt1[i]);
1435     }
1436   }
1437   if(opcode2[i]==0x3f) // DSRA32
1438   {
1439     if(rt1[i]) {
1440       alloc_reg64(current,i,rs1[i]);
1441       alloc_reg(current,i,rt1[i]);
1442       current->is32|=1LL<<rt1[i];
1443       dirty_reg(current,rt1[i]);
1444     }
1445   }
1446 }
1447
1448 void shift_alloc(struct regstat *current,int i)
1449 {
1450   if(rt1[i]) {
1451     if(opcode2[i]<=0x07) // SLLV/SRLV/SRAV
1452     {
1453       if(rs1[i]) alloc_reg(current,i,rs1[i]);
1454       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1455       alloc_reg(current,i,rt1[i]);
1456       if(rt1[i]==rs2[i]) {
1457         alloc_reg_temp(current,i,-1);
1458         minimum_free_regs[i]=1;
1459       }
1460       current->is32|=1LL<<rt1[i];
1461     } else { // DSLLV/DSRLV/DSRAV
1462       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
1463       if(rs2[i]) alloc_reg(current,i,rs2[i]);
1464       alloc_reg64(current,i,rt1[i]);
1465       current->is32&=~(1LL<<rt1[i]);
1466       if(opcode2[i]==0x16||opcode2[i]==0x17) // DSRLV and DSRAV need a temporary register
1467       {
1468         alloc_reg_temp(current,i,-1);
1469         minimum_free_regs[i]=1;
1470       }
1471     }
1472     clear_const(current,rs1[i]);
1473     clear_const(current,rs2[i]);
1474     clear_const(current,rt1[i]);
1475     dirty_reg(current,rt1[i]);
1476   }
1477 }
1478
1479 void alu_alloc(struct regstat *current,int i)
1480 {
1481   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
1482     if(rt1[i]) {
1483       if(rs1[i]&&rs2[i]) {
1484         alloc_reg(current,i,rs1[i]);
1485         alloc_reg(current,i,rs2[i]);
1486       }
1487       else {
1488         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1489         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1490       }
1491       alloc_reg(current,i,rt1[i]);
1492     }
1493     current->is32|=1LL<<rt1[i];
1494   }
1495   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
1496     if(rt1[i]) {
1497       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1498       {
1499         alloc_reg64(current,i,rs1[i]);
1500         alloc_reg64(current,i,rs2[i]);
1501         alloc_reg(current,i,rt1[i]);
1502       } else {
1503         alloc_reg(current,i,rs1[i]);
1504         alloc_reg(current,i,rs2[i]);
1505         alloc_reg(current,i,rt1[i]);
1506       }
1507     }
1508     current->is32|=1LL<<rt1[i];
1509   }
1510   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
1511     if(rt1[i]) {
1512       if(rs1[i]&&rs2[i]) {
1513         alloc_reg(current,i,rs1[i]);
1514         alloc_reg(current,i,rs2[i]);
1515       }
1516       else
1517       {
1518         if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1519         if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg(current,i,rs2[i]);
1520       }
1521       alloc_reg(current,i,rt1[i]);
1522       if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
1523       {
1524         if(!((current->uu>>rt1[i])&1)) {
1525           alloc_reg64(current,i,rt1[i]);
1526         }
1527         if(get_reg(current->regmap,rt1[i]|64)>=0) {
1528           if(rs1[i]&&rs2[i]) {
1529             alloc_reg64(current,i,rs1[i]);
1530             alloc_reg64(current,i,rs2[i]);
1531           }
1532           else
1533           {
1534             // Is it really worth it to keep 64-bit values in registers?
1535             #ifdef NATIVE_64BIT
1536             if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1537             if(rs2[i]&&needed_again(rs2[i],i)) alloc_reg64(current,i,rs2[i]);
1538             #endif
1539           }
1540         }
1541         current->is32&=~(1LL<<rt1[i]);
1542       } else {
1543         current->is32|=1LL<<rt1[i];
1544       }
1545     }
1546   }
1547   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
1548     if(rt1[i]) {
1549       if(rs1[i]&&rs2[i]) {
1550         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1551           alloc_reg64(current,i,rs1[i]);
1552           alloc_reg64(current,i,rs2[i]);
1553           alloc_reg64(current,i,rt1[i]);
1554         } else {
1555           alloc_reg(current,i,rs1[i]);
1556           alloc_reg(current,i,rs2[i]);
1557           alloc_reg(current,i,rt1[i]);
1558         }
1559       }
1560       else {
1561         alloc_reg(current,i,rt1[i]);
1562         if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1563           // DADD used as move, or zeroing
1564           // If we have a 64-bit source, then make the target 64 bits too
1565           if(rs1[i]&&!((current->is32>>rs1[i])&1)) {
1566             if(get_reg(current->regmap,rs1[i])>=0) alloc_reg64(current,i,rs1[i]);
1567             alloc_reg64(current,i,rt1[i]);
1568           } else if(rs2[i]&&!((current->is32>>rs2[i])&1)) {
1569             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1570             alloc_reg64(current,i,rt1[i]);
1571           }
1572           if(opcode2[i]>=0x2e&&rs2[i]) {
1573             // DSUB used as negation - 64-bit result
1574             // If we have a 32-bit register, extend it to 64 bits
1575             if(get_reg(current->regmap,rs2[i])>=0) alloc_reg64(current,i,rs2[i]);
1576             alloc_reg64(current,i,rt1[i]);
1577           }
1578         }
1579       }
1580       if(rs1[i]&&rs2[i]) {
1581         current->is32&=~(1LL<<rt1[i]);
1582       } else if(rs1[i]) {
1583         current->is32&=~(1LL<<rt1[i]);
1584         if((current->is32>>rs1[i])&1)
1585           current->is32|=1LL<<rt1[i];
1586       } else if(rs2[i]) {
1587         current->is32&=~(1LL<<rt1[i]);
1588         if((current->is32>>rs2[i])&1)
1589           current->is32|=1LL<<rt1[i];
1590       } else {
1591         current->is32|=1LL<<rt1[i];
1592       }
1593     }
1594   }
1595   clear_const(current,rs1[i]);
1596   clear_const(current,rs2[i]);
1597   clear_const(current,rt1[i]);
1598   dirty_reg(current,rt1[i]);
1599 }
1600
1601 void imm16_alloc(struct regstat *current,int i)
1602 {
1603   if(rs1[i]&&needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1604   else lt1[i]=rs1[i];
1605   if(rt1[i]) alloc_reg(current,i,rt1[i]);
1606   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
1607     current->is32&=~(1LL<<rt1[i]);
1608     if(!((current->uu>>rt1[i])&1)||get_reg(current->regmap,rt1[i]|64)>=0) {
1609       // TODO: Could preserve the 32-bit flag if the immediate is zero
1610       alloc_reg64(current,i,rt1[i]);
1611       alloc_reg64(current,i,rs1[i]);
1612     }
1613     clear_const(current,rs1[i]);
1614     clear_const(current,rt1[i]);
1615   }
1616   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
1617     if((~current->is32>>rs1[i])&1) alloc_reg64(current,i,rs1[i]);
1618     current->is32|=1LL<<rt1[i];
1619     clear_const(current,rs1[i]);
1620     clear_const(current,rt1[i]);
1621   }
1622   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
1623     if(((~current->is32>>rs1[i])&1)&&opcode[i]>0x0c) {
1624       if(rs1[i]!=rt1[i]) {
1625         if(needed_again(rs1[i],i)) alloc_reg64(current,i,rs1[i]);
1626         alloc_reg64(current,i,rt1[i]);
1627         current->is32&=~(1LL<<rt1[i]);
1628       }
1629     }
1630     else current->is32|=1LL<<rt1[i]; // ANDI clears upper bits
1631     if(is_const(current,rs1[i])) {
1632       int v=get_const(current,rs1[i]);
1633       if(opcode[i]==0x0c) set_const(current,rt1[i],v&imm[i]);
1634       if(opcode[i]==0x0d) set_const(current,rt1[i],v|imm[i]);
1635       if(opcode[i]==0x0e) set_const(current,rt1[i],v^imm[i]);
1636     }
1637     else clear_const(current,rt1[i]);
1638   }
1639   else if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
1640     if(is_const(current,rs1[i])) {
1641       int v=get_const(current,rs1[i]);
1642       set_const(current,rt1[i],v+imm[i]);
1643     }
1644     else clear_const(current,rt1[i]);
1645     current->is32|=1LL<<rt1[i];
1646   }
1647   else {
1648     set_const(current,rt1[i],((long long)((short)imm[i]))<<16); // LUI
1649     current->is32|=1LL<<rt1[i];
1650   }
1651   dirty_reg(current,rt1[i]);
1652 }
1653
1654 void load_alloc(struct regstat *current,int i)
1655 {
1656   clear_const(current,rt1[i]);
1657   //if(rs1[i]!=rt1[i]&&needed_again(rs1[i],i)) clear_const(current,rs1[i]); // Does this help or hurt?
1658   if(!rs1[i]) current->u&=~1LL; // Allow allocating r0 if it's the source register
1659   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1660   if(rt1[i]&&!((current->u>>rt1[i])&1)) {
1661     alloc_reg(current,i,rt1[i]);
1662     assert(get_reg(current->regmap,rt1[i])>=0);
1663     if(opcode[i]==0x27||opcode[i]==0x37) // LWU/LD
1664     {
1665       current->is32&=~(1LL<<rt1[i]);
1666       alloc_reg64(current,i,rt1[i]);
1667     }
1668     else if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1669     {
1670       current->is32&=~(1LL<<rt1[i]);
1671       alloc_reg64(current,i,rt1[i]);
1672       alloc_all(current,i);
1673       alloc_reg64(current,i,FTEMP);
1674       minimum_free_regs[i]=HOST_REGS;
1675     }
1676     else current->is32|=1LL<<rt1[i];
1677     dirty_reg(current,rt1[i]);
1678     // If using TLB, need a register for pointer to the mapping table
1679     if(using_tlb) alloc_reg(current,i,TLREG);
1680     // LWL/LWR need a temporary register for the old value
1681     if(opcode[i]==0x22||opcode[i]==0x26)
1682     {
1683       alloc_reg(current,i,FTEMP);
1684       alloc_reg_temp(current,i,-1);
1685       minimum_free_regs[i]=1;
1686     }
1687   }
1688   else
1689   {
1690     // Load to r0 or unneeded register (dummy load)
1691     // but we still need a register to calculate the address
1692     if(opcode[i]==0x22||opcode[i]==0x26)
1693     {
1694       alloc_reg(current,i,FTEMP); // LWL/LWR need another temporary
1695     }
1696     // If using TLB, need a register for pointer to the mapping table
1697     if(using_tlb) alloc_reg(current,i,TLREG);
1698     alloc_reg_temp(current,i,-1);
1699     minimum_free_regs[i]=1;
1700     if(opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
1701     {
1702       alloc_all(current,i);
1703       alloc_reg64(current,i,FTEMP);
1704       minimum_free_regs[i]=HOST_REGS;
1705     }
1706   }
1707 }
1708
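// Register allocation for stores (SB/SH/SW/SD and SWL/SWR/SDL/SDR): the data
// register, an address-generation temporary, and on HOST_IMM8 targets the
// INVCP pointer used for the self-modifying-code check.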
1709 void store_alloc(struct regstat *current,int i)
1710 {
1711   clear_const(current,rs2[i]);
1712   if(!(rs2[i])) current->u&=~1LL; // Allow allocating r0 if necessary
1713   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1714   alloc_reg(current,i,rs2[i]);
1715   if(opcode[i]==0x2c||opcode[i]==0x2d||opcode[i]==0x3f) { // 64-bit SDL/SDR/SD
1716     alloc_reg64(current,i,rs2[i]);
1717     if(rs2[i]) alloc_reg(current,i,FTEMP);
1718   }
1719   // If using TLB, need a register for pointer to the mapping table
1720   if(using_tlb) alloc_reg(current,i,TLREG);
1721   #if defined(HOST_IMM8)
1722   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1723   else alloc_reg(current,i,INVCP);
1724   #endif
1725   if(opcode[i]==0x2a||opcode[i]==0x2e||opcode[i]==0x2c||opcode[i]==0x2d) { // SWL/SWR/SDL/SDR
1726     alloc_reg(current,i,FTEMP);
1727   }
1728   // We need a temporary register for address generation
1729   alloc_reg_temp(current,i,-1);
1730   minimum_free_regs[i]=1;
1731 }
1732
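// Register allocation for COP1 loads/stores (LWC1/LDC1/SWC1/SDC1).  CSREG is
// allocated so the coprocessor-usable check can be emitted.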
1733 void c1ls_alloc(struct regstat *current,int i)
1734 {
1735   //clear_const(current,rs1[i]); // FIXME
1736   clear_const(current,rt1[i]);
1737   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1738   alloc_reg(current,i,CSREG); // Status
1739   alloc_reg(current,i,FTEMP);
1740   if(opcode[i]==0x35||opcode[i]==0x3d) { // 64-bit LDC1/SDC1
1741     alloc_reg64(current,i,FTEMP);
1742   }
1743   // If using TLB, need a register for pointer to the mapping table
1744   if(using_tlb) alloc_reg(current,i,TLREG);
1745   #if defined(HOST_IMM8)
1746   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1747   else if((opcode[i]&0x3b)==0x39) // SWC1/SDC1
1748     alloc_reg(current,i,INVCP);
1749   #endif
1750   // We need a temporary register for address generation
1751   alloc_reg_temp(current,i,-1);
1752 }
1753
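// Register allocation for COP2 (GTE) loads/stores (LWC2/SWC2).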
1754 void c2ls_alloc(struct regstat *current,int i)
1755 {
1756   clear_const(current,rt1[i]);
1757   if(needed_again(rs1[i],i)) alloc_reg(current,i,rs1[i]);
1758   alloc_reg(current,i,FTEMP);
1759   // If using TLB, need a register for pointer to the mapping table
1760   if(using_tlb) alloc_reg(current,i,TLREG);
1761   #if defined(HOST_IMM8)
1762   // On CPUs without 32-bit immediates we need a pointer to invalid_code
1763   else if((opcode[i]&0x3b)==0x3a) // SWC2/SDC2
1764     alloc_reg(current,i,INVCP);
1765   #endif
1766   // We need a temporary register for address generation
1767   alloc_reg_temp(current,i,-1);
1768   minimum_free_regs[i]=1;
1769 }
1770
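// Register allocation for multiply/divide.  A port can override this generic
// version by defining multdiv_alloc (e.g. in its assembler header).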
1771 #ifndef multdiv_alloc
1772 void multdiv_alloc(struct regstat *current,int i)
1773 {
1774   //  case 0x18: MULT
1775   //  case 0x19: MULTU
1776   //  case 0x1A: DIV
1777   //  case 0x1B: DIVU
1778   //  case 0x1C: DMULT
1779   //  case 0x1D: DMULTU
1780   //  case 0x1E: DDIV
1781   //  case 0x1F: DDIVU
1782   clear_const(current,rs1[i]);
1783   clear_const(current,rs2[i]);
1784   if(rs1[i]&&rs2[i])
1785   {
1786     if((opcode2[i]&4)==0) // 32-bit
1787     {
1788       current->u&=~(1LL<<HIREG);
1789       current->u&=~(1LL<<LOREG);
1790       alloc_reg(current,i,HIREG);
1791       alloc_reg(current,i,LOREG);
1792       alloc_reg(current,i,rs1[i]);
1793       alloc_reg(current,i,rs2[i]);
1794       current->is32|=1LL<<HIREG;
1795       current->is32|=1LL<<LOREG;
1796       dirty_reg(current,HIREG);
1797       dirty_reg(current,LOREG);
1798     }
1799     else // 64-bit
1800     {
1801       current->u&=~(1LL<<HIREG);
1802       current->u&=~(1LL<<LOREG);
1803       current->uu&=~(1LL<<HIREG);
1804       current->uu&=~(1LL<<LOREG);
1805       alloc_reg64(current,i,HIREG);
1806       //if(HOST_REGS>10) alloc_reg64(current,i,LOREG);
1807       alloc_reg64(current,i,rs1[i]);
1808       alloc_reg64(current,i,rs2[i]);
1809       alloc_all(current,i);
1810       current->is32&=~(1LL<<HIREG);
1811       current->is32&=~(1LL<<LOREG);
1812       dirty_reg(current,HIREG);
1813       dirty_reg(current,LOREG);
1814       minimum_free_regs[i]=HOST_REGS;
1815     }
1816   }
1817   else
1818   {
1819     // Multiply by zero is zero.
1820     // MIPS does not have a divide by zero exception.
1821     // The result is undefined, so we return zero.
1822     alloc_reg(current,i,HIREG);
1823     alloc_reg(current,i,LOREG);
1824     current->is32|=1LL<<HIREG;
1825     current->is32|=1LL<<LOREG;
1826     dirty_reg(current,HIREG);
1827     dirty_reg(current,LOREG);
1828   }
1829 }
1830 #endif
1831
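// Register allocation for COP0 (MFC0/MTC0 and TLBR/TLBWI/TLBWR/TLBP/ERET).
// These can have arbitrary side effects, so alloc_all claims every host
// register and minimum_free_regs is set to HOST_REGS.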
1832 void cop0_alloc(struct regstat *current,int i)
1833 {
1834   if(opcode2[i]==0) // MFC0
1835   {
1836     if(rt1[i]) {
1837       clear_const(current,rt1[i]);
1838       alloc_all(current,i);
1839       alloc_reg(current,i,rt1[i]);
1840       current->is32|=1LL<<rt1[i];
1841       dirty_reg(current,rt1[i]);
1842     }
1843   }
1844   else if(opcode2[i]==4) // MTC0
1845   {
1846     if(rs1[i]){
1847       clear_const(current,rs1[i]);
1848       alloc_reg(current,i,rs1[i]);
1849       alloc_all(current,i);
1850     }
1851     else {
1852       alloc_all(current,i); // FIXME: Keep r0
1853       current->u&=~1LL;
1854       alloc_reg(current,i,0);
1855     }
1856   }
1857   else
1858   {
1859     // TLBR/TLBWI/TLBWR/TLBP/ERET
1860     assert(opcode2[i]==0x10);
1861     alloc_all(current,i);
1862   }
1863   minimum_free_regs[i]=HOST_REGS;
1864 }
1865
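// Register allocation for COP1 transfers (MFC1/DMFC1/CFC1, MTC1/DMTC1/CTC1).
// CSREG is loaded first for the coprocessor-usable check.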
1866 void cop1_alloc(struct regstat *current,int i)
1867 {
1868   alloc_reg(current,i,CSREG); // Load status
1869   if(opcode2[i]<3) // MFC1/DMFC1/CFC1
1870   {
1871     if(rt1[i]){
1872       clear_const(current,rt1[i]);
1873       if(opcode2[i]==1) {
1874         alloc_reg64(current,i,rt1[i]); // DMFC1
1875         current->is32&=~(1LL<<rt1[i]);
1876       }else{
1877         alloc_reg(current,i,rt1[i]); // MFC1/CFC1
1878         current->is32|=1LL<<rt1[i];
1879       }
1880       dirty_reg(current,rt1[i]);
1881     }
1882     alloc_reg_temp(current,i,-1);
1883   }
1884   else if(opcode2[i]>3) // MTC1/DMTC1/CTC1
1885   {
1886     if(rs1[i]){
1887       clear_const(current,rs1[i]);
1888       if(opcode2[i]==5)
1889         alloc_reg64(current,i,rs1[i]); // DMTC1
1890       else
1891         alloc_reg(current,i,rs1[i]); // MTC1/CTC1
1892       alloc_reg_temp(current,i,-1);
1893     }
1894     else {
1895       current->u&=~1LL;
1896       alloc_reg(current,i,0);
1897       alloc_reg_temp(current,i,-1);
1898     }
1899   }
1900   minimum_free_regs[i]=1;
1901 }
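// The remaining COP1/COP2 operation allocators below (convert, arithmetic,
// compare, GTE ops) only need the status/flag registers where applicable plus
// a scratch register.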
1902 void fconv_alloc(struct regstat *current,int i)
1903 {
1904   alloc_reg(current,i,CSREG); // Load status
1905   alloc_reg_temp(current,i,-1);
1906   minimum_free_regs[i]=1;
1907 }
1908 void float_alloc(struct regstat *current,int i)
1909 {
1910   alloc_reg(current,i,CSREG); // Load status
1911   alloc_reg_temp(current,i,-1);
1912   minimum_free_regs[i]=1;
1913 }
1914 void c2op_alloc(struct regstat *current,int i)
1915 {
1916   alloc_reg_temp(current,i,-1);
1917 }
1918 void fcomp_alloc(struct regstat *current,int i)
1919 {
1920   alloc_reg(current,i,CSREG); // Load status
1921   alloc_reg(current,i,FSREG); // Load flags
1922   dirty_reg(current,FSREG); // Flag will be modified
1923   alloc_reg_temp(current,i,-1);
1924   minimum_free_regs[i]=1;
1925 }
1926
1927 void syscall_alloc(struct regstat *current,int i)
1928 {
1929   alloc_cc(current,i);
1930   dirty_reg(current,CCREG);
1931   alloc_all(current,i);
1932   minimum_free_regs[i]=HOST_REGS;
1933   current->isconst=0;
1934 }
1935
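// Allocate registers for the instruction in a branch delay slot.  A jump in
// the delay slot is not supported; speculative precompilation is disabled
// instead.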
1936 void delayslot_alloc(struct regstat *current,int i)
1937 {
1938   switch(itype[i]) {
1939     case UJUMP:
1940     case CJUMP:
1941     case SJUMP:
1942     case RJUMP:
1943     case FJUMP:
1944     case SYSCALL:
1945     case HLECALL:
1946     case SPAN:
1947       assem_debug("jump in the delay slot.  this shouldn't happen.\n");//exit(1);
1948       printf("Disabled speculative precompilation\n");
1949       stop_after_jal=1;
1950       break;
1951     case IMM16:
1952       imm16_alloc(current,i);
1953       break;
1954     case LOAD:
1955     case LOADLR:
1956       load_alloc(current,i);
1957       break;
1958     case STORE:
1959     case STORELR:
1960       store_alloc(current,i);
1961       break;
1962     case ALU:
1963       alu_alloc(current,i);
1964       break;
1965     case SHIFT:
1966       shift_alloc(current,i);
1967       break;
1968     case MULTDIV:
1969       multdiv_alloc(current,i);
1970       break;
1971     case SHIFTIMM:
1972       shiftimm_alloc(current,i);
1973       break;
1974     case MOV:
1975       mov_alloc(current,i);
1976       break;
1977     case COP0:
1978       cop0_alloc(current,i);
1979       break;
1980     case COP1:
1981     case COP2:
1982       cop1_alloc(current,i);
1983       break;
1984     case C1LS:
1985       c1ls_alloc(current,i);
1986       break;
1987     case C2LS:
1988       c2ls_alloc(current,i);
1989       break;
1990     case FCONV:
1991       fconv_alloc(current,i);
1992       break;
1993     case FLOAT:
1994       float_alloc(current,i);
1995       break;
1996     case FCOMP:
1997       fcomp_alloc(current,i);
1998       break;
1999     case C2OP:
2000       c2op_alloc(current,i);
2001       break;
2002   }
2003 }
2004
2005 // Special case where a branch and delay slot span two pages in virtual memory
2006 static void pagespan_alloc(struct regstat *current,int i)
2007 {
2008   current->isconst=0;
2009   current->wasconst=0;
2010   regs[i].wasconst=0;
2011   minimum_free_regs[i]=HOST_REGS;
2012   alloc_all(current,i);
2013   alloc_cc(current,i);
2014   dirty_reg(current,CCREG);
2015   if(opcode[i]==3) // JAL
2016   {
2017     alloc_reg(current,i,31);
2018     dirty_reg(current,31);
2019   }
2020   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
2021   {
2022     alloc_reg(current,i,rs1[i]);
2023     if (rt1[i]!=0) {
2024       alloc_reg(current,i,rt1[i]);
2025       dirty_reg(current,rt1[i]);
2026     }
2027   }
2028   if((opcode[i]&0x2E)==4) // BEQ/BNE/BEQL/BNEL
2029   {
2030     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2031     if(rs2[i]) alloc_reg(current,i,rs2[i]);
2032     if(!((current->is32>>rs1[i])&(current->is32>>rs2[i])&1))
2033     {
2034       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2035       if(rs2[i]) alloc_reg64(current,i,rs2[i]);
2036     }
2037   }
2038   else
2039   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ/BLEZL/BGTZL
2040   {
2041     if(rs1[i]) alloc_reg(current,i,rs1[i]);
2042     if(!((current->is32>>rs1[i])&1))
2043     {
2044       if(rs1[i]) alloc_reg64(current,i,rs1[i]);
2045     }
2046   }
2047   else
2048   if(opcode[i]==0x11) // BC1
2049   {
2050     alloc_reg(current,i,FSREG);
2051     alloc_reg(current,i,CSREG);
2052   }
2053   //else ...
2054 }
2055
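// Record a stub to be generated at the end of the block.  Each entry holds
// the stub type, the address of the branch to patch, the return address and
// up to five type-specific arguments.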
2056 void add_stub(int type,int addr,int retaddr,int a,int b,int c,int d,int e)
2057 {
2058   stubs[stubcount][0]=type;
2059   stubs[stubcount][1]=addr;
2060   stubs[stubcount][2]=retaddr;
2061   stubs[stubcount][3]=a;
2062   stubs[stubcount][4]=b;
2063   stubs[stubcount][5]=c;
2064   stubs[stubcount][6]=d;
2065   stubs[stubcount][7]=e;
2066   stubcount++;
2067 }
2068
2069 // Write out a single register
2070 void wb_register(signed char r,signed char regmap[],uint64_t dirty,uint64_t is32)
2071 {
2072   int hr;
2073   for(hr=0;hr<HOST_REGS;hr++) {
2074     if(hr!=EXCLUDE_REG) {
2075       if((regmap[hr]&63)==r) {
2076         if((dirty>>hr)&1) {
2077           if(regmap[hr]<64) {
2078             emit_storereg(r,hr);
2079 #ifndef FORCE32
2080             if((is32>>regmap[hr])&1) {
2081               emit_sarimm(hr,31,hr);
2082               emit_storereg(r|64,hr);
2083             }
2084 #endif
2085           }else{
2086             emit_storereg(r|64,hr);
2087           }
2088         }
2089       }
2090     }
2091   }
2092 }
2093
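// Debug/trace helpers: mchecksum() hashes RDRAM, rchecksum() hashes the
// register file, rlist() dumps the registers; memdebug() below uses them when
// tracing is enabled.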
2094 int mchecksum()
2095 {
2096   //if(!tracedebug) return 0;
2097   int i;
2098   int sum=0;
2099   for(i=0;i<2097152;i++) {
2100     unsigned int temp=sum;
2101     sum<<=1;
2102     sum|=(~temp)>>31;
2103     sum^=((u_int *)rdram)[i];
2104   }
2105   return sum;
2106 }
2107 int rchecksum()
2108 {
2109   int i;
2110   int sum=0;
2111   for(i=0;i<64;i++)
2112     sum^=((u_int *)reg)[i];
2113   return sum;
2114 }
2115 void rlist()
2116 {
2117   int i;
2118   printf("TRACE: ");
2119   for(i=0;i<32;i++)
2120     printf("r%d:%8x%8x ",i,((int *)(reg+i))[1],((int *)(reg+i))[0]);
2121   printf("\n");
2122 #ifndef DISABLE_COP1
2123   printf("TRACE: ");
2124   for(i=0;i<32;i++)
2125     printf("f%d:%8x%8x ",i,((int*)reg_cop1_simple[i])[1],*((int*)reg_cop1_simple[i]));
2126   printf("\n");
2127 #endif
2128 }
2129
2130 void enabletrace()
2131 {
2132   tracedebug=1;
2133 }
2134
2135 void memdebug(int i)
2136 {
2137   //printf("TRACE: count=%d next=%d (checksum %x) lo=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[LOREG]>>32),(int)reg[LOREG]);
2138   //printf("TRACE: count=%d next=%d (rchecksum %x)\n",Count,next_interupt,rchecksum());
2139   //rlist();
2140   //if(tracedebug) {
2141   //if(Count>=-2084597794) {
2142   if((signed int)Count>=-2084597794&&(signed int)Count<0) {
2143   //if(0) {
2144     printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
2145     //printf("TRACE: count=%d next=%d (checksum %x) Status=%x\n",Count,next_interupt,mchecksum(),Status);
2146     //printf("TRACE: count=%d next=%d (checksum %x) hi=%8x%8x\n",Count,next_interupt,mchecksum(),(int)(reg[HIREG]>>32),(int)reg[HIREG]);
2147     rlist();
2148     #ifdef __i386__
2149     printf("TRACE: %x\n",(&i)[-1]);
2150     #endif
2151     #ifdef __arm__
2152     int j;
2153     printf("TRACE: %x \n",(&j)[10]);
2154     printf("TRACE: %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",(&j)[1],(&j)[2],(&j)[3],(&j)[4],(&j)[5],(&j)[6],(&j)[7],(&j)[8],(&j)[9],(&j)[10],(&j)[11],(&j)[12],(&j)[13],(&j)[14],(&j)[15],(&j)[16],(&j)[17],(&j)[18],(&j)[19],(&j)[20]);
2155     #endif
2156     //fflush(stdout);
2157   }
2158   //printf("TRACE: %x\n",(&i)[-1]);
2159 }
2160
2161 void tlb_debug(u_int cause, u_int addr, u_int iaddr)
2162 {
2163   printf("TLB Exception: instruction=%x addr=%x cause=%x\n",iaddr, addr, cause);
2164 }
2165
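// Emit host code for register-register ALU operations:
// ADD/ADDU/SUB/SUBU, DADD/DADDU/DSUB/DSUBU, SLT/SLTU, AND/OR/XOR/NOR.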
2166 void alu_assemble(int i,struct regstat *i_regs)
2167 {
2168   if(opcode2[i]>=0x20&&opcode2[i]<=0x23) { // ADD/ADDU/SUB/SUBU
2169     if(rt1[i]) {
2170       signed char s1,s2,t;
2171       t=get_reg(i_regs->regmap,rt1[i]);
2172       if(t>=0) {
2173         s1=get_reg(i_regs->regmap,rs1[i]);
2174         s2=get_reg(i_regs->regmap,rs2[i]);
2175         if(rs1[i]&&rs2[i]) {
2176           assert(s1>=0);
2177           assert(s2>=0);
2178           if(opcode2[i]&2) emit_sub(s1,s2,t);
2179           else emit_add(s1,s2,t);
2180         }
2181         else if(rs1[i]) {
2182           if(s1>=0) emit_mov(s1,t);
2183           else emit_loadreg(rs1[i],t);
2184         }
2185         else if(rs2[i]) {
2186           if(s2>=0) {
2187             if(opcode2[i]&2) emit_neg(s2,t);
2188             else emit_mov(s2,t);
2189           }
2190           else {
2191             emit_loadreg(rs2[i],t);
2192             if(opcode2[i]&2) emit_neg(t,t);
2193           }
2194         }
2195         else emit_zeroreg(t);
2196       }
2197     }
2198   }
2199   if(opcode2[i]>=0x2c&&opcode2[i]<=0x2f) { // DADD/DADDU/DSUB/DSUBU
2200     if(rt1[i]) {
2201       signed char s1l,s2l,s1h,s2h,tl,th;
2202       tl=get_reg(i_regs->regmap,rt1[i]);
2203       th=get_reg(i_regs->regmap,rt1[i]|64);
2204       if(tl>=0) {
2205         s1l=get_reg(i_regs->regmap,rs1[i]);
2206         s2l=get_reg(i_regs->regmap,rs2[i]);
2207         s1h=get_reg(i_regs->regmap,rs1[i]|64);
2208         s2h=get_reg(i_regs->regmap,rs2[i]|64);
2209         if(rs1[i]&&rs2[i]) {
2210           assert(s1l>=0);
2211           assert(s2l>=0);
2212           if(opcode2[i]&2) emit_subs(s1l,s2l,tl);
2213           else emit_adds(s1l,s2l,tl);
2214           if(th>=0) {
2215             #ifdef INVERTED_CARRY
2216             if(opcode2[i]&2) {if(s1h!=th) emit_mov(s1h,th);emit_sbb(th,s2h);}
2217             #else
2218             if(opcode2[i]&2) emit_sbc(s1h,s2h,th);
2219             #endif
2220             else emit_adc(s1h,s2h,th); // the high word must include the carry from the low-word add
2221           }
2222         }
2223         else if(rs1[i]) {
2224           if(s1l>=0) emit_mov(s1l,tl);
2225           else emit_loadreg(rs1[i],tl);
2226           if(th>=0) {
2227             if(s1h>=0) emit_mov(s1h,th);
2228             else emit_loadreg(rs1[i]|64,th);
2229           }
2230         }
2231         else if(rs2[i]) {
2232           if(s2l>=0) {
2233             if(opcode2[i]&2) emit_negs(s2l,tl);
2234             else emit_mov(s2l,tl);
2235           }
2236           else {
2237             emit_loadreg(rs2[i],tl);
2238             if(opcode2[i]&2) emit_negs(tl,tl);
2239           }
2240           if(th>=0) {
2241             #ifdef INVERTED_CARRY
2242             if(s2h>=0) emit_mov(s2h,th);
2243             else emit_loadreg(rs2[i]|64,th);
2244             if(opcode2[i]&2) {
2245               emit_adcimm(-1,th); // x86 has inverted carry flag
2246               emit_not(th,th);
2247             }
2248             #else
2249             if(opcode2[i]&2) {
2250               if(s2h>=0) emit_rscimm(s2h,0,th);
2251               else {
2252                 emit_loadreg(rs2[i]|64,th);
2253                 emit_rscimm(th,0,th);
2254               }
2255             }else{
2256               if(s2h>=0) emit_mov(s2h,th);
2257               else emit_loadreg(rs2[i]|64,th);
2258             }
2259             #endif
2260           }
2261         }
2262         else {
2263           emit_zeroreg(tl);
2264           if(th>=0) emit_zeroreg(th);
2265         }
2266       }
2267     }
2268   }
2269   if(opcode2[i]==0x2a||opcode2[i]==0x2b) { // SLT/SLTU
2270     if(rt1[i]) {
2271       signed char s1l,s1h,s2l,s2h,t;
2272       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1))
2273       {
2274         t=get_reg(i_regs->regmap,rt1[i]);
2275         //assert(t>=0);
2276         if(t>=0) {
2277           s1l=get_reg(i_regs->regmap,rs1[i]);
2278           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2279           s2l=get_reg(i_regs->regmap,rs2[i]);
2280           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2281           if(rs2[i]==0) // rx<r0
2282           {
2283             assert(s1h>=0);
2284             if(opcode2[i]==0x2a) // SLT
2285               emit_shrimm(s1h,31,t);
2286             else // SLTU (unsigned can not be less than zero)
2287               emit_zeroreg(t);
2288           }
2289           else if(rs1[i]==0) // r0<rx
2290           {
2291             assert(s2h>=0);
2292             if(opcode2[i]==0x2a) // SLT
2293               emit_set_gz64_32(s2h,s2l,t);
2294             else // SLTU (set if not zero)
2295               emit_set_nz64_32(s2h,s2l,t);
2296           }
2297           else {
2298             assert(s1l>=0);assert(s1h>=0);
2299             assert(s2l>=0);assert(s2h>=0);
2300             if(opcode2[i]==0x2a) // SLT
2301               emit_set_if_less64_32(s1h,s1l,s2h,s2l,t);
2302             else // SLTU
2303               emit_set_if_carry64_32(s1h,s1l,s2h,s2l,t);
2304           }
2305         }
2306       } else {
2307         t=get_reg(i_regs->regmap,rt1[i]);
2308         //assert(t>=0);
2309         if(t>=0) {
2310           s1l=get_reg(i_regs->regmap,rs1[i]);
2311           s2l=get_reg(i_regs->regmap,rs2[i]);
2312           if(rs2[i]==0) // rx<r0
2313           {
2314             assert(s1l>=0);
2315             if(opcode2[i]==0x2a) // SLT
2316               emit_shrimm(s1l,31,t);
2317             else // SLTU (unsigned can not be less than zero)
2318               emit_zeroreg(t);
2319           }
2320           else if(rs1[i]==0) // r0<rx
2321           {
2322             assert(s2l>=0);
2323             if(opcode2[i]==0x2a) // SLT
2324               emit_set_gz32(s2l,t);
2325             else // SLTU (set if not zero)
2326               emit_set_nz32(s2l,t);
2327           }
2328           else{
2329             assert(s1l>=0);assert(s2l>=0);
2330             if(opcode2[i]==0x2a) // SLT
2331               emit_set_if_less32(s1l,s2l,t);
2332             else // SLTU
2333               emit_set_if_carry32(s1l,s2l,t);
2334           }
2335         }
2336       }
2337     }
2338   }
2339   if(opcode2[i]>=0x24&&opcode2[i]<=0x27) { // AND/OR/XOR/NOR
2340     if(rt1[i]) {
2341       signed char s1l,s1h,s2l,s2h,th,tl;
2342       tl=get_reg(i_regs->regmap,rt1[i]);
2343       th=get_reg(i_regs->regmap,rt1[i]|64);
2344       if(!((i_regs->was32>>rs1[i])&(i_regs->was32>>rs2[i])&1)&&th>=0)
2345       {
2346         assert(tl>=0);
2347         if(tl>=0) {
2348           s1l=get_reg(i_regs->regmap,rs1[i]);
2349           s1h=get_reg(i_regs->regmap,rs1[i]|64);
2350           s2l=get_reg(i_regs->regmap,rs2[i]);
2351           s2h=get_reg(i_regs->regmap,rs2[i]|64);
2352           if(rs1[i]&&rs2[i]) {
2353             assert(s1l>=0);assert(s1h>=0);
2354             assert(s2l>=0);assert(s2h>=0);
2355             if(opcode2[i]==0x24) { // AND
2356               emit_and(s1l,s2l,tl);
2357               emit_and(s1h,s2h,th);
2358             } else
2359             if(opcode2[i]==0x25) { // OR
2360               emit_or(s1l,s2l,tl);
2361               emit_or(s1h,s2h,th);
2362             } else
2363             if(opcode2[i]==0x26) { // XOR
2364               emit_xor(s1l,s2l,tl);
2365               emit_xor(s1h,s2h,th);
2366             } else
2367             if(opcode2[i]==0x27) { // NOR
2368               emit_or(s1l,s2l,tl);
2369               emit_or(s1h,s2h,th);
2370               emit_not(tl,tl);
2371               emit_not(th,th);
2372             }
2373           }
2374           else
2375           {
2376             if(opcode2[i]==0x24) { // AND
2377               emit_zeroreg(tl);
2378               emit_zeroreg(th);
2379             } else
2380             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2381               if(rs1[i]){
2382                 if(s1l>=0) emit_mov(s1l,tl);
2383                 else emit_loadreg(rs1[i],tl);
2384                 if(s1h>=0) emit_mov(s1h,th);
2385                 else emit_loadreg(rs1[i]|64,th);
2386               }
2387               else
2388               if(rs2[i]){
2389                 if(s2l>=0) emit_mov(s2l,tl);
2390                 else emit_loadreg(rs2[i],tl);
2391                 if(s2h>=0) emit_mov(s2h,th);
2392                 else emit_loadreg(rs2[i]|64,th);
2393               }
2394               else{
2395                 emit_zeroreg(tl);
2396                 emit_zeroreg(th);
2397               }
2398             } else
2399             if(opcode2[i]==0x27) { // NOR
2400               if(rs1[i]){
2401                 if(s1l>=0) emit_not(s1l,tl);
2402                 else{
2403                   emit_loadreg(rs1[i],tl);
2404                   emit_not(tl,tl);
2405                 }
2406                 if(s1h>=0) emit_not(s1h,th);
2407                 else{
2408                   emit_loadreg(rs1[i]|64,th);
2409                   emit_not(th,th);
2410                 }
2411               }
2412               else
2413               if(rs2[i]){
2414                 if(s2l>=0) emit_not(s2l,tl);
2415                 else{
2416                   emit_loadreg(rs2[i],tl);
2417                   emit_not(tl,tl);
2418                 }
2419                 if(s2h>=0) emit_not(s2h,th);
2420                 else{
2421                   emit_loadreg(rs2[i]|64,th);
2422                   emit_not(th,th);
2423                 }
2424               }
2425               else {
2426                 emit_movimm(-1,tl);
2427                 emit_movimm(-1,th);
2428               }
2429             }
2430           }
2431         }
2432       }
2433       else
2434       {
2435         // 32 bit
2436         if(tl>=0) {
2437           s1l=get_reg(i_regs->regmap,rs1[i]);
2438           s2l=get_reg(i_regs->regmap,rs2[i]);
2439           if(rs1[i]&&rs2[i]) {
2440             assert(s1l>=0);
2441             assert(s2l>=0);
2442             if(opcode2[i]==0x24) { // AND
2443               emit_and(s1l,s2l,tl);
2444             } else
2445             if(opcode2[i]==0x25) { // OR
2446               emit_or(s1l,s2l,tl);
2447             } else
2448             if(opcode2[i]==0x26) { // XOR
2449               emit_xor(s1l,s2l,tl);
2450             } else
2451             if(opcode2[i]==0x27) { // NOR
2452               emit_or(s1l,s2l,tl);
2453               emit_not(tl,tl);
2454             }
2455           }
2456           else
2457           {
2458             if(opcode2[i]==0x24) { // AND
2459               emit_zeroreg(tl);
2460             } else
2461             if(opcode2[i]==0x25||opcode2[i]==0x26) { // OR/XOR
2462               if(rs1[i]){
2463                 if(s1l>=0) emit_mov(s1l,tl);
2464                 else emit_loadreg(rs1[i],tl); // CHECK: regmap_entry?
2465               }
2466               else
2467               if(rs2[i]){
2468                 if(s2l>=0) emit_mov(s2l,tl);
2469                 else emit_loadreg(rs2[i],tl); // CHECK: regmap_entry?
2470               }
2471               else emit_zeroreg(tl);
2472             } else
2473             if(opcode2[i]==0x27) { // NOR
2474               if(rs1[i]){
2475                 if(s1l>=0) emit_not(s1l,tl);
2476                 else {
2477                   emit_loadreg(rs1[i],tl);
2478                   emit_not(tl,tl);
2479                 }
2480               }
2481               else
2482               if(rs2[i]){
2483                 if(s2l>=0) emit_not(s2l,tl);
2484                 else {
2485                   emit_loadreg(rs2[i],tl);
2486                   emit_not(tl,tl);
2487                 }
2488               }
2489               else emit_movimm(-1,tl);
2490             }
2491           }
2492         }
2493       }
2494     }
2495   }
2496 }
2497
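// Emit host code for immediate-type instructions (LUI, ADDI/ADDIU,
// DADDI/DADDIU, SLTI/SLTIU, ANDI/ORI/XORI), folding constants where the
// source value is already known.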
2498 void imm16_assemble(int i,struct regstat *i_regs)
2499 {
2500   if (opcode[i]==0x0f) { // LUI
2501     if(rt1[i]) {
2502       signed char t;
2503       t=get_reg(i_regs->regmap,rt1[i]);
2504       //assert(t>=0);
2505       if(t>=0) {
2506         if(!((i_regs->isconst>>t)&1))
2507           emit_movimm(imm[i]<<16,t);
2508       }
2509     }
2510   }
2511   if(opcode[i]==0x08||opcode[i]==0x09) { // ADDI/ADDIU
2512     if(rt1[i]) {
2513       signed char s,t;
2514       t=get_reg(i_regs->regmap,rt1[i]);
2515       s=get_reg(i_regs->regmap,rs1[i]);
2516       if(rs1[i]) {
2517         //assert(t>=0);
2518         //assert(s>=0);
2519         if(t>=0) {
2520           if(!((i_regs->isconst>>t)&1)) {
2521             if(s<0) {
2522               if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2523               emit_addimm(t,imm[i],t);
2524             }else{
2525               if(!((i_regs->wasconst>>s)&1))
2526                 emit_addimm(s,imm[i],t);
2527               else
2528                 emit_movimm(constmap[i][s]+imm[i],t);
2529             }
2530           }
2531         }
2532       } else {
2533         if(t>=0) {
2534           if(!((i_regs->isconst>>t)&1))
2535             emit_movimm(imm[i],t);
2536         }
2537       }
2538     }
2539   }
2540   if(opcode[i]==0x18||opcode[i]==0x19) { // DADDI/DADDIU
2541     if(rt1[i]) {
2542       signed char sh,sl,th,tl;
2543       th=get_reg(i_regs->regmap,rt1[i]|64);
2544       tl=get_reg(i_regs->regmap,rt1[i]);
2545       sh=get_reg(i_regs->regmap,rs1[i]|64);
2546       sl=get_reg(i_regs->regmap,rs1[i]);
2547       if(tl>=0) {
2548         if(rs1[i]) {
2549           assert(sh>=0);
2550           assert(sl>=0);
2551           if(th>=0) {
2552             emit_addimm64_32(sh,sl,imm[i],th,tl);
2553           }
2554           else {
2555             emit_addimm(sl,imm[i],tl);
2556           }
2557         } else {
2558           emit_movimm(imm[i],tl);
2559           if(th>=0) emit_movimm(((signed int)imm[i])>>31,th);
2560         }
2561       }
2562     }
2563   }
2564   else if(opcode[i]==0x0a||opcode[i]==0x0b) { // SLTI/SLTIU
2565     if(rt1[i]) {
2566       //assert(rs1[i]!=0); // r0 might be valid, but it's probably a bug
2567       signed char sh,sl,t;
2568       t=get_reg(i_regs->regmap,rt1[i]);
2569       sh=get_reg(i_regs->regmap,rs1[i]|64);
2570       sl=get_reg(i_regs->regmap,rs1[i]);
2571       //assert(t>=0);
2572       if(t>=0) {
2573         if(rs1[i]>0) {
2574           if(sh<0) assert((i_regs->was32>>rs1[i])&1);
2575           if(sh<0||((i_regs->was32>>rs1[i])&1)) {
2576             if(opcode[i]==0x0a) { // SLTI
2577               if(sl<0) {
2578                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2579                 emit_slti32(t,imm[i],t);
2580               }else{
2581                 emit_slti32(sl,imm[i],t);
2582               }
2583             }
2584             else { // SLTIU
2585               if(sl<0) {
2586                 if(i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2587                 emit_sltiu32(t,imm[i],t);
2588               }else{
2589                 emit_sltiu32(sl,imm[i],t);
2590               }
2591             }
2592           }else{ // 64-bit
2593             assert(sl>=0);
2594             if(opcode[i]==0x0a) // SLTI
2595               emit_slti64_32(sh,sl,imm[i],t);
2596             else // SLTIU
2597               emit_sltiu64_32(sh,sl,imm[i],t);
2598           }
2599         }else{
2600           // SLTI(U) with r0 is just stupid;
2601           // nonetheless examples can be found
2602           if(opcode[i]==0x0a) { // SLTI
2603             if(0<imm[i]) emit_movimm(1,t);
2604             else emit_zeroreg(t);
2605           } else // SLTIU
2606           {
2607             if(imm[i]) emit_movimm(1,t);
2608             else emit_zeroreg(t);
2609           }
2610         }
2611       }
2612     }
2613   }
2614   else if(opcode[i]>=0x0c&&opcode[i]<=0x0e) { // ANDI/ORI/XORI
2615     if(rt1[i]) {
2616       signed char sh,sl,th,tl;
2617       th=get_reg(i_regs->regmap,rt1[i]|64);
2618       tl=get_reg(i_regs->regmap,rt1[i]);
2619       sh=get_reg(i_regs->regmap,rs1[i]|64);
2620       sl=get_reg(i_regs->regmap,rs1[i]);
2621       if(tl>=0 && !((i_regs->isconst>>tl)&1)) {
2622         if(opcode[i]==0x0c) //ANDI
2623         {
2624           if(rs1[i]) {
2625             if(sl<0) {
2626               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2627               emit_andimm(tl,imm[i],tl);
2628             }else{
2629               if(!((i_regs->wasconst>>sl)&1))
2630                 emit_andimm(sl,imm[i],tl);
2631               else
2632                 emit_movimm(constmap[i][sl]&imm[i],tl);
2633             }
2634           }
2635           else
2636             emit_zeroreg(tl);
2637           if(th>=0) emit_zeroreg(th);
2638         }
2639         else
2640         {
2641           if(rs1[i]) {
2642             if(sl<0) {
2643               if(i_regs->regmap_entry[tl]!=rs1[i]) emit_loadreg(rs1[i],tl);
2644             }
2645             if(th>=0) {
2646               if(sh<0) {
2647                 emit_loadreg(rs1[i]|64,th);
2648               }else{
2649                 emit_mov(sh,th);
2650               }
2651             }
2652             if(opcode[i]==0x0d) //ORI
2653             if(sl<0) {
2654               emit_orimm(tl,imm[i],tl);
2655             }else{
2656               if(!((i_regs->wasconst>>sl)&1))
2657                 emit_orimm(sl,imm[i],tl);
2658               else
2659                 emit_movimm(constmap[i][sl]|imm[i],tl);
2660             }
2661             if(opcode[i]==0x0e) //XORI
2662             if(sl<0) {
2663               emit_xorimm(tl,imm[i],tl);
2664             }else{
2665               if(!((i_regs->wasconst>>sl)&1))
2666                 emit_xorimm(sl,imm[i],tl);
2667               else
2668                 emit_movimm(constmap[i][sl]^imm[i],tl);
2669             }
2670           }
2671           else {
2672             emit_movimm(imm[i],tl);
2673             if(th>=0) emit_zeroreg(th);
2674           }
2675         }
2676       }
2677     }
2678   }
2679 }
2680
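// Emit host code for shift-by-immediate instructions: SLL/SRL/SRA and the
// 64-bit DSLL/DSRL/DSRA plus DSLL32/DSRL32/DSRA32.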
2681 void shiftimm_assemble(int i,struct regstat *i_regs)
2682 {
2683   if(opcode2[i]<=0x3) // SLL/SRL/SRA
2684   {
2685     if(rt1[i]) {
2686       signed char s,t;
2687       t=get_reg(i_regs->regmap,rt1[i]);
2688       s=get_reg(i_regs->regmap,rs1[i]);
2689       //assert(t>=0);
2690       if(t>=0){
2691         if(rs1[i]==0)
2692         {
2693           emit_zeroreg(t);
2694         }
2695         else
2696         {
2697           if(s<0&&i_regs->regmap_entry[t]!=rs1[i]) emit_loadreg(rs1[i],t);
2698           if(imm[i]) {
2699             if(opcode2[i]==0) // SLL
2700             {
2701               emit_shlimm(s<0?t:s,imm[i],t);
2702             }
2703             if(opcode2[i]==2) // SRL
2704             {
2705               emit_shrimm(s<0?t:s,imm[i],t);
2706             }
2707             if(opcode2[i]==3) // SRA
2708             {
2709               emit_sarimm(s<0?t:s,imm[i],t);
2710             }
2711           }else{
2712             // Shift by zero
2713             if(s>=0 && s!=t) emit_mov(s,t);
2714           }
2715         }
2716       }
2717       //emit_storereg(rt1[i],t); //DEBUG
2718     }
2719   }
2720   if(opcode2[i]>=0x38&&opcode2[i]<=0x3b) // DSLL/DSRL/DSRA
2721   {
2722     if(rt1[i]) {
2723       signed char sh,sl,th,tl;
2724       th=get_reg(i_regs->regmap,rt1[i]|64);
2725       tl=get_reg(i_regs->regmap,rt1[i]);
2726       sh=get_reg(i_regs->regmap,rs1[i]|64);
2727       sl=get_reg(i_regs->regmap,rs1[i]);
2728       if(tl>=0) {
2729         if(rs1[i]==0)
2730         {
2731           emit_zeroreg(tl);
2732           if(th>=0) emit_zeroreg(th);
2733         }
2734         else
2735         {
2736           assert(sl>=0);
2737           assert(sh>=0);
2738           if(imm[i]) {
2739             if(opcode2[i]==0x38) // DSLL
2740             {
2741               if(th>=0) emit_shldimm(sh,sl,imm[i],th);
2742               emit_shlimm(sl,imm[i],tl);
2743             }
2744             if(opcode2[i]==0x3a) // DSRL
2745             {
2746               emit_shrdimm(sl,sh,imm[i],tl);
2747               if(th>=0) emit_shrimm(sh,imm[i],th);
2748             }
2749             if(opcode2[i]==0x3b) // DSRA
2750             {
2751               emit_shrdimm(sl,sh,imm[i],tl);
2752               if(th>=0) emit_sarimm(sh,imm[i],th);
2753             }
2754           }else{
2755             // Shift by zero
2756             if(sl!=tl) emit_mov(sl,tl);
2757             if(th>=0&&sh!=th) emit_mov(sh,th);
2758           }
2759         }
2760       }
2761     }
2762   }
2763   if(opcode2[i]==0x3c) // DSLL32
2764   {
2765     if(rt1[i]) {
2766       signed char sl,tl,th;
2767       tl=get_reg(i_regs->regmap,rt1[i]);
2768       th=get_reg(i_regs->regmap,rt1[i]|64);
2769       sl=get_reg(i_regs->regmap,rs1[i]);
2770       if(th>=0||tl>=0){
2771         assert(tl>=0);
2772         assert(th>=0);
2773         assert(sl>=0);
2774         emit_mov(sl,th);
2775         emit_zeroreg(tl);
2776         if(imm[i]>32)
2777         {
2778           emit_shlimm(th,imm[i]&31,th);
2779         }
2780       }
2781     }
2782   }
2783   if(opcode2[i]==0x3e) // DSRL32
2784   {
2785     if(rt1[i]) {
2786       signed char sh,tl,th;
2787       tl=get_reg(i_regs->regmap,rt1[i]);
2788       th=get_reg(i_regs->regmap,rt1[i]|64);
2789       sh=get_reg(i_regs->regmap,rs1[i]|64);
2790       if(tl>=0){
2791         assert(sh>=0);
2792         emit_mov(sh,tl);
2793         if(th>=0) emit_zeroreg(th);
2794         if(imm[i]>32)
2795         {
2796           emit_shrimm(tl,imm[i]&31,tl);
2797         }
2798       }
2799     }
2800   }
2801   if(opcode2[i]==0x3f) // DSRA32
2802   {
2803     if(rt1[i]) {
2804       signed char sh,tl;
2805       tl=get_reg(i_regs->regmap,rt1[i]);
2806       sh=get_reg(i_regs->regmap,rs1[i]|64);
2807       if(tl>=0){
2808         assert(sh>=0);
2809         emit_mov(sh,tl);
2810         if(imm[i]>32)
2811         {
2812           emit_sarimm(tl,imm[i]&31,tl);
2813         }
2814       }
2815     }
2816   }
2817 }
2818
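// Register-variable shifts (SLLV/SRLV/SRAV and the 64-bit variants) are
// expected to come from the architecture-specific assembler; this fallback
// only reports a missing port.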
2819 #ifndef shift_assemble
2820 void shift_assemble(int i,struct regstat *i_regs)
2821 {
2822   printf("Need shift_assemble for this architecture.\n");
2823   exit(1);
2824 }
2825 #endif
2826
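// Emit host code for aligned loads.  For a known constant address the RAM
// check can be skipped or the access handled inline; otherwise the address is
// compared against RAM_SIZE and a stub handles non-RAM targets.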
2827 void load_assemble(int i,struct regstat *i_regs)
2828 {
2829   int s,th,tl,addr,map=-1;
2830   int offset;
2831   int jaddr=0;
2832   int memtarget=0,c=0;
2833   int fastload_reg_override=0;
2834   u_int hr,reglist=0;
2835   th=get_reg(i_regs->regmap,rt1[i]|64);
2836   tl=get_reg(i_regs->regmap,rt1[i]);
2837   s=get_reg(i_regs->regmap,rs1[i]);
2838   offset=imm[i];
2839   for(hr=0;hr<HOST_REGS;hr++) {
2840     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
2841   }
2842   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
2843   if(s>=0) {
2844     c=(i_regs->wasconst>>s)&1;
2845     if (c) {
2846       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
2847       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
2848     }
2849   }
2850   //printf("load_assemble: c=%d\n",c);
2851   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2852   // FIXME: Even if the load is a NOP, we should check for pagefaults...
2853 #ifdef PCSX
2854   if((tl<0&&(!c||(((u_int)constmap[i][s]+offset)>>16)==0x1f80))
2855     ||rt1[i]==0) {
2856       // could be a FIFO or hardware register, so the read must still be performed
2857       // (rt1[i]==0 is a dummy read)
2858       assem_debug("(forced read)\n");
2859       tl=get_reg(i_regs->regmap,-1);
2860       assert(tl>=0);
2861   }
2862 #endif
2863   if(offset||s<0||c) addr=tl;
2864   else addr=s;
2865   //if(tl<0) tl=get_reg(i_regs->regmap,-1);
2866  if(tl>=0) {
2867   //printf("load_assemble: c=%d\n",c);
2868   //if(c) printf("load_assemble: const=%x\n",(int)constmap[i][s]+offset);
2869   assert(tl>=0); // Even if the load is a NOP, we must check for pagefaults and I/O
2870   reglist&=~(1<<tl);
2871   if(th>=0) reglist&=~(1<<th);
2872   if(!using_tlb) {
2873     if(!c) {
2874       #ifdef RAM_OFFSET
2875       map=get_reg(i_regs->regmap,ROREG);
2876       if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
2877       #endif
2878 //#define R29_HACK 1
2879       #ifdef R29_HACK
2880       // Strmnnrmn's speed hack
2881       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
2882       #endif
2883       {
2884         #ifdef PCSX
2885         if(sp_in_mirror&&rs1[i]==29) {
2886           emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
2887           emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
2888           fastload_reg_override=HOST_TEMPREG;
2889         }
2890         else
2891         #endif
2892         emit_cmpimm(addr,RAM_SIZE);
2893         jaddr=(int)out;
2894         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
2895         // Hint to branch predictor that the branch is unlikely to be taken
2896         if(rs1[i]>=28)
2897           emit_jno_unlikely(0);
2898         else
2899         #endif
2900         emit_jno(0);
2901       }
2902     }
2903   }else{ // using tlb
2904     int x=0;
2905     if (opcode[i]==0x20||opcode[i]==0x24) x=3; // LB/LBU
2906     if (opcode[i]==0x21||opcode[i]==0x25) x=2; // LH/LHU
2907     map=get_reg(i_regs->regmap,TLREG);
2908     assert(map>=0);
2909     reglist&=~(1<<map);
2910     map=do_tlb_r(addr,tl,map,x,-1,-1,c,constmap[i][s]+offset);
2911     do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr);
2912   }
2913   int dummy=(rt1[i]==0)||(tl!=get_reg(i_regs->regmap,rt1[i])); // ignore loads to r0 and unneeded reg
2914   if (opcode[i]==0x20) { // LB
2915     if(!c||memtarget) {
2916       if(!dummy) {
2917         #ifdef HOST_IMM_ADDR32
2918         if(c)
2919           emit_movsbl_tlb((constmap[i][s]+offset)^3,map,tl);
2920         else
2921         #endif
2922         {
2923           //emit_xorimm(addr,3,tl);
2924           //gen_tlb_addr_r(tl,map);
2925           //emit_movsbl_indexed((int)rdram-0x80000000,tl,tl);
2926           int x=0,a=tl;
2927 #ifdef BIG_ENDIAN_MIPS
2928           if(!c) emit_xorimm(addr,3,tl);
2929           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
2930 #else
2931           if(!c) a=addr;
2932 #endif
2933           if(fastload_reg_override) a=fastload_reg_override;
2934
2935           emit_movsbl_indexed_tlb(x,a,map,tl);
2936         }
2937       }
2938       if(jaddr)
2939         add_stub(LOADB_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2940     }
2941     else
2942       inline_readstub(LOADB_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2943   }
2944   if (opcode[i]==0x21) { // LH
2945     if(!c||memtarget) {
2946       if(!dummy) {
2947         #ifdef HOST_IMM_ADDR32
2948         if(c)
2949           emit_movswl_tlb((constmap[i][s]+offset)^2,map,tl);
2950         else
2951         #endif
2952         {
2953           int x=0,a=tl;
2954 #ifdef BIG_ENDIAN_MIPS
2955           if(!c) emit_xorimm(addr,2,tl);
2956           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
2957 #else
2958           if(!c) a=addr;
2959 #endif
2960           if(fastload_reg_override) a=fastload_reg_override;
2961           //#ifdef
2962           //emit_movswl_indexed_tlb(x,tl,map,tl);
2963           //else
2964           if(map>=0) {
2965             gen_tlb_addr_r(a,map);
2966             emit_movswl_indexed(x,a,tl);
2967           }else{
2968             #ifdef RAM_OFFSET
2969             emit_movswl_indexed(x,a,tl);
2970             #else
2971             emit_movswl_indexed((int)rdram-0x80000000+x,a,tl);
2972             #endif
2973           }
2974         }
2975       }
2976       if(jaddr)
2977         add_stub(LOADH_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2978     }
2979     else
2980       inline_readstub(LOADH_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
2981   }
2982   if (opcode[i]==0x23) { // LW
2983     if(!c||memtarget) {
2984       if(!dummy) {
2985         int a=addr;
2986         if(fastload_reg_override) a=fastload_reg_override;
2987         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
2988         #ifdef HOST_IMM_ADDR32
2989         if(c)
2990           emit_readword_tlb(constmap[i][s]+offset,map,tl);
2991         else
2992         #endif
2993         emit_readword_indexed_tlb(0,a,map,tl);
2994       }
2995       if(jaddr)
2996         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
2997     }
2998     else
2999       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3000   }
3001   if (opcode[i]==0x24) { // LBU
3002     if(!c||memtarget) {
3003       if(!dummy) {
3004         #ifdef HOST_IMM_ADDR32
3005         if(c)
3006           emit_movzbl_tlb((constmap[i][s]+offset)^3,map,tl);
3007         else
3008         #endif
3009         {
3010           //emit_xorimm(addr,3,tl);
3011           //gen_tlb_addr_r(tl,map);
3012           //emit_movzbl_indexed((int)rdram-0x80000000,tl,tl);
3013           int x=0,a=tl;
3014 #ifdef BIG_ENDIAN_MIPS
3015           if(!c) emit_xorimm(addr,3,tl);
3016           else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3017 #else
3018           if(!c) a=addr;
3019 #endif
3020           if(fastload_reg_override) a=fastload_reg_override;
3021
3022           emit_movzbl_indexed_tlb(x,a,map,tl);
3023         }
3024       }
3025       if(jaddr)
3026         add_stub(LOADBU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3027     }
3028     else
3029       inline_readstub(LOADBU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3030   }
3031   if (opcode[i]==0x25) { // LHU
3032     if(!c||memtarget) {
3033       if(!dummy) {
3034         #ifdef HOST_IMM_ADDR32
3035         if(c)
3036           emit_movzwl_tlb((constmap[i][s]+offset)^2,map,tl);
3037         else
3038         #endif
3039         {
3040           int x=0,a=tl;
3041 #ifdef BIG_ENDIAN_MIPS
3042           if(!c) emit_xorimm(addr,2,tl);
3043           else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3044 #else
3045           if(!c) a=addr;
3046 #endif
3047           if(fastload_reg_override) a=fastload_reg_override;
3048           //#ifdef
3049           //emit_movzwl_indexed_tlb(x,tl,map,tl);
3050           //#else
3051           if(map>=0) {
3052             gen_tlb_addr_r(a,map);
3053             emit_movzwl_indexed(x,a,tl);
3054           }else{
3055             #ifdef RAM_OFFSET
3056             emit_movzwl_indexed(x,a,tl);
3057             #else
3058             emit_movzwl_indexed((int)rdram-0x80000000+x,a,tl);
3059             #endif
3060           }
3061         }
3062       }
3063       if(jaddr)
3064         add_stub(LOADHU_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3065     }
3066     else
3067       inline_readstub(LOADHU_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3068   }
3069   if (opcode[i]==0x27) { // LWU
3070     assert(th>=0);
3071     if(!c||memtarget) {
3072       if(!dummy) {
3073         int a=addr;
3074         if(fastload_reg_override) a=fastload_reg_override;
3075         //emit_readword_indexed((int)rdram-0x80000000,addr,tl);
3076         #ifdef HOST_IMM_ADDR32
3077         if(c)
3078           emit_readword_tlb(constmap[i][s]+offset,map,tl);
3079         else
3080         #endif
3081         emit_readword_indexed_tlb(0,a,map,tl);
3082       }
3083       if(jaddr)
3084         add_stub(LOADW_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3085     }
3086     else {
3087       inline_readstub(LOADW_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3088     }
3089     emit_zeroreg(th);
3090   }
3091   if (opcode[i]==0x37) { // LD
3092     if(!c||memtarget) {
3093       if(!dummy) {
3094         int a=addr;
3095         if(fastload_reg_override) a=fastload_reg_override;
3096         //gen_tlb_addr_r(tl,map);
3097         //if(th>=0) emit_readword_indexed((int)rdram-0x80000000,addr,th);
3098         //emit_readword_indexed((int)rdram-0x7FFFFFFC,addr,tl);
3099         #ifdef HOST_IMM_ADDR32
3100         if(c)
3101           emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3102         else
3103         #endif
3104         emit_readdword_indexed_tlb(0,a,map,th,tl);
3105       }
3106       if(jaddr)
3107         add_stub(LOADD_STUB,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3108     }
3109     else
3110       inline_readstub(LOADD_STUB,i,constmap[i][s]+offset,i_regs->regmap,rt1[i],ccadj[i],reglist);
3111   }
3112  }
3113   //emit_storereg(rt1[i],tl); // DEBUG
3114   //if(opcode[i]==0x23)
3115   //if(opcode[i]==0x24)
3116   //if(opcode[i]==0x23||opcode[i]==0x24)
3117   /*if(opcode[i]==0x21||opcode[i]==0x23||opcode[i]==0x24)
3118   {
3119     //emit_pusha();
3120     save_regs(0x100f);
3121         emit_readword((int)&last_count,ECX);
3122         #ifdef __i386__
3123         if(get_reg(i_regs->regmap,CCREG)<0)
3124           emit_loadreg(CCREG,HOST_CCREG);
3125         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3126         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3127         emit_writeword(HOST_CCREG,(int)&Count);
3128         #endif
3129         #ifdef __arm__
3130         if(get_reg(i_regs->regmap,CCREG)<0)
3131           emit_loadreg(CCREG,0);
3132         else
3133           emit_mov(HOST_CCREG,0);
3134         emit_add(0,ECX,0);
3135         emit_addimm(0,2*ccadj[i],0);
3136         emit_writeword(0,(int)&Count);
3137         #endif
3138     emit_call((int)memdebug);
3139     //emit_popa();
3140     restore_regs(0x100f);
3141   }/**/
3142 }
3143
3144 #ifndef loadlr_assemble
3145 void loadlr_assemble(int i,struct regstat *i_regs)
3146 {
3147   printf("Need loadlr_assemble for this architecture.\n");
3148   exit(1);
3149 }
3150 #endif
3151
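// Emit host code for aligned stores (SB/SH/SW/SD).  Writes that land in RAM
// also get an invalid_code check so that any translated block covering the
// written address is invalidated.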
3152 void store_assemble(int i,struct regstat *i_regs)
3153 {
3154   int s,th,tl,map=-1;
3155   int addr,temp;
3156   int offset;
3157   int jaddr=0,jaddr2,type;
3158   int memtarget=0,c=0;
3159   int agr=AGEN1+(i&1);
3160   int faststore_reg_override=0;
3161   u_int hr,reglist=0;
3162   th=get_reg(i_regs->regmap,rs2[i]|64);
3163   tl=get_reg(i_regs->regmap,rs2[i]);
3164   s=get_reg(i_regs->regmap,rs1[i]);
3165   temp=get_reg(i_regs->regmap,agr);
3166   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3167   offset=imm[i];
3168   if(s>=0) {
3169     c=(i_regs->wasconst>>s)&1;
3170     if(c) {
3171       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3172       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3173     }
3174   }
3175   assert(tl>=0);
3176   assert(temp>=0);
3177   for(hr=0;hr<HOST_REGS;hr++) {
3178     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3179   }
3180   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3181   if(offset||s<0||c) addr=temp;
3182   else addr=s;
3183   if(!using_tlb) {
3184     if(!c) {
3185       #ifdef PCSX
3186       if(sp_in_mirror&&rs1[i]==29) {
3187         emit_andimm(addr,~0x00e00000,HOST_TEMPREG);
3188         emit_cmpimm(HOST_TEMPREG,RAM_SIZE);
3189         faststore_reg_override=HOST_TEMPREG;
3190       }
3191       else
3192       #endif
3193       #ifdef R29_HACK
3194       // Strmnnrmn's speed hack
3195       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3196       #endif
3197       emit_cmpimm(addr,RAM_SIZE);
3198       #ifdef DESTRUCTIVE_SHIFT
3199       if(s==addr) emit_mov(s,temp);
3200       #endif
3201       #ifdef R29_HACK
3202       memtarget=1;
3203       if(rs1[i]!=29||start<0x80001000||start>=0x80000000+RAM_SIZE)
3204       #endif
3205       {
3206         jaddr=(int)out;
3207         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
3208         // Hint to branch predictor that the branch is unlikely to be taken
3209         if(rs1[i]>=28)
3210           emit_jno_unlikely(0);
3211         else
3212         #endif
3213         emit_jno(0);
3214       }
3215     }
3216   }else{ // using tlb
3217     int x=0;
3218     if (opcode[i]==0x28) x=3; // SB
3219     if (opcode[i]==0x29) x=2; // SH
3220     map=get_reg(i_regs->regmap,TLREG);
3221     assert(map>=0);
3222     reglist&=~(1<<map);
3223     map=do_tlb_w(addr,temp,map,x,c,constmap[i][s]+offset);
3224     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3225   }
3226
3227   if (opcode[i]==0x28) { // SB
3228     if(!c||memtarget) {
3229       int x=0,a=temp;
3230 #ifdef BIG_ENDIAN_MIPS
3231       if(!c) emit_xorimm(addr,3,temp);
3232       else x=((constmap[i][s]+offset)^3)-(constmap[i][s]+offset);
3233 #else
3234       if(!c) a=addr;
3235 #endif
3236       if(faststore_reg_override) a=faststore_reg_override;
3237       //gen_tlb_addr_w(temp,map);
3238       //emit_writebyte_indexed(tl,(int)rdram-0x80000000,temp);
3239       emit_writebyte_indexed_tlb(tl,x,a,map,a);
3240     }
3241     type=STOREB_STUB;
3242   }
3243   if (opcode[i]==0x29) { // SH
3244     if(!c||memtarget) {
3245       int x=0,a=temp;
3246 #ifdef BIG_ENDIAN_MIPS
3247       if(!c) emit_xorimm(addr,2,temp);
3248       else x=((constmap[i][s]+offset)^2)-(constmap[i][s]+offset);
3249 #else
3250       if(!c) a=addr;
3251 #endif
3252       if(faststore_reg_override) a=faststore_reg_override;
3253       //#ifdef
3254       //emit_writehword_indexed_tlb(tl,x,temp,map,temp);
3255       //#else
3256       if(map>=0) {
3257         gen_tlb_addr_w(a,map);
3258         emit_writehword_indexed(tl,x,a);
3259       }else
3260         emit_writehword_indexed(tl,(int)rdram-0x80000000+x,a);
3261     }
3262     type=STOREH_STUB;
3263   }
3264   if (opcode[i]==0x2B) { // SW
3265     if(!c||memtarget) {
3266       int a=addr;
3267       if(faststore_reg_override) a=faststore_reg_override;
3268       //emit_writeword_indexed(tl,(int)rdram-0x80000000,addr);
3269       emit_writeword_indexed_tlb(tl,0,a,map,temp);
3270     }
3271     type=STOREW_STUB;
3272   }
3273   if (opcode[i]==0x3F) { // SD
3274     if(!c||memtarget) {
3275       int a=addr;
3276       if(faststore_reg_override) a=faststore_reg_override;
3277       if(rs2[i]) {
3278         assert(th>=0);
3279         //emit_writeword_indexed(th,(int)rdram-0x80000000,addr);
3280         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,addr);
3281         emit_writedword_indexed_tlb(th,tl,0,a,map,temp);
3282       }else{
3283         // Store zero
3284         //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3285         //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3286         emit_writedword_indexed_tlb(tl,tl,0,a,map,temp);
3287       }
3288     }
3289     type=STORED_STUB;
3290   }
3291 #ifdef PCSX
3292   if(jaddr) {
3293     // PCSX store handlers don't check invcode again
3294     reglist|=1<<addr;
3295     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3296     jaddr=0;
3297   }
3298 #endif
3299   if(!using_tlb) {
3300     if(!c||memtarget) {
3301       #ifdef DESTRUCTIVE_SHIFT
3302       // The x86 shift operation is 'destructive'; it overwrites the
3303       // source register, so we need to make a copy first and use that.
3304       addr=temp;
3305       #endif
3306       #if defined(HOST_IMM8)
3307       int ir=get_reg(i_regs->regmap,INVCP);
3308       assert(ir>=0);
3309       emit_cmpmem_indexedsr12_reg(ir,addr,1);
3310       #else
3311       emit_cmpmem_indexedsr12_imm((int)invalid_code,addr,1);
3312       #endif
3313       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3314       emit_callne(invalidate_addr_reg[addr]);
3315       #else
3316       jaddr2=(int)out;
3317       emit_jne(0);
3318       add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),addr,0,0,0);
3319       #endif
3320     }
3321   }
3322   if(jaddr) {
3323     add_stub(type,jaddr,(int)out,i,addr,(int)i_regs,ccadj[i],reglist);
3324   } else if(c&&!memtarget) {
3325     inline_writestub(type,i,constmap[i][s]+offset,i_regs->regmap,rs2[i],ccadj[i],reglist);
3326   }
3327   //if(opcode[i]==0x2B || opcode[i]==0x3F)
3328   //if(opcode[i]==0x2B || opcode[i]==0x28)
3329   //if(opcode[i]==0x2B || opcode[i]==0x29)
3330   //if(opcode[i]==0x2B)
3331   /*if(opcode[i]==0x2B || opcode[i]==0x28 || opcode[i]==0x29 || opcode[i]==0x3F)
3332   {
3333     #ifdef __i386__
3334     emit_pusha();
3335     #endif
3336     #ifdef __arm__
3337     save_regs(0x100f);
3338     #endif
3339         emit_readword((int)&last_count,ECX);
3340         #ifdef __i386__
3341         if(get_reg(i_regs->regmap,CCREG)<0)
3342           emit_loadreg(CCREG,HOST_CCREG);
3343         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3344         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3345         emit_writeword(HOST_CCREG,(int)&Count);
3346         #endif
3347         #ifdef __arm__
3348         if(get_reg(i_regs->regmap,CCREG)<0)
3349           emit_loadreg(CCREG,0);
3350         else
3351           emit_mov(HOST_CCREG,0);
3352         emit_add(0,ECX,0);
3353         emit_addimm(0,2*ccadj[i],0);
3354         emit_writeword(0,(int)&Count);
3355         #endif
3356     emit_call((int)memdebug);
3357     #ifdef __i386__
3358     emit_popa();
3359     #endif
3360     #ifdef __arm__
3361     restore_regs(0x100f);
3362     #endif
3363   }/**/
3364 }
3365
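// Emit host code for unaligned stores (SWL/SWR/SDL/SDR).  The low two address
// bits select one of four byte-lane cases; on a little-endian host the
// address is xor'd with 3 first so the same case logic can be reused.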
3366 void storelr_assemble(int i,struct regstat *i_regs)
3367 {
3368   int s,th,tl;
3369   int temp;
3370   int temp2;
3371   int offset;
3372   int jaddr=0,jaddr2;
3373   int case1,case2,case3;
3374   int done0,done1,done2;
3375   int memtarget=0,c=0;
3376   int agr=AGEN1+(i&1);
3377   u_int hr,reglist=0;
3378   th=get_reg(i_regs->regmap,rs2[i]|64);
3379   tl=get_reg(i_regs->regmap,rs2[i]);
3380   s=get_reg(i_regs->regmap,rs1[i]);
3381   temp=get_reg(i_regs->regmap,agr);
3382   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3383   offset=imm[i];
3384   if(s>=0) {
3385     c=(i_regs->isconst>>s)&1;
3386     if(c) {
3387       memtarget=((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE;
3388       if(using_tlb&&((signed int)(constmap[i][s]+offset))>=(signed int)0xC0000000) memtarget=1;
3389     }
3390   }
3391   assert(tl>=0);
3392   for(hr=0;hr<HOST_REGS;hr++) {
3393     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3394   }
3395   assert(temp>=0);
3396   if(!using_tlb) {
3397     if(!c) {
3398       emit_cmpimm(s<0||offset?temp:s,RAM_SIZE);
3399       if(!offset&&s!=temp) emit_mov(s,temp);
3400       jaddr=(int)out;
3401       emit_jno(0);
3402     }
3403     else
3404     {
3405       if(!memtarget||!rs1[i]) {
3406         jaddr=(int)out;
3407         emit_jmp(0);
3408       }
3409     }
3410     #ifdef RAM_OFFSET
3411     int map=get_reg(i_regs->regmap,ROREG);
3412     if(map<0) emit_loadreg(ROREG,map=HOST_TEMPREG);
3413     gen_tlb_addr_w(temp,map);
3414     #else
3415     if((u_int)rdram!=0x80000000) 
3416       emit_addimm_no_flags((u_int)rdram-(u_int)0x80000000,temp);
3417     #endif
3418   }else{ // using tlb
3419     int map=get_reg(i_regs->regmap,TLREG);
3420     assert(map>=0);
3421     reglist&=~(1<<map);
3422     map=do_tlb_w(c||s<0||offset?temp:s,temp,map,0,c,constmap[i][s]+offset);
3423     if(!c&&!offset&&s>=0) emit_mov(s,temp);
3424     do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr);
3425     if(!jaddr&&!memtarget) {
3426       jaddr=(int)out;
3427       emit_jmp(0);
3428     }
3429     gen_tlb_addr_w(temp,map);
3430   }
3431
3432   if (opcode[i]==0x2C||opcode[i]==0x2D) { // SDL/SDR
3433     temp2=get_reg(i_regs->regmap,FTEMP);
3434     if(!rs2[i]) temp2=th=tl;
3435   }
3436
3437 #ifndef BIG_ENDIAN_MIPS
3438     emit_xorimm(temp,3,temp);
3439 #endif
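  // Dispatch on the two low bits of the (endian-adjusted) address: the
  // tests below select one of four paths, and each path stores the
  // byte/halfword/word slice that the MIPS instruction writes for that
  // alignment, rotating the source register so the bytes line up.  The
  // leftover SDL/SDR word (temp2) is written after the common join point.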
3440   emit_testimm(temp,2);
3441   case2=(int)out;
3442   emit_jne(0);
3443   emit_testimm(temp,1);
3444   case1=(int)out;
3445   emit_jne(0);
3446   // 0
3447   if (opcode[i]==0x2A) { // SWL
3448     emit_writeword_indexed(tl,0,temp);
3449   }
3450   if (opcode[i]==0x2E) { // SWR
3451     emit_writebyte_indexed(tl,3,temp);
3452   }
3453   if (opcode[i]==0x2C) { // SDL
3454     emit_writeword_indexed(th,0,temp);
3455     if(rs2[i]) emit_mov(tl,temp2);
3456   }
3457   if (opcode[i]==0x2D) { // SDR
3458     emit_writebyte_indexed(tl,3,temp);
3459     if(rs2[i]) emit_shldimm(th,tl,24,temp2);
3460   }
3461   done0=(int)out;
3462   emit_jmp(0);
3463   // 1
3464   set_jump_target(case1,(int)out);
3465   if (opcode[i]==0x2A) { // SWL
3466     // Write 3 msb into three least significant bytes
3467     if(rs2[i]) emit_rorimm(tl,8,tl);
3468     emit_writehword_indexed(tl,-1,temp);
3469     if(rs2[i]) emit_rorimm(tl,16,tl);
3470     emit_writebyte_indexed(tl,1,temp);
3471     if(rs2[i]) emit_rorimm(tl,8,tl);
3472   }
3473   if (opcode[i]==0x2E) { // SWR
3474     // Write two lsb into two most significant bytes
3475     emit_writehword_indexed(tl,1,temp);
3476   }
3477   if (opcode[i]==0x2C) { // SDL
3478     if(rs2[i]) emit_shrdimm(tl,th,8,temp2);
3479     // Write 3 msb into three least significant bytes
3480     if(rs2[i]) emit_rorimm(th,8,th);
3481     emit_writehword_indexed(th,-1,temp);
3482     if(rs2[i]) emit_rorimm(th,16,th);
3483     emit_writebyte_indexed(th,1,temp);
3484     if(rs2[i]) emit_rorimm(th,8,th);
3485   }
3486   if (opcode[i]==0x2D) { // SDR
3487     if(rs2[i]) emit_shldimm(th,tl,16,temp2);
3488     // Write two lsb into two most significant bytes
3489     emit_writehword_indexed(tl,1,temp);
3490   }
3491   done1=(int)out;
3492   emit_jmp(0);
3493   // 2
3494   set_jump_target(case2,(int)out);
3495   emit_testimm(temp,1);
3496   case3=(int)out;
3497   emit_jne(0);
3498   if (opcode[i]==0x2A) { // SWL
3499     // Write two msb into two least significant bytes
3500     if(rs2[i]) emit_rorimm(tl,16,tl);
3501     emit_writehword_indexed(tl,-2,temp);
3502     if(rs2[i]) emit_rorimm(tl,16,tl);
3503   }
3504   if (opcode[i]==0x2E) { // SWR
3505     // Write 3 lsb into three most significant bytes
3506     emit_writebyte_indexed(tl,-1,temp);
3507     if(rs2[i]) emit_rorimm(tl,8,tl);
3508     emit_writehword_indexed(tl,0,temp);
3509     if(rs2[i]) emit_rorimm(tl,24,tl);
3510   }
3511   if (opcode[i]==0x2C) { // SDL
3512     if(rs2[i]) emit_shrdimm(tl,th,16,temp2);
3513     // Write two msb into two least significant bytes
3514     if(rs2[i]) emit_rorimm(th,16,th);
3515     emit_writehword_indexed(th,-2,temp);
3516     if(rs2[i]) emit_rorimm(th,16,th);
3517   }
3518   if (opcode[i]==0x2D) { // SDR
3519     if(rs2[i]) emit_shldimm(th,tl,8,temp2);
3520     // Write 3 lsb into three most significant bytes
3521     emit_writebyte_indexed(tl,-1,temp);
3522     if(rs2[i]) emit_rorimm(tl,8,tl);
3523     emit_writehword_indexed(tl,0,temp);
3524     if(rs2[i]) emit_rorimm(tl,24,tl);
3525   }
3526   done2=(int)out;
3527   emit_jmp(0);
3528   // 3
3529   set_jump_target(case3,(int)out);
3530   if (opcode[i]==0x2A) { // SWL
3531     // Write msb into least significant byte
3532     if(rs2[i]) emit_rorimm(tl,24,tl);
3533     emit_writebyte_indexed(tl,-3,temp);
3534     if(rs2[i]) emit_rorimm(tl,8,tl);
3535   }
3536   if (opcode[i]==0x2E) { // SWR
3537     // Write entire word
3538     emit_writeword_indexed(tl,-3,temp);
3539   }
3540   if (opcode[i]==0x2C) { // SDL
3541     if(rs2[i]) emit_shrdimm(tl,th,24,temp2);
3542     // Write msb into least significant byte
3543     if(rs2[i]) emit_rorimm(th,24,th);
3544     emit_writebyte_indexed(th,-3,temp);
3545     if(rs2[i]) emit_rorimm(th,8,th);
3546   }
3547   if (opcode[i]==0x2D) { // SDR
3548     if(rs2[i]) emit_mov(th,temp2);
3549     // Write entire word
3550     emit_writeword_indexed(tl,-3,temp);
3551   }
3552   set_jump_target(done0,(int)out);
3553   set_jump_target(done1,(int)out);
3554   set_jump_target(done2,(int)out);
3555   if (opcode[i]==0x2C) { // SDL
3556     emit_testimm(temp,4);
3557     done0=(int)out;
3558     emit_jne(0);
3559     emit_andimm(temp,~3,temp);
3560     emit_writeword_indexed(temp2,4,temp);
3561     set_jump_target(done0,(int)out);
3562   }
3563   if (opcode[i]==0x2D) { // SDR
3564     emit_testimm(temp,4);
3565     done0=(int)out;
3566     emit_jeq(0);
3567     emit_andimm(temp,~3,temp);
3568     emit_writeword_indexed(temp2,-4,temp);
3569     set_jump_target(done0,(int)out);
3570   }
3571   if(!c||!memtarget)
3572     add_stub(STORELR_STUB,jaddr,(int)out,i,(int)i_regs,temp,ccadj[i],reglist);
3573   if(!using_tlb) {
3574     #ifdef RAM_OFFSET
3575     int map=get_reg(i_regs->regmap,ROREG);
3576     if(map<0) map=HOST_TEMPREG;
3577     gen_orig_addr_w(temp,map);
3578     #else
3579     emit_addimm_no_flags((u_int)0x80000000-(u_int)rdram,temp);
3580     #endif
3581     #if defined(HOST_IMM8)
3582     int ir=get_reg(i_regs->regmap,INVCP);
3583     assert(ir>=0);
3584     emit_cmpmem_indexedsr12_reg(ir,temp,1);
3585     #else
3586     emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3587     #endif
3588     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3589     emit_callne(invalidate_addr_reg[temp]);
3590     #else
3591     jaddr2=(int)out;
3592     emit_jne(0);
3593     add_stub(INVCODE_STUB,jaddr2,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3594     #endif
3595   }
3596   /*
3597     emit_pusha();
3598     //save_regs(0x100f);
3599         emit_readword((int)&last_count,ECX);
3600         if(get_reg(i_regs->regmap,CCREG)<0)
3601           emit_loadreg(CCREG,HOST_CCREG);
3602         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3603         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3604         emit_writeword(HOST_CCREG,(int)&Count);
3605     emit_call((int)memdebug);
3606     emit_popa();
3607     //restore_regs(0x100f);
3608   /**/
3609 }
3610
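// c1ls_assemble: emit code for the COP1 loads/stores LWC1 (0x31),
// LDC1 (0x35), SWC1 (0x39) and SDC1 (0x3D).  The FPR's address is read
// from reg_cop1_simple/reg_cop1_double, a COP1-usable check is emitted
// if the status bit has not been tested yet in this block, and accesses
// that miss RAM go through LOADW/LOADD/STOREW/STORED stubs.  When
// DISABLE_COP1 is defined the whole body is replaced by a
// cop1_unusable exception.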
3611 void c1ls_assemble(int i,struct regstat *i_regs)
3612 {
3613 #ifndef DISABLE_COP1
3614   int s,th,tl;
3615   int temp,ar;
3616   int map=-1;
3617   int offset;
3618   int c=0;
3619   int jaddr,jaddr2=0,jaddr3,type;
3620   int agr=AGEN1+(i&1);
3621   u_int hr,reglist=0;
3622   th=get_reg(i_regs->regmap,FTEMP|64);
3623   tl=get_reg(i_regs->regmap,FTEMP);
3624   s=get_reg(i_regs->regmap,rs1[i]);
3625   temp=get_reg(i_regs->regmap,agr);
3626   if(temp<0) temp=get_reg(i_regs->regmap,-1);
3627   offset=imm[i];
3628   assert(tl>=0);
3629   assert(rs1[i]>0);
3630   assert(temp>=0);
3631   for(hr=0;hr<HOST_REGS;hr++) {
3632     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3633   }
3634   if(i_regs->regmap[HOST_CCREG]==CCREG) reglist&=~(1<<HOST_CCREG);
3635   if (opcode[i]==0x31||opcode[i]==0x35) // LWC1/LDC1
3636   {
3637     // Loads use a temporary register which we need to save
3638     reglist|=1<<temp;
3639   }
3640   if (opcode[i]==0x39||opcode[i]==0x3D) // SWC1/SDC1
3641     ar=temp;
3642   else // LWC1/LDC1
3643     ar=tl;
3644   //if(s<0) emit_loadreg(rs1[i],ar); //address_generation does this now
3645   //else c=(i_regs->wasconst>>s)&1;
3646   if(s>=0) c=(i_regs->wasconst>>s)&1;
3647   // Check cop1 unusable
3648   if(!cop1_usable) {
3649     signed char rs=get_reg(i_regs->regmap,CSREG);
3650     assert(rs>=0);
3651     emit_testimm(rs,0x20000000);
3652     jaddr=(int)out;
3653     emit_jeq(0);
3654     add_stub(FP_STUB,jaddr,(int)out,i,rs,(int)i_regs,is_delayslot,0);
3655     cop1_usable=1;
3656   }
3657   if (opcode[i]==0x39) { // SWC1 (get float address)
3658     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],tl);
3659   }
3660   if (opcode[i]==0x3D) { // SDC1 (get double address)
3661     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],tl);
3662   }
3663   // Generate address + offset
3664   if(!using_tlb) {
3665     if(!c)
3666       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3667   }
3668   else
3669   {
3670     map=get_reg(i_regs->regmap,TLREG);
3671     assert(map>=0);
3672     reglist&=~(1<<map);
3673     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3674       map=do_tlb_r(offset||c||s<0?ar:s,ar,map,0,-1,-1,c,constmap[i][s]+offset);
3675     }
3676     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3677       map=do_tlb_w(offset||c||s<0?ar:s,ar,map,0,c,constmap[i][s]+offset);
3678     }
3679   }
3680   if (opcode[i]==0x39) { // SWC1 (read float)
3681     emit_readword_indexed(0,tl,tl);
3682   }
3683   if (opcode[i]==0x3D) { // SDC1 (read double)
3684     emit_readword_indexed(4,tl,th);
3685     emit_readword_indexed(0,tl,tl);
3686   }
3687   if (opcode[i]==0x31) { // LWC1 (get target address)
3688     emit_readword((int)&reg_cop1_simple[(source[i]>>16)&0x1f],temp);
3689   }
3690   if (opcode[i]==0x35) { // LDC1 (get target address)
3691     emit_readword((int)&reg_cop1_double[(source[i]>>16)&0x1f],temp);
3692   }
3693   if(!using_tlb) {
3694     if(!c) {
3695       jaddr2=(int)out;
3696       emit_jno(0);
3697     }
3698     else if(((signed int)(constmap[i][s]+offset))>=(signed int)0x80000000+RAM_SIZE) {
3699       jaddr2=(int)out;
3700       emit_jmp(0); // inline_readstub/inline_writestub?  Very rare case
3701     }
3702     #ifdef DESTRUCTIVE_SHIFT
3703     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3704       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3705     }
3706     #endif
3707   }else{
3708     if (opcode[i]==0x31||opcode[i]==0x35) { // LWC1/LDC1
3709       do_tlb_r_branch(map,c,constmap[i][s]+offset,&jaddr2);
3710     }
3711     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3712       do_tlb_w_branch(map,c,constmap[i][s]+offset,&jaddr2);
3713     }
3714   }
3715   if (opcode[i]==0x31) { // LWC1
3716     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3717     //gen_tlb_addr_r(ar,map);
3718     //emit_readword_indexed((int)rdram-0x80000000,tl,tl);
3719     #ifdef HOST_IMM_ADDR32
3720     if(c) emit_readword_tlb(constmap[i][s]+offset,map,tl);
3721     else
3722     #endif
3723     emit_readword_indexed_tlb(0,offset||c||s<0?tl:s,map,tl);
3724     type=LOADW_STUB;
3725   }
3726   if (opcode[i]==0x35) { // LDC1
3727     assert(th>=0);
3728     //if(s>=0&&!c&&!offset) emit_mov(s,tl);
3729     //gen_tlb_addr_r(ar,map);
3730     //emit_readword_indexed((int)rdram-0x80000000,tl,th);
3731     //emit_readword_indexed((int)rdram-0x7FFFFFFC,tl,tl);
3732     #ifdef HOST_IMM_ADDR32
3733     if(c) emit_readdword_tlb(constmap[i][s]+offset,map,th,tl);
3734     else
3735     #endif
3736     emit_readdword_indexed_tlb(0,offset||c||s<0?tl:s,map,th,tl);
3737     type=LOADD_STUB;
3738   }
3739   if (opcode[i]==0x39) { // SWC1
3740     //emit_writeword_indexed(tl,(int)rdram-0x80000000,temp);
3741     emit_writeword_indexed_tlb(tl,0,offset||c||s<0?temp:s,map,temp);
3742     type=STOREW_STUB;
3743   }
3744   if (opcode[i]==0x3D) { // SDC1
3745     assert(th>=0);
3746     //emit_writeword_indexed(th,(int)rdram-0x80000000,temp);
3747     //emit_writeword_indexed(tl,(int)rdram-0x7FFFFFFC,temp);
3748     emit_writedword_indexed_tlb(th,tl,0,offset||c||s<0?temp:s,map,temp);
3749     type=STORED_STUB;
3750   }
3751   if(!using_tlb) {
3752     if (opcode[i]==0x39||opcode[i]==0x3D) { // SWC1/SDC1
3753       #ifndef DESTRUCTIVE_SHIFT
3754       temp=offset||c||s<0?ar:s;
3755       #endif
3756       #if defined(HOST_IMM8)
3757       int ir=get_reg(i_regs->regmap,INVCP);
3758       assert(ir>=0);
3759       emit_cmpmem_indexedsr12_reg(ir,temp,1);
3760       #else
3761       emit_cmpmem_indexedsr12_imm((int)invalid_code,temp,1);
3762       #endif
3763       #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3764       emit_callne(invalidate_addr_reg[temp]);
3765       #else
3766       jaddr3=(int)out;
3767       emit_jne(0);
3768       add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),temp,0,0,0);
3769       #endif
3770     }
3771   }
3772   if(jaddr2) add_stub(type,jaddr2,(int)out,i,offset||c||s<0?ar:s,(int)i_regs,ccadj[i],reglist);
3773   if (opcode[i]==0x31) { // LWC1 (write float)
3774     emit_writeword_indexed(tl,0,temp);
3775   }
3776   if (opcode[i]==0x35) { // LDC1 (write double)
3777     emit_writeword_indexed(th,4,temp);
3778     emit_writeword_indexed(tl,0,temp);
3779   }
3780   //if(opcode[i]==0x39)
3781   /*if(opcode[i]==0x39||opcode[i]==0x31)
3782   {
3783     emit_pusha();
3784         emit_readword((int)&last_count,ECX);
3785         if(get_reg(i_regs->regmap,CCREG)<0)
3786           emit_loadreg(CCREG,HOST_CCREG);
3787         emit_add(HOST_CCREG,ECX,HOST_CCREG);
3788         emit_addimm(HOST_CCREG,2*ccadj[i],HOST_CCREG);
3789         emit_writeword(HOST_CCREG,(int)&Count);
3790     emit_call((int)memdebug);
3791     emit_popa();
3792   }/**/
3793 #else
3794   cop1_unusable(i, i_regs);
3795 #endif
3796 }
3797
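// c2ls_assemble: emit code for the GTE transfers LWC2 (0x32) and
// SWC2 (0x3a).  The selected GTE data register is moved to or from the
// FTEMP host register with cop2_get_dreg/cop2_put_dreg, accesses
// outside RAM go through LOADW/STOREW stubs, and SWC2 also runs the
// invalid-code check so that stores over compiled code are detected.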
3798 void c2ls_assemble(int i,struct regstat *i_regs)
3799 {
3800   int s,tl;
3801   int ar;
3802   int offset;
3803   int memtarget=0,c=0;
3804   int jaddr2=0,jaddr3,type;
3805   int agr=AGEN1+(i&1);
3806   u_int hr,reglist=0;
3807   u_int copr=(source[i]>>16)&0x1f;
3808   s=get_reg(i_regs->regmap,rs1[i]);
3809   tl=get_reg(i_regs->regmap,FTEMP);
3810   offset=imm[i];
3811   assert(rs1[i]>0);
3812   assert(tl>=0);
3813   assert(!using_tlb);
3814
3815   for(hr=0;hr<HOST_REGS;hr++) {
3816     if(i_regs->regmap[hr]>=0) reglist|=1<<hr;
3817   }
3818   if(i_regs->regmap[HOST_CCREG]==CCREG)
3819     reglist&=~(1<<HOST_CCREG);
3820
3821   // get the address
3822   if (opcode[i]==0x3a) { // SWC2
3823     ar=get_reg(i_regs->regmap,agr);
3824     if(ar<0) ar=get_reg(i_regs->regmap,-1);
3825     reglist|=1<<ar;
3826   } else { // LWC2
3827     ar=tl;
3828   }
3829   if(s>=0) c=(i_regs->wasconst>>s)&1;
3830   memtarget=c&&(((signed int)(constmap[i][s]+offset))<(signed int)0x80000000+RAM_SIZE);
3831   if (!offset&&!c&&s>=0) ar=s;
3832   assert(ar>=0);
3833
3834   if (opcode[i]==0x3a) { // SWC2
3835     cop2_get_dreg(copr,tl,HOST_TEMPREG);
3836     type=STOREW_STUB;
3837   }
3838   else
3839     type=LOADW_STUB;
3840
3841   if(c&&!memtarget) {
3842     jaddr2=(int)out;
3843     emit_jmp(0); // inline_readstub/inline_writestub?
3844   }
3845   else {
3846     if(!c) {
3847       emit_cmpimm(offset||c||s<0?ar:s,RAM_SIZE);
3848       jaddr2=(int)out;
3849       emit_jno(0);
3850     }
3851     if (opcode[i]==0x32) { // LWC2
3852       #ifdef HOST_IMM_ADDR32
3853       if(c) emit_readword_tlb(constmap[i][s]+offset,-1,tl);
3854       else
3855       #endif
3856       emit_readword_indexed(0,ar,tl);
3857     }
3858     if (opcode[i]==0x3a) { // SWC2
3859       #ifdef DESTRUCTIVE_SHIFT
3860       if(!offset&&!c&&s>=0) emit_mov(s,ar);
3861       #endif
3862       emit_writeword_indexed(tl,0,ar);
3863     }
3864   }
3865   if(jaddr2)
3866     add_stub(type,jaddr2,(int)out,i,ar,(int)i_regs,ccadj[i],reglist);
3867   if (opcode[i]==0x3a) { // SWC2
3868 #if defined(HOST_IMM8)
3869     int ir=get_reg(i_regs->regmap,INVCP);
3870     assert(ir>=0);
3871     emit_cmpmem_indexedsr12_reg(ir,ar,1);
3872 #else
3873     emit_cmpmem_indexedsr12_imm((int)invalid_code,ar,1);
3874 #endif
3875     #if defined(HAVE_CONDITIONAL_CALL) && !defined(DESTRUCTIVE_SHIFT)
3876     emit_callne(invalidate_addr_reg[ar]);
3877     #else
3878     jaddr3=(int)out;
3879     emit_jne(0);
3880     add_stub(INVCODE_STUB,jaddr3,(int)out,reglist|(1<<HOST_CCREG),ar,0,0,0);
3881     #endif
3882   }
3883   if (opcode[i]==0x32) { // LWC2
3884     cop2_put_dreg(copr,tl,HOST_TEMPREG);
3885   }
3886 }
3887
3888 #ifndef multdiv_assemble
3889 void multdiv_assemble(int i,struct regstat *i_regs)
3890 {
3891   printf("Need multdiv_assemble for this architecture.\n");
3892   exit(1);
3893 }
3894 #endif
3895
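// mov_assemble: MFHI/MFLO/MTHI/MTLO, implemented as a plain move from
// rs1 (HI/LO or a GPR) into rt1, copying the upper half as well when
// the destination is mapped as a 64-bit value.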
3896 void mov_assemble(int i,struct regstat *i_regs)
3897 {
3898   //if(opcode2[i]==0x10||opcode2[i]==0x12) { // MFHI/MFLO
3899   //if(opcode2[i]==0x11||opcode2[i]==0x13) { // MTHI/MTLO
3900   if(rt1[i]) {
3901     signed char sh,sl,th,tl;
3902     th=get_reg(i_regs->regmap,rt1[i]|64);
3903     tl=get_reg(i_regs->regmap,rt1[i]);
3904     //assert(tl>=0);
3905     if(tl>=0) {
3906       sh=get_reg(i_regs->regmap,rs1[i]|64);
3907       sl=get_reg(i_regs->regmap,rs1[i]);
3908       if(sl>=0) emit_mov(sl,tl);
3909       else emit_loadreg(rs1[i],tl);
3910       if(th>=0) {
3911         if(sh>=0) emit_mov(sh,th);
3912         else emit_loadreg(rs1[i]|64,th);
3913       }
3914     }
3915   }
3916 }
3917
3918 #ifndef fconv_assemble
3919 void fconv_assemble(int i,struct regstat *i_regs)
3920 {
3921   printf("Need fconv_assemble for this architecture.\n");
3922   exit(1);
3923 }
3924 #endif
3925
3926 #if 0
3927 void float_assemble(int i,struct regstat *i_regs)
3928 {
3929   printf("Need float_assemble for this architecture.\n");
3930   exit(1);
3931 }
3932 #endif
3933
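// syscall_assemble, hlecall_assemble and intcall_assemble all end the
// block the same way: load the exception/return PC into a register, add
// the pending cycle count to HOST_CCREG and tail-jump to the matching C
// handler (jump_syscall_hle, jump_hlecall, jump_intcall).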
3934 void syscall_assemble(int i,struct regstat *i_regs)
3935 {
3936   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3937   assert(ccreg==HOST_CCREG);
3938   assert(!is_delayslot);
3939   emit_movimm(start+i*4,EAX); // Get PC
3940   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // CHECK: is this right?  There should probably be an extra cycle...
3941   emit_jmp((int)jump_syscall_hle); // XXX
3942 }
3943
3944 void hlecall_assemble(int i,struct regstat *i_regs)
3945 {
3946   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3947   assert(ccreg==HOST_CCREG);
3948   assert(!is_delayslot);
3949   emit_movimm(start+i*4+4,0); // Get PC
3950   emit_movimm((int)psxHLEt[source[i]&7],1);
3951   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG); // XXX
3952   emit_jmp((int)jump_hlecall);
3953 }
3954
3955 void intcall_assemble(int i,struct regstat *i_regs)
3956 {
3957   signed char ccreg=get_reg(i_regs->regmap,CCREG);
3958   assert(ccreg==HOST_CCREG);
3959   assert(!is_delayslot);
3960   emit_movimm(start+i*4,0); // Get PC
3961   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*ccadj[i],HOST_CCREG);
3962   emit_jmp((int)jump_intcall);
3963 }
3964
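// ds_assemble: assemble the instruction sitting in a branch delay slot.
// It dispatches on itype with is_delayslot set, so the individual
// assemblers know they are in a delay slot (e.g. for exception stubs);
// a branch inside a delay slot is not supported and is only reported.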
3965 void ds_assemble(int i,struct regstat *i_regs)
3966 {
3967   is_delayslot=1;
3968   switch(itype[i]) {
3969     case ALU:
3970       alu_assemble(i,i_regs);break;
3971     case IMM16:
3972       imm16_assemble(i,i_regs);break;
3973     case SHIFT:
3974       shift_assemble(i,i_regs);break;
3975     case SHIFTIMM:
3976       shiftimm_assemble(i,i_regs);break;
3977     case LOAD:
3978       load_assemble(i,i_regs);break;
3979     case LOADLR:
3980       loadlr_assemble(i,i_regs);break;
3981     case STORE:
3982       store_assemble(i,i_regs);break;
3983     case STORELR:
3984       storelr_assemble(i,i_regs);break;
3985     case COP0:
3986       cop0_assemble(i,i_regs);break;
3987     case COP1:
3988       cop1_assemble(i,i_regs);break;
3989     case C1LS:
3990       c1ls_assemble(i,i_regs);break;
3991     case COP2:
3992       cop2_assemble(i,i_regs);break;
3993     case C2LS:
3994       c2ls_assemble(i,i_regs);break;
3995     case C2OP:
3996       c2op_assemble(i,i_regs);break;
3997     case FCONV:
3998       fconv_assemble(i,i_regs);break;
3999     case FLOAT:
4000       float_assemble(i,i_regs);break;
4001     case FCOMP:
4002       fcomp_assemble(i,i_regs);break;
4003     case MULTDIV:
4004       multdiv_assemble(i,i_regs);break;
4005     case MOV:
4006       mov_assemble(i,i_regs);break;
4007     case SYSCALL:
4008     case HLECALL:
4009     case INTCALL:
4010     case SPAN:
4011     case UJUMP:
4012     case RJUMP:
4013     case CJUMP:
4014     case SJUMP:
4015     case FJUMP:
4016       printf("Jump in the delay slot.  This is probably a bug.\n");
4017   }
4018   is_delayslot=0;
4019 }
4020
4021 // Is the branch target a valid internal jump?
4022 int internal_branch(uint64_t i_is32,int addr)
4023 {
4024   if(addr&1) return 0; // Indirect (register) jump
4025   if(addr>=start && addr<start+slen*4-4)
4026   {
4027     int t=(addr-start)>>2;
4028     // Delay slots are not valid branch targets
4029     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4030     // 64 -> 32 bit transition requires a recompile
4031     /*if(is32[t]&~unneeded_reg_upper[t]&~i_is32)
4032     {
4033       if(requires_32bit[t]&~i_is32) printf("optimizable: no\n");
4034       else printf("optimizable: yes\n");
4035     }*/
4036     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4037 #ifndef FORCE32
4038     if(requires_32bit[t]&~i_is32) return 0;
4039     else
4040 #endif
4041       return 1;
4042   }
4043   return 0;
4044 }
4045
4046 #ifndef wb_invalidate
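// Default wb_invalidate: spill any dirty value whose host register is
// being reassigned to a different guest register at this point, unless
// that guest register is marked unneeded (u/uu); values that merely
// move to another host register are copied by the second loop without
// touching memory.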
4047 void wb_invalidate(signed char pre[],signed char entry[],uint64_t dirty,uint64_t is32,
4048   uint64_t u,uint64_t uu)
4049 {
4050   int hr;
4051   for(hr=0;hr<HOST_REGS;hr++) {
4052     if(hr!=EXCLUDE_REG) {
4053       if(pre[hr]!=entry[hr]) {
4054         if(pre[hr]>=0) {
4055           if((dirty>>hr)&1) {
4056             if(get_reg(entry,pre[hr])<0) {
4057               if(pre[hr]<64) {
4058                 if(!((u>>pre[hr])&1)) {
4059                   emit_storereg(pre[hr],hr);
4060                   if( ((is32>>pre[hr])&1) && !((uu>>pre[hr])&1) ) {
4061                     emit_sarimm(hr,31,hr);
4062                     emit_storereg(pre[hr]|64,hr);
4063                   }
4064                 }
4065               }else{
4066                 if(!((uu>>(pre[hr]&63))&1) && !((is32>>(pre[hr]&63))&1)) {
4067                   emit_storereg(pre[hr],hr);
4068                 }
4069               }
4070             }
4071           }
4072         }
4073       }
4074     }
4075   }
4076   // Move from one register to another (no writeback)
4077   for(hr=0;hr<HOST_REGS;hr++) {
4078     if(hr!=EXCLUDE_REG) {
4079       if(pre[hr]!=entry[hr]) {
4080         if(pre[hr]>=0&&(pre[hr]&63)<TEMPREG) {
4081           int nr;
4082           if((nr=get_reg(entry,pre[hr]))>=0) {
4083             emit_mov(hr,nr);
4084           }
4085         }
4086       }
4087     }
4088   }
4089 }
4090 #endif
4091
4092 // Load the specified registers
4093 // This only loads the registers given as arguments because
4094 // we don't want to load things that will be overwritten
4095 void load_regs(signed char entry[],signed char regmap[],int is32,int rs1,int rs2)
4096 {
4097   int hr;
4098   // Load 32-bit regs
4099   for(hr=0;hr<HOST_REGS;hr++) {
4100     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4101       if(entry[hr]!=regmap[hr]) {
4102         if(regmap[hr]==rs1||regmap[hr]==rs2)
4103         {
4104           if(regmap[hr]==0) {
4105             emit_zeroreg(hr);
4106           }
4107           else
4108           {
4109             emit_loadreg(regmap[hr],hr);
4110           }
4111         }
4112       }
4113     }
4114   }
4115   // Load 64-bit regs
4116   for(hr=0;hr<HOST_REGS;hr++) {
4117     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4118       if(entry[hr]!=regmap[hr]) {
4119         if(regmap[hr]-64==rs1||regmap[hr]-64==rs2)
4120         {
4121           assert(regmap[hr]!=64);
4122           if((is32>>(regmap[hr]&63))&1) {
4123             int lr=get_reg(regmap,regmap[hr]-64);
4124             if(lr>=0)
4125               emit_sarimm(lr,31,hr);
4126             else
4127               emit_loadreg(regmap[hr],hr);
4128           }
4129           else
4130           {
4131             emit_loadreg(regmap[hr],hr);
4132           }
4133         }
4134       }
4135     }
4136   }
4137 }
4138
4139 // Load registers prior to the start of a loop
4140 // so that they are not loaded within the loop
4141 static void loop_preload(signed char pre[],signed char entry[])
4142 {
4143   int hr;
4144   for(hr=0;hr<HOST_REGS;hr++) {
4145     if(hr!=EXCLUDE_REG) {
4146       if(pre[hr]!=entry[hr]) {
4147         if(entry[hr]>=0) {
4148           if(get_reg(pre,entry[hr])<0) {
4149             assem_debug("loop preload:\n");
4150             //printf("loop preload: %d\n",hr);
4151             if(entry[hr]==0) {
4152               emit_zeroreg(hr);
4153             }
4154             else if(entry[hr]<TEMPREG)
4155             {
4156               emit_loadreg(entry[hr],hr);
4157             }
4158             else if(entry[hr]-64<TEMPREG)
4159             {
4160               emit_loadreg(entry[hr],hr);
4161             }
4162           }
4163         }
4164       }
4165     }
4166   }
4167 }
4168
4169 // Generate address for load/store instruction
4170 // goes to AGEN for writes, FTEMP for LOADLR and cop1/2 loads
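// (agr/mgr alternate between two temporaries, AGEN1+(i&1) and
// MGEN1+(i&1), presumably so that the address precomputed at the bottom
// of this function for instruction i+1 does not clobber the one
// generated for instruction i.)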
4171 void address_generation(int i,struct regstat *i_regs,signed char entry[])
4172 {
4173   if(itype[i]==LOAD||itype[i]==LOADLR||itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS||itype[i]==C2LS) {
4174     int ra=-1;
4175     int agr=AGEN1+(i&1);
4176     int mgr=MGEN1+(i&1);
4177     if(itype[i]==LOAD) {
4178       ra=get_reg(i_regs->regmap,rt1[i]);
4179       if(ra<0) ra=get_reg(i_regs->regmap,-1); 
4180       assert(ra>=0);
4181     }
4182     if(itype[i]==LOADLR) {
4183       ra=get_reg(i_regs->regmap,FTEMP);
4184     }
4185     if(itype[i]==STORE||itype[i]==STORELR) {
4186       ra=get_reg(i_regs->regmap,agr);
4187       if(ra<0) ra=get_reg(i_regs->regmap,-1);
4188     }
4189     if(itype[i]==C1LS||itype[i]==C2LS) {
4190       if ((opcode[i]&0x3b)==0x31||(opcode[i]&0x3b)==0x32) // LWC1/LDC1/LWC2/LDC2
4191         ra=get_reg(i_regs->regmap,FTEMP);
4192       else { // SWC1/SDC1/SWC2/SDC2
4193         ra=get_reg(i_regs->regmap,agr);
4194         if(ra<0) ra=get_reg(i_regs->regmap,-1);
4195       }
4196     }
4197     int rs=get_reg(i_regs->regmap,rs1[i]);
4198     int rm=get_reg(i_regs->regmap,TLREG);
4199     if(ra>=0) {
4200       int offset=imm[i];
4201       int c=(i_regs->wasconst>>rs)&1;
4202       if(rs1[i]==0) {
4203         // Using r0 as a base address
4204         /*if(rm>=0) {
4205           if(!entry||entry[rm]!=mgr) {
4206             generate_map_const(offset,rm);
4207           } // else did it in the previous cycle
4208         }*/
4209         if(!entry||entry[ra]!=agr) {
4210           if (opcode[i]==0x22||opcode[i]==0x26) {
4211             emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4212           }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4213             emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4214           }else{
4215             emit_movimm(offset,ra);
4216           }
4217         } // else did it in the previous cycle
4218       }
4219       else if(rs<0) {
4220         if(!entry||entry[ra]!=rs1[i])
4221           emit_loadreg(rs1[i],ra);
4222         //if(!entry||entry[ra]!=rs1[i])
4223         //  printf("poor load scheduling!\n");
4224       }
4225       else if(c) {
4226         if(rm>=0) {
4227           if(!entry||entry[rm]!=mgr) {
4228             if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a) {
4229               // Stores to memory go through the mapper to detect self-modifying
4230               // code; loads don't.
4231               if((unsigned int)(constmap[i][rs]+offset)>=0xC0000000 ||
4232                  (unsigned int)(constmap[i][rs]+offset)<0x80000000+RAM_SIZE )
4233                 generate_map_const(constmap[i][rs]+offset,rm);
4234             }else{
4235               if((signed int)(constmap[i][rs]+offset)>=(signed int)0xC0000000)
4236                 generate_map_const(constmap[i][rs]+offset,rm);
4237             }
4238           }
4239         }
4240         if(rs1[i]!=rt1[i]||itype[i]!=LOAD) {
4241           if(!entry||entry[ra]!=agr) {
4242             if (opcode[i]==0x22||opcode[i]==0x26) {
4243               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4244             }else if (opcode[i]==0x1a||opcode[i]==0x1b) {
4245               emit_movimm((constmap[i][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4246             }else{
4247               #ifdef HOST_IMM_ADDR32
4248               if((itype[i]!=LOAD&&(opcode[i]&0x3b)!=0x31&&(opcode[i]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4249                  (using_tlb&&((signed int)constmap[i][rs]+offset)>=(signed int)0xC0000000))
4250               #endif
4251               emit_movimm(constmap[i][rs]+offset,ra);
4252             }
4253           } // else did it in the previous cycle
4254         } // else load_consts already did it
4255       }
4256       if(offset&&!c&&rs1[i]) {
4257         if(rs>=0) {
4258           emit_addimm(rs,offset,ra);
4259         }else{
4260           emit_addimm(ra,offset,ra);
4261         }
4262       }
4263     }
4264   }
4265   // Preload constants for next instruction
4266   if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
4267     int agr,ra;
4268     #ifndef HOST_IMM_ADDR32
4269     // Mapper entry
4270     agr=MGEN1+((i+1)&1);
4271     ra=get_reg(i_regs->regmap,agr);
4272     if(ra>=0) {
4273       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4274       int offset=imm[i+1];
4275       int c=(regs[i+1].wasconst>>rs)&1;
4276       if(c) {
4277         if(itype[i+1]==STORE||itype[i+1]==STORELR
4278            ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1, SWC2/SDC2
4279            // Stores to memory go through the mapper to detect self-modifying
4280            // code; loads don't.
4281           if((unsigned int)(constmap[i+1][rs]+offset)>=0xC0000000 ||
4282              (unsigned int)(constmap[i+1][rs]+offset)<0x80000000+RAM_SIZE )
4283             generate_map_const(constmap[i+1][rs]+offset,ra);
4284         }else{
4285           if((signed int)(constmap[i+1][rs]+offset)>=(signed int)0xC0000000)
4286             generate_map_const(constmap[i+1][rs]+offset,ra);
4287         }
4288       }
4289       /*else if(rs1[i]==0) {
4290         generate_map_const(offset,ra);
4291       }*/
4292     }
4293     #endif
4294     // Actual address
4295     agr=AGEN1+((i+1)&1);
4296     ra=get_reg(i_regs->regmap,agr);
4297     if(ra>=0) {
4298       int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
4299       int offset=imm[i+1];
4300       int c=(regs[i+1].wasconst>>rs)&1;
4301       if(c&&(rs1[i+1]!=rt1[i+1]||itype[i+1]!=LOAD)) {
4302         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4303           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFFC,ra); // LWL/LWR
4304         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4305           emit_movimm((constmap[i+1][rs]+offset)&0xFFFFFFF8,ra); // LDL/LDR
4306         }else{
4307           #ifdef HOST_IMM_ADDR32
4308           if((itype[i+1]!=LOAD&&(opcode[i+1]&0x3b)!=0x31&&(opcode[i+1]&0x3b)!=0x32) || // LWC1/LDC1/LWC2/LDC2
4309              (using_tlb&&((signed int)constmap[i+1][rs]+offset)>=(signed int)0xC0000000))
4310           #endif
4311           emit_movimm(constmap[i+1][rs]+offset,ra);
4312         }
4313       }
4314       else if(rs1[i+1]==0) {
4315         // Using r0 as a base address
4316         if (opcode[i+1]==0x22||opcode[i+1]==0x26) {
4317           emit_movimm(offset&0xFFFFFFFC,ra); // LWL/LWR
4318         }else if (opcode[i+1]==0x1a||opcode[i+1]==0x1b) {
4319           emit_movimm(offset&0xFFFFFFF8,ra); // LDL/LDR
4320         }else{
4321           emit_movimm(offset,ra);
4322         }
4323       }
4324     }
4325   }
4326 }
4327
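// get_final_value: for a host register that holds a known constant,
// scan forward while the mapping and constness stay the same (and no
// branch target intervenes) to find the value that should really be
// loaded, folding a following load's immediate into the address when
// possible.  Returns 0 if the constant is unneeded past that point.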
4328 int get_final_value(int hr, int i, int *value)
4329 {
4330   int reg=regs[i].regmap[hr];
4331   while(i<slen-1) {
4332     if(regs[i+1].regmap[hr]!=reg) break;
4333     if(!((regs[i+1].isconst>>hr)&1)) break;
4334     if(bt[i+1]) break;
4335     i++;
4336   }
4337   if(i<slen-1) {
4338     if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP) {
4339       *value=constmap[i][hr];
4340       return 1;
4341     }
4342     if(!bt[i+1]) {
4343       if(itype[i+1]==UJUMP||itype[i+1]==RJUMP||itype[i+1]==CJUMP||itype[i+1]==SJUMP) {
4344         // Load in delay slot, out-of-order execution
4345         if(itype[i+2]==LOAD&&rs1[i+2]==reg&&rt1[i+2]==reg&&((regs[i+1].wasconst>>hr)&1))
4346         {
4347           #ifdef HOST_IMM_ADDR32
4348           if(!using_tlb||((signed int)constmap[i][hr]+imm[i+2])<(signed int)0xC0000000) return 0;
4349           #endif
4350           // Precompute load address
4351           *value=constmap[i][hr]+imm[i+2];
4352           return 1;
4353         }
4354       }
4355       if(itype[i+1]==LOAD&&rs1[i+1]==reg&&rt1[i+1]==reg)
4356       {
4357         #ifdef HOST_IMM_ADDR32
4358         if(!using_tlb||((signed int)constmap[i][hr]+imm[i+1])<(signed int)0xC0000000) return 0;
4359         #endif
4360         // Precompute load address
4361         *value=constmap[i][hr]+imm[i+1];
4362         //printf("c=%x imm=%x\n",(int)constmap[i][hr],imm[i+1]);
4363         return 1;
4364       }
4365     }
4366   }
4367   *value=constmap[i][hr];
4368   //printf("c=%x\n",(int)constmap[i][hr]);
4369   if(i==slen-1) return 1;
4370   if(reg<64) {
4371     return !((unneeded_reg[i+1]>>reg)&1);
4372   }else{
4373     return !((unneeded_reg_upper[i+1]>>reg)&1);
4374   }
4375 }
4376
4377 // Load registers with known constants
4378 void load_consts(signed char pre[],signed char regmap[],int is32,int i)
4379 {
4380   int hr;
4381   // Load 32-bit regs
4382   for(hr=0;hr<HOST_REGS;hr++) {
4383     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4384       //if(entry[hr]!=regmap[hr]) {
4385       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4386         if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4387           int value;
4388           if(get_final_value(hr,i,&value)) {
4389             if(value==0) {
4390               emit_zeroreg(hr);
4391             }
4392             else {
4393               emit_movimm(value,hr);
4394             }
4395           }
4396         }
4397       }
4398     }
4399   }
4400   // Load 64-bit regs
4401   for(hr=0;hr<HOST_REGS;hr++) {
4402     if(hr!=EXCLUDE_REG&&regmap[hr]>=0) {
4403       //if(entry[hr]!=regmap[hr]) {
4404       if(i==0||!((regs[i-1].isconst>>hr)&1)||pre[hr]!=regmap[hr]||bt[i]) {
4405         if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4406           if((is32>>(regmap[hr]&63))&1) {
4407             int lr=get_reg(regmap,regmap[hr]-64);
4408             assert(lr>=0);
4409             emit_sarimm(lr,31,hr);
4410           }
4411           else
4412           {
4413             int value;
4414             if(get_final_value(hr,i,&value)) {
4415               if(value==0) {
4416                 emit_zeroreg(hr);
4417               }
4418               else {
4419                 emit_movimm(value,hr);
4420               }
4421             }
4422           }
4423         }
4424       }
4425     }
4426   }
4427 }
4428 void load_all_consts(signed char regmap[],int is32,u_int dirty,int i)
4429 {
4430   int hr;
4431   // Load 32-bit regs
4432   for(hr=0;hr<HOST_REGS;hr++) {
4433     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4434       if(((regs[i].isconst>>hr)&1)&&regmap[hr]<64&&regmap[hr]>0) {
4435         int value=constmap[i][hr];
4436         if(value==0) {
4437           emit_zeroreg(hr);
4438         }
4439         else {
4440           emit_movimm(value,hr);
4441         }
4442       }
4443     }
4444   }
4445   // Load 64-bit regs
4446   for(hr=0;hr<HOST_REGS;hr++) {
4447     if(hr!=EXCLUDE_REG&&regmap[hr]>=0&&((dirty>>hr)&1)) {
4448       if(((regs[i].isconst>>hr)&1)&&regmap[hr]>64) {
4449         if((is32>>(regmap[hr]&63))&1) {
4450           int lr=get_reg(regmap,regmap[hr]-64);
4451           assert(lr>=0);
4452           emit_sarimm(lr,31,hr);
4453         }
4454         else
4455         {
4456           int value=constmap[i][hr];
4457           if(value==0) {
4458             emit_zeroreg(hr);
4459           }
4460           else {
4461             emit_movimm(value,hr);
4462           }
4463         }
4464       }
4465     }
4466   }
4467 }
4468
4469 // Write out all dirty registers (except cycle count)
4470 void wb_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty)
4471 {
4472   int hr;
4473   for(hr=0;hr<HOST_REGS;hr++) {
4474     if(hr!=EXCLUDE_REG) {
4475       if(i_regmap[hr]>0) {
4476         if(i_regmap[hr]!=CCREG) {
4477           if((i_dirty>>hr)&1) {
4478             if(i_regmap[hr]<64) {
4479               emit_storereg(i_regmap[hr],hr);
4480 #ifndef FORCE32
4481               if( ((i_is32>>i_regmap[hr])&1) ) {
4482                 #ifdef DESTRUCTIVE_WRITEBACK
4483                 emit_sarimm(hr,31,hr);
4484                 emit_storereg(i_regmap[hr]|64,hr);
4485                 #else
4486                 emit_sarimm(hr,31,HOST_TEMPREG);
4487                 emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4488                 #endif
4489               }
4490 #endif
4491             }else{
4492               if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4493                 emit_storereg(i_regmap[hr],hr);
4494               }
4495             }
4496           }
4497         }
4498       }
4499     }
4500   }
4501 }
4502 // Write out dirty registers that we need to reload (pair with load_needed_regs)
4503 // This writes the registers not written by store_regs_bt
4504 void wb_needed_dirtys(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4505 {
4506   int hr;
4507   int t=(addr-start)>>2;
4508   for(hr=0;hr<HOST_REGS;hr++) {
4509     if(hr!=EXCLUDE_REG) {
4510       if(i_regmap[hr]>0) {
4511         if(i_regmap[hr]!=CCREG) {
4512           if(i_regmap[hr]==regs[t].regmap_entry[hr] && ((regs[t].dirty>>hr)&1) && !(((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4513             if((i_dirty>>hr)&1) {
4514               if(i_regmap[hr]<64) {
4515                 emit_storereg(i_regmap[hr],hr);
4516 #ifndef FORCE32
4517                 if( ((i_is32>>i_regmap[hr])&1) ) {
4518                   #ifdef DESTRUCTIVE_WRITEBACK
4519                   emit_sarimm(hr,31,hr);
4520                   emit_storereg(i_regmap[hr]|64,hr);
4521                   #else
4522                   emit_sarimm(hr,31,HOST_TEMPREG);
4523                   emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4524                   #endif
4525                 }
4526 #endif
4527               }else{
4528                 if( !((i_is32>>(i_regmap[hr]&63))&1) ) {
4529                   emit_storereg(i_regmap[hr],hr);
4530                 }
4531               }
4532             }
4533           }
4534         }
4535       }
4536     }
4537   }
4538 }
4539
4540 // Load all registers (except cycle count)
4541 void load_all_regs(signed char i_regmap[])
4542 {
4543   int hr;
4544   for(hr=0;hr<HOST_REGS;hr++) {
4545     if(hr!=EXCLUDE_REG) {
4546       if(i_regmap[hr]==0) {
4547         emit_zeroreg(hr);
4548       }
4549       else
4550       if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4551       {
4552         emit_loadreg(i_regmap[hr],hr);
4553       }
4554     }
4555   }
4556 }
4557
4558 // Load all current registers also needed by next instruction
4559 void load_needed_regs(signed char i_regmap[],signed char next_regmap[])
4560 {
4561   int hr;
4562   for(hr=0;hr<HOST_REGS;hr++) {
4563     if(hr!=EXCLUDE_REG) {
4564       if(get_reg(next_regmap,i_regmap[hr])>=0) {
4565         if(i_regmap[hr]==0) {
4566           emit_zeroreg(hr);
4567         }
4568         else
4569         if(i_regmap[hr]>0 && (i_regmap[hr]&63)<TEMPREG && i_regmap[hr]!=CCREG)
4570         {
4571           emit_loadreg(i_regmap[hr],hr);
4572         }
4573       }
4574     }
4575   }
4576 }
4577
4578 // Load all regs, storing cycle count if necessary
4579 void load_regs_entry(int t)
4580 {
4581   int hr;
4582   if(is_ds[t]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER,HOST_CCREG);
4583   else if(ccadj[t]) emit_addimm(HOST_CCREG,-ccadj[t]*CLOCK_DIVIDER,HOST_CCREG);
4584   if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4585     emit_storereg(CCREG,HOST_CCREG);
4586   }
4587   // Load 32-bit regs
4588   for(hr=0;hr<HOST_REGS;hr++) {
4589     if(regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4590       if(regs[t].regmap_entry[hr]==0) {
4591         emit_zeroreg(hr);
4592       }
4593       else if(regs[t].regmap_entry[hr]!=CCREG)
4594       {
4595         emit_loadreg(regs[t].regmap_entry[hr],hr);
4596       }
4597     }
4598   }
4599   // Load 64-bit regs
4600   for(hr=0;hr<HOST_REGS;hr++) {
4601     if(regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4602       assert(regs[t].regmap_entry[hr]!=64);
4603       if((regs[t].was32>>(regs[t].regmap_entry[hr]&63))&1) {
4604         int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4605         if(lr<0) {
4606           emit_loadreg(regs[t].regmap_entry[hr],hr);
4607         }
4608         else
4609         {
4610           emit_sarimm(lr,31,hr);
4611         }
4612       }
4613       else
4614       {
4615         emit_loadreg(regs[t].regmap_entry[hr],hr);
4616       }
4617     }
4618   }
4619 }
4620
4621 // Store dirty registers prior to branch
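// For internal branches only the dirty registers that the target block
// does not already expect (same mapping, dirty on entry) are written
// back, and registers the target treats as unneeded are skipped; for
// external branches wb_dirtys flushes everything.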
4622 void store_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4623 {
4624   if(internal_branch(i_is32,addr))
4625   {
4626     int t=(addr-start)>>2;
4627     int hr;
4628     for(hr=0;hr<HOST_REGS;hr++) {
4629       if(hr!=EXCLUDE_REG) {
4630         if(i_regmap[hr]>0 && i_regmap[hr]!=CCREG) {
4631           if(i_regmap[hr]!=regs[t].regmap_entry[hr] || !((regs[t].dirty>>hr)&1) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4632             if((i_dirty>>hr)&1) {
4633               if(i_regmap[hr]<64) {
4634                 if(!((unneeded_reg[t]>>i_regmap[hr])&1)) {
4635                   emit_storereg(i_regmap[hr],hr);
4636                   if( ((i_is32>>i_regmap[hr])&1) && !((unneeded_reg_upper[t]>>i_regmap[hr])&1) ) {
4637                     #ifdef DESTRUCTIVE_WRITEBACK
4638                     emit_sarimm(hr,31,hr);
4639                     emit_storereg(i_regmap[hr]|64,hr);
4640                     #else
4641                     emit_sarimm(hr,31,HOST_TEMPREG);
4642                     emit_storereg(i_regmap[hr]|64,HOST_TEMPREG);
4643                     #endif
4644                   }
4645                 }
4646               }else{
4647                 if( !((i_is32>>(i_regmap[hr]&63))&1) && !((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1) ) {
4648                   emit_storereg(i_regmap[hr],hr);
4649                 }
4650               }
4651             }
4652           }
4653         }
4654       }
4655     }
4656   }
4657   else
4658   {
4659     // Branch out of this block, write out all dirty regs
4660     wb_dirtys(i_regmap,i_is32,i_dirty);
4661   }
4662 }
4663
4664 // Load all needed registers for branch target
4665 void load_regs_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4666 {
4667   //if(addr>=start && addr<(start+slen*4))
4668   if(internal_branch(i_is32,addr))
4669   {
4670     int t=(addr-start)>>2;
4671     int hr;
4672     // Store the cycle count before loading something else
4673     if(i_regmap[HOST_CCREG]!=CCREG) {
4674       assert(i_regmap[HOST_CCREG]==-1);
4675     }
4676     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) {
4677       emit_storereg(CCREG,HOST_CCREG);
4678     }
4679     // Load 32-bit regs
4680     for(hr=0;hr<HOST_REGS;hr++) {
4681       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=0&&regs[t].regmap_entry[hr]<TEMPREG) {
4682         #ifdef DESTRUCTIVE_WRITEBACK
4683         if(i_regmap[hr]!=regs[t].regmap_entry[hr] || ( !((regs[t].dirty>>hr)&1) && ((i_dirty>>hr)&1) && (((i_is32&~unneeded_reg_upper[t])>>i_regmap[hr])&1) ) || (((i_is32&~regs[t].was32&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)) {
4684         #else
4685         if(i_regmap[hr]!=regs[t].regmap_entry[hr] ) {
4686         #endif
4687           if(regs[t].regmap_entry[hr]==0) {
4688             emit_zeroreg(hr);
4689           }
4690           else if(regs[t].regmap_entry[hr]!=CCREG)
4691           {
4692             emit_loadreg(regs[t].regmap_entry[hr],hr);
4693           }
4694         }
4695       }
4696     }
4697     // Load 64-bit regs
4698     for(hr=0;hr<HOST_REGS;hr++) {
4699       if(hr!=EXCLUDE_REG&&regs[t].regmap_entry[hr]>=64&&regs[t].regmap_entry[hr]<TEMPREG+64) {
4700         if(i_regmap[hr]!=regs[t].regmap_entry[hr]) {
4701           assert(regs[t].regmap_entry[hr]!=64);
4702           if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4703             int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4704             if(lr<0) {
4705               emit_loadreg(regs[t].regmap_entry[hr],hr);
4706             }
4707             else
4708             {
4709               emit_sarimm(lr,31,hr);
4710             }
4711           }
4712           else
4713           {
4714             emit_loadreg(regs[t].regmap_entry[hr],hr);
4715           }
4716         }
4717         else if((i_is32>>(regs[t].regmap_entry[hr]&63))&1) {
4718           int lr=get_reg(regs[t].regmap_entry,regs[t].regmap_entry[hr]-64);
4719           assert(lr>=0);
4720           emit_sarimm(lr,31,hr);
4721         }
4722       }
4723     }
4724   }
4725 }
4726
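// match_bt: return 1 if the current register state (mapping, dirtiness,
// 32/64-bit width) is compatible with the recorded entry state of the
// branch target, i.e. the branch can be linked directly with no
// writeback or reload.  Targets outside the block only match when
// nothing except the cycle count is dirty; delay-slot targets never
// match.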
4727 int match_bt(signed char i_regmap[],uint64_t i_is32,uint64_t i_dirty,int addr)
4728 {
4729   if(addr>=start && addr<start+slen*4-4)
4730   {
4731     int t=(addr-start)>>2;
4732     int hr;
4733     if(regs[t].regmap_entry[HOST_CCREG]!=CCREG) return 0;
4734     for(hr=0;hr<HOST_REGS;hr++)
4735     {
4736       if(hr!=EXCLUDE_REG)
4737       {
4738         if(i_regmap[hr]!=regs[t].regmap_entry[hr])
4739         {
4740           if(regs[t].regmap_entry[hr]>=0&&(regs[t].regmap_entry[hr]|64)<TEMPREG+64)
4741           {
4742             return 0;
4743           }
4744           else 
4745           if((i_dirty>>hr)&1)
4746           {
4747             if(i_regmap[hr]<TEMPREG)
4748             {
4749               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4750                 return 0;
4751             }
4752             else if(i_regmap[hr]>=64&&i_regmap[hr]<TEMPREG+64)
4753             {
4754               if(!((unneeded_reg_upper[t]>>(i_regmap[hr]&63))&1))
4755                 return 0;
4756             }
4757           }
4758         }
4759         else // Same register but is it 32-bit or dirty?
4760         if(i_regmap[hr]>=0)
4761         {
4762           if(!((regs[t].dirty>>hr)&1))
4763           {
4764             if((i_dirty>>hr)&1)
4765             {
4766               if(!((unneeded_reg[t]>>i_regmap[hr])&1))
4767               {
4768                 //printf("%x: dirty no match\n",addr);
4769                 return 0;
4770               }
4771             }
4772           }
4773           if((((regs[t].was32^i_is32)&~unneeded_reg_upper[t])>>(i_regmap[hr]&63))&1)
4774           {
4775             //printf("%x: is32 no match\n",addr);
4776             return 0;
4777           }
4778         }
4779       }
4780     }
4781     //if(is32[t]&~unneeded_reg_upper[t]&~i_is32) return 0;
4782 #ifndef FORCE32
4783     if(requires_32bit[t]&~i_is32) return 0;
4784 #endif
4785     // Delay slots are not valid branch targets
4786     //if(t>0&&(itype[t-1]==RJUMP||itype[t-1]==UJUMP||itype[t-1]==CJUMP||itype[t-1]==SJUMP||itype[t-1]==FJUMP)) return 0;
4787     // Delay slots require additional processing, so do not match
4788     if(is_ds[t]) return 0;
4789   }
4790   else
4791   {
4792     int hr;
4793     for(hr=0;hr<HOST_REGS;hr++)
4794     {
4795       if(hr!=EXCLUDE_REG)
4796       {
4797         if(i_regmap[hr]>=0)
4798         {
4799           if(hr!=HOST_CCREG||i_regmap[hr]!=CCREG)
4800           {
4801             if((i_dirty>>hr)&1)
4802             {
4803               return 0;
4804             }
4805           }
4806         }
4807       }
4808     }
4809   }
4810   return 1;
4811 }
4812
4813 // Used when a branch jumps into the delay slot of another branch
4814 void ds_assemble_entry(int i)
4815 {
4816   int t=(ba[i]-start)>>2;
4817   if(!instr_addr[t]) instr_addr[t]=(u_int)out;
4818   assem_debug("Assemble delay slot at %x\n",ba[i]);
4819   assem_debug("<->\n");
4820   if(regs[t].regmap_entry[HOST_CCREG]==CCREG&&regs[t].regmap[HOST_CCREG]!=CCREG)
4821     wb_register(CCREG,regs[t].regmap_entry,regs[t].wasdirty,regs[t].was32);
4822   load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,rs1[t],rs2[t]);
4823   address_generation(t,&regs[t],regs[t].regmap_entry);
4824   if(itype[t]==STORE||itype[t]==STORELR||(opcode[t]&0x3b)==0x39||(opcode[t]&0x3b)==0x3a)
4825     load_regs(regs[t].regmap_entry,regs[t].regmap,regs[t].was32,INVCP,INVCP);
4826   cop1_usable=0;
4827   is_delayslot=0;
4828   switch(itype[t]) {
4829     case ALU:
4830       alu_assemble(t,&regs[t]);break;
4831     case IMM16:
4832       imm16_assemble(t,&regs[t]);break;
4833     case SHIFT:
4834       shift_assemble(t,&regs[t]);break;
4835     case SHIFTIMM:
4836       shiftimm_assemble(t,&regs[t]);break;
4837     case LOAD:
4838       load_assemble(t,&regs[t]);break;
4839     case LOADLR:
4840       loadlr_assemble(t,&regs[t]);break;
4841     case STORE:
4842       store_assemble(t,&regs[t]);break;
4843     case STORELR:
4844       storelr_assemble(t,&regs[t]);break;
4845     case COP0:
4846       cop0_assemble(t,&regs[t]);break;
4847     case COP1:
4848       cop1_assemble(t,&regs[t]);break;
4849     case C1LS:
4850       c1ls_assemble(t,&regs[t]);break;
4851     case COP2:
4852       cop2_assemble(t,&regs[t]);break;
4853     case C2LS:
4854       c2ls_assemble(t,&regs[t]);break;
4855     case C2OP:
4856       c2op_assemble(t,&regs[t]);break;
4857     case FCONV:
4858       fconv_assemble(t,&regs[t]);break;
4859     case FLOAT:
4860       float_assemble(t,&regs[t]);break;
4861     case FCOMP:
4862       fcomp_assemble(t,&regs[t]);break;
4863     case MULTDIV:
4864       multdiv_assemble(t,&regs[t]);break;
4865     case MOV:
4866       mov_assemble(t,&regs[t]);break;
4867     case SYSCALL:
4868     case HLECALL:
4869     case INTCALL:
4870     case SPAN:
4871     case UJUMP:
4872     case RJUMP:
4873     case CJUMP:
4874     case SJUMP:
4875     case FJUMP:
4876       printf("Jump in the delay slot.  This is probably a bug.\n");
4877   }
4878   store_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4879   load_regs_bt(regs[t].regmap,regs[t].is32,regs[t].dirty,ba[i]+4);
4880   if(internal_branch(regs[t].is32,ba[i]+4))
4881     assem_debug("branch: internal\n");
4882   else
4883     assem_debug("branch: external\n");
4884   assert(internal_branch(regs[t].is32,ba[i]+4));
4885   add_to_linker((int)out,ba[i]+4,internal_branch(regs[t].is32,ba[i]+4));
4886   emit_jmp(0);
4887 }
4888
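// do_cc: emit the cycle-count check for a branch.  The cycles consumed
// so far (ccadj) are added to or compared against HOST_CCREG and a
// CC_STUB is recorded for the case where the counter has run out; a
// branch to its own address with a NOP in the delay slot is detected as
// an idle loop and handled specially (the remaining count is collapsed
// and the stub is taken unconditionally).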
4889 void do_cc(int i,signed char i_regmap[],int *adj,int addr,int taken,int invert)
4890 {
4891   int count;
4892   int jaddr;
4893   int idle=0;
4894   if(itype[i]==RJUMP)
4895   {
4896     *adj=0;
4897   }
4898   //if(ba[i]>=start && ba[i]<(start+slen*4))
4899   if(internal_branch(branch_regs[i].is32,ba[i]))
4900   {
4901     int t=(ba[i]-start)>>2;
4902     if(is_ds[t]) *adj=-1; // Branch into delay slot adds an extra cycle
4903     else *adj=ccadj[t];
4904   }
4905   else
4906   {
4907     *adj=0;
4908   }
4909   count=ccadj[i];
4910   if(taken==TAKEN && i==(ba[i]-start)>>2 && source[i+1]==0) {
4911     // Idle loop
4912     if(count&1) emit_addimm_and_set_flags(2*(count+2),HOST_CCREG);
4913     idle=(int)out;
4914     //emit_subfrommem(&idlecount,HOST_CCREG); // Count idle cycles
4915     emit_andimm(HOST_CCREG,3,HOST_CCREG);
4916     jaddr=(int)out;
4917     emit_jmp(0);
4918   }
4919   else if(*adj==0||invert) {
4920     emit_addimm_and_set_flags(CLOCK_DIVIDER*(count+2),HOST_CCREG);
4921     jaddr=(int)out;
4922     emit_jns(0);
4923   }
4924   else
4925   {
4926     emit_cmpimm(HOST_CCREG,-CLOCK_DIVIDER*(count+2));
4927     jaddr=(int)out;
4928     emit_jns(0);
4929   }
4930   add_stub(CC_STUB,jaddr,idle?idle:(int)out,(*adj==0||invert||idle)?0:(count+2),i,addr,taken,0);
4931 }
4932
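// do_ccstub: out-of-line continuation for the cycle-count checks
// emitted by do_cc.  It writes back whatever is dirty for the path the
// stub belongs to (taken, not taken, or nullified delay slot) and sets
// pcaddr, either from the recorded return address or, for conditional
// branches, by re-evaluating the branch condition with conditional
// moves.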
4933 void do_ccstub(int n)
4934 {
4935   literal_pool(256);
4936   assem_debug("do_ccstub %x\n",start+stubs[n][4]*4);
4937   set_jump_target(stubs[n][1],(int)out);
4938   int i=stubs[n][4];
4939   if(stubs[n][6]==NULLDS) {
4940     // Delay slot instruction is nullified ("likely" branch)
4941     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
4942   }
4943   else if(stubs[n][6]!=TAKEN) {
4944     wb_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty);
4945   }
4946   else {
4947     if(internal_branch(branch_regs[i].is32,ba[i]))
4948       wb_needed_dirtys(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
4949   }
4950   if(stubs[n][5]!=-1)
4951   {
4952     // Save PC as return address
4953     emit_movimm(stubs[n][5],EAX);
4954     emit_writeword(EAX,(int)&pcaddr);
4955   }
4956   else
4957   {
4958     // Return address depends on which way the branch goes
4959     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
4960     {
4961       int s1l=get_reg(branch_regs[i].regmap,rs1[i]);
4962       int s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
4963       int s2l=get_reg(branch_regs[i].regmap,rs2[i]);
4964       int s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
4965       if(rs1[i]==0)
4966       {
4967         s1l=s2l;s1h=s2h;
4968         s2l=s2h=-1;
4969       }
4970       else if(rs2[i]==0)
4971       {
4972         s2l=s2h=-1;
4973       }
4974       if((branch_regs[i].is32>>rs1[i])&(branch_regs[i].is32>>rs2[i])&1) {
4975         s1h=s2h=-1;
4976       }
4977       assert(s1l>=0);
4978       #ifdef DESTRUCTIVE_WRITEBACK
4979       if(rs1[i]) {
4980         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs1[i])&1)
4981           emit_loadreg(rs1[i],s1l);
4982       } 
4983       else {
4984         if((branch_regs[i].dirty>>s1l)&(branch_regs[i].is32>>rs2[i])&1)
4985           emit_loadreg(rs2[i],s1l);
4986       }
4987       if(s2l>=0)
4988         if((branch_regs[i].dirty>>s2l)&(branch_regs[i].is32>>rs2[i])&1)
4989           emit_loadreg(rs2[i],s2l);
4990       #endif
4991       int hr=0;
4992       int addr=-1,alt=-1,ntaddr=-1;
4993       while(hr<HOST_REGS)
4994       {
4995         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
4996            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
4997            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
4998         {
4999           addr=hr++;break;
5000         }
5001         hr++;
5002       }
5003       while(hr<HOST_REGS)
5004       {
5005         if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5006            (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5007            (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5008         {
5009           alt=hr++;break;
5010         }
5011         hr++;
5012       }
5013       if((opcode[i]&0x2E)==6) // BLEZ/BGTZ need another register
5014       {
5015         while(hr<HOST_REGS)
5016         {
5017           if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
5018              (branch_regs[i].regmap[hr]&63)!=rs1[i] &&
5019              (branch_regs[i].regmap[hr]&63)!=rs2[i] )
5020           {
5021             ntaddr=hr;break;
5022           }
5023           hr++;
5024         }
5025         assert(hr<HOST_REGS);
5026       }
5027       if((opcode[i]&0x2f)==4) // BEQ
5028       {
5029         #ifdef HAVE_CMOV_IMM
5030         if(s1h<0) {
5031           if(s2l>=0) emit_cmp(s1l,s2l);
5032           else emit_test(s1l,s1l);
5033           emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
5034         }
5035         else
5036         #endif
5037         {
5038           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5039           if(s1h>=0) {
5040             if(s2h>=0) emit_cmp(s1h,s2h);
5041             else emit_test(s1h,s1h);
5042             emit_cmovne_reg(alt,addr);
5043           }
5044           if(s2l>=0) emit_cmp(s1l,s2l);
5045           else emit_test(s1l,s1l);
5046           emit_cmovne_reg(alt,addr);
5047         }
5048       }
5049       if((opcode[i]&0x2f)==5) // BNE
5050       {
5051         #ifdef HAVE_CMOV_IMM
5052         if(s1h<0) {
5053           if(s2l>=0) emit_cmp(s1l,s2l);
5054           else emit_test(s1l,s1l);
5055           emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
5056         }
5057         else
5058         #endif
5059         {
5060           emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
5061           if(s1h>=0) {
5062             if(s2h>=0) emit_cmp(s1h,s2h);
5063             else emit_test(s1h,s1h);
5064             emit_cmovne_reg(alt,addr);
5065           }
5066           if(s2l>=0) emit_cmp(s1l,s2l);
5067           else emit_test(s1l,s1l);
5068           emit_cmovne_reg(alt,addr);
5069         }
5070       }
5071       if((opcode[i]&0x2f)==6) // BLEZ
5072       {
5073         //emit_movimm(ba[i],alt);
5074         //emit_movimm(start+i*4+8,addr);
5075         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5076         emit_cmpimm(s1l,1);
5077         if(s1h>=0) emit_mov(addr,ntaddr);
5078         emit_cmovl_reg(alt,addr);
5079         if(s1h>=0) {
5080           emit_test(s1h,s1h);
5081           emit_cmovne_reg(ntaddr,addr);
5082           emit_cmovs_reg(alt,addr);
5083         }
5084       }
5085       if((opcode[i]&0x2f)==7) // BGTZ
5086       {
5087         //emit_movimm(ba[i],addr);
5088         //emit_movimm(start+i*4+8,ntaddr);
5089         emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
5090         emit_cmpimm(s1l,1);
5091         if(s1h>=0) emit_mov(addr,alt);
5092         emit_cmovl_reg(ntaddr,addr);
5093         if(s1h>=0) {
5094           emit_test(s1h,s1h);
5095           emit_cmovne_reg(alt,addr);
5096           emit_cmovs_reg(ntaddr,addr);
5097         }
5098       }
5099       if((opcode[i]==1)&&(opcode2[i]&0x2D)==0) // BLTZ
5100       {
5101         //emit_movimm(ba[i],alt);
5102         //emit_movimm(start+i*4+8,addr);
5103         emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5104         if(s1h>=0) emit_test(s1h,s1h);
5105         else emit_test(s1l,s1l);
5106         emit_cmovs_reg(alt,addr);
5107       }
5108       if((opcode[i]==1)&&(opcode2[i]&0x2D)==1) // BGEZ
5109       {
5110         //emit_movimm(ba[i],addr);
5111         //emit_movimm(start+i*4+8,alt);
5112         emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5113         if(s1h>=0) emit_test(s1h,s1h);
5114         else emit_test(s1l,s1l);
5115         emit_cmovs_reg(alt,addr);
5116       }
5117       if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
5118         if(source[i]&0x10000) // BC1T
5119         {
5120           //emit_movimm(ba[i],alt);
5121           //emit_movimm(start+i*4+8,addr);
5122           emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
5123           emit_testimm(s1l,0x800000);
5124           emit_cmovne_reg(alt,addr);
5125         }
5126         else // BC1F
5127         {
5128           //emit_movimm(ba[i],addr);
5129           //emit_movimm(start+i*4+8,alt);
5130           emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
5131           emit_testimm(s1l,0x800000);
5132           emit_cmovne_reg(alt,addr);
5133         }
5134       }
5135       emit_writeword(addr,(int)&pcaddr);
5136     }
5137     else
5138     if(itype[i]==RJUMP)
5139     {
5140       int r=get_reg(branch_regs[i].regmap,rs1[i]);
5141       if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5142         r=get_reg(branch_regs[i].regmap,RTEMP);
5143       }
5144       emit_writeword(r,(int)&pcaddr);
5145     }
5146     else {printf("Unknown branch type in do_ccstub\n");exit(1);}
5147   }
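  // Epilogue of the cycle-count stub: temporarily add the pending cycles for
  // cc_interrupt(), undo the adjustment after the call, then reload whatever
  // registers the return path needs (TAKEN / NOTTAKEN / NULLDS) and jump back
  // into the compiled block.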
5148   // Update cycle count
5149   assert(branch_regs[i].regmap[HOST_CCREG]==CCREG||branch_regs[i].regmap[HOST_CCREG]==-1);
5150   if(stubs[n][3]) emit_addimm(HOST_CCREG,CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5151   emit_call((int)cc_interrupt);
5152   if(stubs[n][3]) emit_addimm(HOST_CCREG,-CLOCK_DIVIDER*stubs[n][3],HOST_CCREG);
5153   if(stubs[n][6]==TAKEN) {
5154     if(internal_branch(branch_regs[i].is32,ba[i]))
5155       load_needed_regs(branch_regs[i].regmap,regs[(ba[i]-start)>>2].regmap_entry);
5156     else if(itype[i]==RJUMP) {
5157       if(get_reg(branch_regs[i].regmap,RTEMP)>=0)
5158         emit_readword((int)&pcaddr,get_reg(branch_regs[i].regmap,RTEMP));
5159       else
5160         emit_loadreg(rs1[i],get_reg(branch_regs[i].regmap,rs1[i]));
5161     }
5162   }else if(stubs[n][6]==NOTTAKEN) {
5163     if(i<slen-2) load_needed_regs(branch_regs[i].regmap,regmap_pre[i+2]);
5164     else load_all_regs(branch_regs[i].regmap);
5165   }else if(stubs[n][6]==NULLDS) {
5166     // Delay slot instruction is nullified ("likely" branch)
5167     if(i<slen-2) load_needed_regs(regs[i].regmap,regmap_pre[i+2]);
5168     else load_all_regs(regs[i].regmap);
5169   }else{
5170     load_all_regs(branch_regs[i].regmap);
5171   }
5172   emit_jmp(stubs[n][2]); // return address
5173   
5174   /* This works but uses a lot of memory...
5175   emit_readword((int)&last_count,ECX);
5176   emit_add(HOST_CCREG,ECX,EAX);
5177   emit_writeword(EAX,(int)&Count);
5178   emit_call((int)gen_interupt);
5179   emit_readword((int)&Count,HOST_CCREG);
5180   emit_readword((int)&next_interupt,EAX);
5181   emit_readword((int)&pending_exception,EBX);
5182   emit_writeword(EAX,(int)&last_count);
5183   emit_sub(HOST_CCREG,EAX,HOST_CCREG);
5184   emit_test(EBX,EBX);
5185   int jne_instr=(int)out;
5186   emit_jne(0);
5187   if(stubs[n][3]) emit_addimm(HOST_CCREG,-2*stubs[n][3],HOST_CCREG);
5188   load_all_regs(branch_regs[i].regmap);
5189   emit_jmp(stubs[n][2]); // return address
5190   set_jump_target(jne_instr,(int)out);
5191   emit_readword((int)&pcaddr,EAX);
5192   // Call get_addr_ht instead of doing the hash table here.
5193   // This code is executed infrequently and takes up a lot of space
5194   // so smaller is better.
5195   emit_storereg(CCREG,HOST_CCREG);
5196   emit_pushreg(EAX);
5197   emit_call((int)get_addr_ht);
5198   emit_loadreg(CCREG,HOST_CCREG);
5199   emit_addimm(ESP,4,ESP);
5200   emit_jmpreg(EAX);*/
5201 }
5202
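// Record a jump that still needs to be linked to its target block.
// 'addr' is the address of the emitted jump, 'target' the virtual address it
// should reach, and 'ext' whether the target lies outside the current block.
// The link_addr[] entries are resolved later, once the whole block has been
// assembled (patched directly if the target is already compiled, otherwise
// routed through an external-jump stub).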
5203 void add_to_linker(int addr,int target,int ext)
5204 {
5205   link_addr[linkcount][0]=addr;
5206   link_addr[linkcount][1]=target;
5207   link_addr[linkcount][2]=ext;  
5208   linkcount++;
5209 }
5210
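// Write the JAL return address into the register allocated for $31 ($ra).
// The value is start+i*4+8, i.e. the address of the instruction after the
// delay slot: for a JAL assembled at 0x80030000 the link register gets
// 0x80030008.  With USE_MINI_HT the return address may also be inserted into
// the mini hash table (do_miniht_insert) so the matching JR $ra can dispatch
// without a full lookup.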
5211 static void ujump_assemble_write_ra(int i)
5212 {
5213   int rt;
5214   unsigned int return_address;
5215   rt=get_reg(branch_regs[i].regmap,31);
5216   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5217   //assert(rt>=0);
5218   return_address=start+i*4+8;
5219   if(rt>=0) {
5220     #ifdef USE_MINI_HT
5221     if(internal_branch(branch_regs[i].is32,return_address)&&rt1[i+1]!=31) {
5222       int temp=-1; // note: must be ds-safe
5223       #ifdef HOST_TEMPREG
5224       temp=HOST_TEMPREG;
5225       #endif
5226       if(temp>=0) do_miniht_insert(return_address,rt,temp);
5227       else emit_movimm(return_address,rt);
5228     }
5229     else
5230     #endif
5231     {
5232       #ifdef REG_PREFETCH // FIXME: 'temp' and 'i_regmap' are not defined in this split-out helper
5233       if(temp>=0) 
5234       {
5235         if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5236       }
5237       #endif
5238       emit_movimm(return_address,rt); // PC into link register
5239       #ifdef IMM_PREFETCH
5240       emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5241       #endif
5242     }
5243   }
5244 }
5245
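// Assemble J/JAL.  The delay slot is emitted first, then the registers the
// branch target expects are written back and loaded, the cycle counter is
// checked via do_cc(), and control finally transfers either straight into the
// target's delay-slot entry (internal branch whose target starts in a delay
// slot) or through a jump that add_to_linker() patches up later.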
5246 void ujump_assemble(int i,struct regstat *i_regs)
5247 {
5248   signed char *i_regmap=i_regs->regmap;
5249   int ra_done=0;
5250   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5251   address_generation(i+1,i_regs,regs[i].regmap_entry);
5252   #ifdef REG_PREFETCH
5253   int temp=get_reg(branch_regs[i].regmap,PTEMP);
5254   if(rt1[i]==31&&temp>=0) 
5255   {
5256     int return_address=start+i*4+8;
5257     if(get_reg(branch_regs[i].regmap,31)>0) 
5258     if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5259   }
5260   #endif
5261   if(rt1[i]==31&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5262     ujump_assemble_write_ra(i); // writeback ra for DS
5263     ra_done=1;
5264   }
5265   ds_assemble(i+1,i_regs);
5266   uint64_t bc_unneeded=branch_regs[i].u;
5267   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5268   bc_unneeded|=1|(1LL<<rt1[i]);
5269   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5270   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5271                 bc_unneeded,bc_unneeded_upper);
5272   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5273   if(!ra_done&&rt1[i]==31)
5274     ujump_assemble_write_ra(i);
5275   int cc,adj;
5276   cc=get_reg(branch_regs[i].regmap,CCREG);
5277   assert(cc==HOST_CCREG);
5278   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5279   #ifdef REG_PREFETCH
5280   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5281   #endif
5282   do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5283   if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5284   load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5285   if(internal_branch(branch_regs[i].is32,ba[i]))
5286     assem_debug("branch: internal\n");
5287   else
5288     assem_debug("branch: external\n");
5289   if(internal_branch(branch_regs[i].is32,ba[i])&&is_ds[(ba[i]-start)>>2]) {
5290     ds_assemble_entry(i);
5291   }
5292   else {
5293     add_to_linker((int)out,ba[i],internal_branch(branch_regs[i].is32,ba[i]));
5294     emit_jmp(0);
5295   }
5296 }
5297
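// Same as ujump_assemble_write_ra() but for JALR, which may link to any
// register (rt1[i]) rather than just $31.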
5298 static void rjump_assemble_write_ra(int i)
5299 {
5300   int rt,return_address;
5301   assert(rt1[i+1]!=rt1[i]);
5302   assert(rt2[i+1]!=rt1[i]);
5303   rt=get_reg(branch_regs[i].regmap,rt1[i]);
5304   assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5305   assert(rt>=0);
5306   return_address=start+i*4+8;
5307   #ifdef REG_PREFETCH // FIXME: 'temp' and 'i_regmap' are not defined in this split-out helper
5308   if(temp>=0) 
5309   {
5310     if(i_regmap[temp]!=PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5311   }
5312   #endif
5313   emit_movimm(return_address,rt); // PC into link register
5314   #ifdef IMM_PREFETCH
5315   emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5316   #endif
5317 }
5318
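// Assemble JR/JALR.  If the delay slot clobbers the branch address register,
// the address is first copied into RTEMP.  The jump itself is indirect,
// dispatching through jump_vaddr_reg[rs], with an optional mini-hash-table
// fast path for JR $ra when USE_MINI_HT is enabled.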
5319 void rjump_assemble(int i,struct regstat *i_regs)
5320 {
5321   signed char *i_regmap=i_regs->regmap;
5322   int temp;
5323   int rs,cc,adj;
5324   int ra_done=0;
5325   rs=get_reg(branch_regs[i].regmap,rs1[i]);
5326   assert(rs>=0);
5327   if(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]) {
5328     // Delay slot abuse, make a copy of the branch address register
5329     temp=get_reg(branch_regs[i].regmap,RTEMP);
5330     assert(temp>=0);
5331     assert(regs[i].regmap[temp]==RTEMP);
5332     emit_mov(rs,temp);
5333     rs=temp;
5334   }
5335   address_generation(i+1,i_regs,regs[i].regmap_entry);
5336   #ifdef REG_PREFETCH
5337   if(rt1[i]==31) 
5338   {
5339     if((temp=get_reg(branch_regs[i].regmap,PTEMP))>=0) {
5340       int return_address=start+i*4+8;
5341       if(i_regmap[temp]==PTEMP) emit_movimm((int)hash_table[((return_address>>16)^return_address)&0xFFFF],temp);
5342     }
5343   }
5344   #endif
5345   #ifdef USE_MINI_HT
5346   if(rs1[i]==31) {
5347     int rh=get_reg(regs[i].regmap,RHASH);
5348     if(rh>=0) do_preload_rhash(rh);
5349   }
5350   #endif
5351   if(rt1[i]!=0&&(rt1[i]==rs1[i+1]||rt1[i]==rs2[i+1])) {
5352     rjump_assemble_write_ra(i);
5353     ra_done=1;
5354   }
5355   ds_assemble(i+1,i_regs);
5356   uint64_t bc_unneeded=branch_regs[i].u;
5357   uint64_t bc_unneeded_upper=branch_regs[i].uu;
5358   bc_unneeded|=1|(1LL<<rt1[i]);
5359   bc_unneeded_upper|=1|(1LL<<rt1[i]);
5360   bc_unneeded&=~(1LL<<rs1[i]);
5361   wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5362                 bc_unneeded,bc_unneeded_upper);
5363   load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],CCREG);
5364   if(!ra_done&&rt1[i]!=0)
5365     rjump_assemble_write_ra(i);
5366   cc=get_reg(branch_regs[i].regmap,CCREG);
5367   assert(cc==HOST_CCREG);
5368   #ifdef USE_MINI_HT
5369   int rh=get_reg(branch_regs[i].regmap,RHASH);
5370   int ht=get_reg(branch_regs[i].regmap,RHTBL);
5371   if(rs1[i]==31) {
5372     if(regs[i].regmap[rh]!=RHASH) do_preload_rhash(rh);
5373     do_preload_rhtbl(ht);
5374     do_rhash(rs,rh);
5375   }
5376   #endif
5377   store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5378   #ifdef DESTRUCTIVE_WRITEBACK
5379   if((branch_regs[i].dirty>>rs)&(branch_regs[i].is32>>rs1[i])&1) {
5380     if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
5381       emit_loadreg(rs1[i],rs);
5382     }
5383   }
5384   #endif
5385   #ifdef REG_PREFETCH
5386   if(rt1[i]==31&&temp>=0) emit_prefetchreg(temp);
5387   #endif
5388   #ifdef USE_MINI_HT
5389   if(rs1[i]==31) {
5390     do_miniht_load(ht,rh);
5391   }
5392   #endif
5393   //do_cc(i,branch_regs[i].regmap,&adj,-1,TAKEN);
5394   //if(adj) emit_addimm(cc,2*(ccadj[i]+2-adj),cc); // ??? - Shouldn't happen
5395   //assert(adj==0);
5396   emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5397   add_stub(CC_STUB,(int)out,jump_vaddr_reg[rs],0,i,-1,TAKEN,0);
5398   emit_jns(0);
5399   //load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,-1);
5400   #ifdef USE_MINI_HT
5401   if(rs1[i]==31) {
5402     do_miniht_jump(rs,rh,ht);
5403   }
5404   else
5405   #endif
5406   {
5407     //if(rs!=EAX) emit_mov(rs,EAX);
5408     //emit_jmp((int)jump_vaddr_eax);
5409     emit_jmp(jump_vaddr_reg[rs]);
5410   }
5411   /* Check hash table
5412   temp=!rs;
5413   emit_mov(rs,temp);
5414   emit_shrimm(rs,16,rs);
5415   emit_xor(temp,rs,rs);
5416   emit_movzwl_reg(rs,rs);
5417   emit_shlimm(rs,4,rs);
5418   emit_cmpmem_indexed((int)hash_table,rs,temp);
5419   emit_jne((int)out+14);
5420   emit_readword_indexed((int)hash_table+4,rs,rs);
5421   emit_jmpreg(rs);
5422   emit_cmpmem_indexed((int)hash_table+8,rs,temp);
5423   emit_addimm_no_flags(8,rs);
5424   emit_jeq((int)out-17);
5425   // No hit on hash table, call compiler
5426   emit_pushreg(temp);
5427 //DEBUG >
5428 #ifdef DEBUG_CYCLE_COUNT
5429   emit_readword((int)&last_count,ECX);
5430   emit_add(HOST_CCREG,ECX,HOST_CCREG);
5431   emit_readword((int)&next_interupt,ECX);
5432   emit_writeword(HOST_CCREG,(int)&Count);
5433   emit_sub(HOST_CCREG,ECX,HOST_CCREG);
5434   emit_writeword(ECX,(int)&last_count);
5435 #endif
5436 //DEBUG <
5437   emit_storereg(CCREG,HOST_CCREG);
5438   emit_call((int)get_addr);
5439   emit_loadreg(CCREG,HOST_CCREG);
5440   emit_addimm(ESP,4,ESP);
5441   emit_jmpreg(EAX);*/
5442   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5443   if(rt1[i]!=31&&i<slen-2&&(((u_int)out)&7)) emit_mov(13,13);
5444   #endif
5445 }
5446
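// Assemble BEQ/BNE/BLEZ/BGTZ and their "likely" variants.  ooo[i] selects the
// out-of-order form (delay slot emitted before the compare) versus the
// in-order form (compare first, delay slot assembled on both paths).
// 'invert' flips the branch sense when the target's register mapping does not
// match this block's, so the taken path gets a full writeback/reload sequence
// while the not-taken path simply falls through.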
5447 void cjump_assemble(int i,struct regstat *i_regs)
5448 {
5449   signed char *i_regmap=i_regs->regmap;
5450   int cc;
5451   int match;
5452   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5453   assem_debug("match=%d\n",match);
5454   int s1h,s1l,s2h,s2l;
5455   int prev_cop1_usable=cop1_usable;
5456   int unconditional=0,nop=0;
5457   int only32=0;
5458   int invert=0;
5459   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5460   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5461   if(!match) invert=1;
5462   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5463   if(i>(ba[i]-start)>>2) invert=1;
5464   #endif
5465   
5466   if(ooo[i]) {
5467     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5468     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5469     s2l=get_reg(branch_regs[i].regmap,rs2[i]);
5470     s2h=get_reg(branch_regs[i].regmap,rs2[i]|64);
5471   }
5472   else {
5473     s1l=get_reg(i_regmap,rs1[i]);
5474     s1h=get_reg(i_regmap,rs1[i]|64);
5475     s2l=get_reg(i_regmap,rs2[i]);
5476     s2h=get_reg(i_regmap,rs2[i]|64);
5477   }
5478   if(rs1[i]==0&&rs2[i]==0)
5479   {
5480     if(opcode[i]&1) nop=1;
5481     else unconditional=1;
5482     //assert(opcode[i]!=5);
5483     //assert(opcode[i]!=7);
5484     //assert(opcode[i]!=0x15);
5485     //assert(opcode[i]!=0x17);
5486   }
5487   else if(rs1[i]==0)
5488   {
5489     s1l=s2l;s1h=s2h;
5490     s2l=s2h=-1;
5491     only32=(regs[i].was32>>rs2[i])&1;
5492   }
5493   else if(rs2[i]==0)
5494   {
5495     s2l=s2h=-1;
5496     only32=(regs[i].was32>>rs1[i])&1;
5497   }
5498   else {
5499     only32=(regs[i].was32>>rs1[i])&(regs[i].was32>>rs2[i])&1;
5500   }
5501
5502   if(ooo[i]) {
5503     // Out of order execution (delay slot first)
5504     //printf("OOOE\n");
5505     address_generation(i+1,i_regs,regs[i].regmap_entry);
5506     ds_assemble(i+1,i_regs);
5507     int adj;
5508     uint64_t bc_unneeded=branch_regs[i].u;
5509     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5510     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5511     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5512     bc_unneeded|=1;
5513     bc_unneeded_upper|=1;
5514     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5515                   bc_unneeded,bc_unneeded_upper);
5516     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
5517     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5518     cc=get_reg(branch_regs[i].regmap,CCREG);
5519     assert(cc==HOST_CCREG);
5520     if(unconditional) 
5521       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5522     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5523     //assem_debug("cycle count (adj)\n");
5524     if(unconditional) {
5525       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5526       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5527         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5528         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5529         if(internal)
5530           assem_debug("branch: internal\n");
5531         else
5532           assem_debug("branch: external\n");
5533         if(internal&&is_ds[(ba[i]-start)>>2]) {
5534           ds_assemble_entry(i);
5535         }
5536         else {
5537           add_to_linker((int)out,ba[i],internal);
5538           emit_jmp(0);
5539         }
5540         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5541         if(((u_int)out)&7) emit_addnop(0);
5542         #endif
5543       }
5544     }
5545     else if(nop) {
5546       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5547       int jaddr=(int)out;
5548       emit_jns(0);
5549       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5550     }
5551     else {
5552       int taken=0,nottaken=0,nottaken1=0;
5553       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5554       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5555       if(!only32)
5556       {
5557         assert(s1h>=0);
5558         if(opcode[i]==4) // BEQ
5559         {
5560           if(s2h>=0) emit_cmp(s1h,s2h);
5561           else emit_test(s1h,s1h);
5562           nottaken1=(int)out;
5563           emit_jne(1);
5564         }
5565         if(opcode[i]==5) // BNE
5566         {
5567           if(s2h>=0) emit_cmp(s1h,s2h);
5568           else emit_test(s1h,s1h);
5569           if(invert) taken=(int)out;
5570           else add_to_linker((int)out,ba[i],internal);
5571           emit_jne(0);
5572         }
5573         if(opcode[i]==6) // BLEZ
5574         {
5575           emit_test(s1h,s1h);
5576           if(invert) taken=(int)out;
5577           else add_to_linker((int)out,ba[i],internal);
5578           emit_js(0);
5579           nottaken1=(int)out;
5580           emit_jne(1);
5581         }
5582         if(opcode[i]==7) // BGTZ
5583         {
5584           emit_test(s1h,s1h);
5585           nottaken1=(int)out;
5586           emit_js(1);
5587           if(invert) taken=(int)out;
5588           else add_to_linker((int)out,ba[i],internal);
5589           emit_jne(0);
5590         }
5591       } // if(!only32)
5592           
5593       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5594       assert(s1l>=0);
5595       if(opcode[i]==4) // BEQ
5596       {
5597         if(s2l>=0) emit_cmp(s1l,s2l);
5598         else emit_test(s1l,s1l);
5599         if(invert){
5600           nottaken=(int)out;
5601           emit_jne(1);
5602         }else{
5603           add_to_linker((int)out,ba[i],internal);
5604           emit_jeq(0);
5605         }
5606       }
5607       if(opcode[i]==5) // BNE
5608       {
5609         if(s2l>=0) emit_cmp(s1l,s2l);
5610         else emit_test(s1l,s1l);
5611         if(invert){
5612           nottaken=(int)out;
5613           emit_jeq(1);
5614         }else{
5615           add_to_linker((int)out,ba[i],internal);
5616           emit_jne(0);
5617         }
5618       }
5619       if(opcode[i]==6) // BLEZ
5620       {
5621         emit_cmpimm(s1l,1);
5622         if(invert){
5623           nottaken=(int)out;
5624           emit_jge(1);
5625         }else{
5626           add_to_linker((int)out,ba[i],internal);
5627           emit_jl(0);
5628         }
5629       }
5630       if(opcode[i]==7) // BGTZ
5631       {
5632         emit_cmpimm(s1l,1);
5633         if(invert){
5634           nottaken=(int)out;
5635           emit_jl(1);
5636         }else{
5637           add_to_linker((int)out,ba[i],internal);
5638           emit_jge(0);
5639         }
5640       }
5641       if(invert) {
5642         if(taken) set_jump_target(taken,(int)out);
5643         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5644         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5645           if(adj) {
5646             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5647             add_to_linker((int)out,ba[i],internal);
5648           }else{
5649             emit_addnop(13);
5650             add_to_linker((int)out,ba[i],internal*2);
5651           }
5652           emit_jmp(0);
5653         }else
5654         #endif
5655         {
5656           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5657           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5658           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5659           if(internal)
5660             assem_debug("branch: internal\n");
5661           else
5662             assem_debug("branch: external\n");
5663           if(internal&&is_ds[(ba[i]-start)>>2]) {
5664             ds_assemble_entry(i);
5665           }
5666           else {
5667             add_to_linker((int)out,ba[i],internal);
5668             emit_jmp(0);
5669           }
5670         }
5671         set_jump_target(nottaken,(int)out);
5672       }
5673
5674       if(nottaken1) set_jump_target(nottaken1,(int)out);
5675       if(adj) {
5676         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
5677       }
5678     } // (!unconditional)
5679   } // if(ooo)
5680   else
5681   {
5682     // In-order execution (branch first)
5683     //if(likely[i]) printf("IOL\n");
5684     //else
5685     //printf("IOE\n");
5686     int taken=0,nottaken=0,nottaken1=0;
5687     if(!unconditional&&!nop) {
5688       if(!only32)
5689       {
5690         assert(s1h>=0);
5691         if((opcode[i]&0x2f)==4) // BEQ
5692         {
5693           if(s2h>=0) emit_cmp(s1h,s2h);
5694           else emit_test(s1h,s1h);
5695           nottaken1=(int)out;
5696           emit_jne(2);
5697         }
5698         if((opcode[i]&0x2f)==5) // BNE
5699         {
5700           if(s2h>=0) emit_cmp(s1h,s2h);
5701           else emit_test(s1h,s1h);
5702           taken=(int)out;
5703           emit_jne(1);
5704         }
5705         if((opcode[i]&0x2f)==6) // BLEZ
5706         {
5707           emit_test(s1h,s1h);
5708           taken=(int)out;
5709           emit_js(1);
5710           nottaken1=(int)out;
5711           emit_jne(2);
5712         }
5713         if((opcode[i]&0x2f)==7) // BGTZ
5714         {
5715           emit_test(s1h,s1h);
5716           nottaken1=(int)out;
5717           emit_js(2);
5718           taken=(int)out;
5719           emit_jne(1);
5720         }
5721       } // if(!only32)
5722           
5723       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5724       assert(s1l>=0);
5725       if((opcode[i]&0x2f)==4) // BEQ
5726       {
5727         if(s2l>=0) emit_cmp(s1l,s2l);
5728         else emit_test(s1l,s1l);
5729         nottaken=(int)out;
5730         emit_jne(2);
5731       }
5732       if((opcode[i]&0x2f)==5) // BNE
5733       {
5734         if(s2l>=0) emit_cmp(s1l,s2l);
5735         else emit_test(s1l,s1l);
5736         nottaken=(int)out;
5737         emit_jeq(2);
5738       }
5739       if((opcode[i]&0x2f)==6) // BLEZ
5740       {
5741         emit_cmpimm(s1l,1);
5742         nottaken=(int)out;
5743         emit_jge(2);
5744       }
5745       if((opcode[i]&0x2f)==7) // BGTZ
5746       {
5747         emit_cmpimm(s1l,1);
5748         nottaken=(int)out;
5749         emit_jl(2);
5750       }
5751     } // if(!unconditional)
5752     int adj;
5753     uint64_t ds_unneeded=branch_regs[i].u;
5754     uint64_t ds_unneeded_upper=branch_regs[i].uu;
5755     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
5756     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
5757     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
5758     ds_unneeded|=1;
5759     ds_unneeded_upper|=1;
5760     // branch taken
5761     if(!nop) {
5762       if(taken) set_jump_target(taken,(int)out);
5763       assem_debug("1:\n");
5764       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5765                     ds_unneeded,ds_unneeded_upper);
5766       // load regs
5767       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5768       address_generation(i+1,&branch_regs[i],0);
5769       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
5770       ds_assemble(i+1,&branch_regs[i]);
5771       cc=get_reg(branch_regs[i].regmap,CCREG);
5772       if(cc==-1) {
5773         emit_loadreg(CCREG,cc=HOST_CCREG);
5774         // CHECK: Is the following instruction (fall thru) allocated ok?
5775       }
5776       assert(cc==HOST_CCREG);
5777       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5778       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
5779       assem_debug("cycle count (adj)\n");
5780       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5781       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5782       if(internal)
5783         assem_debug("branch: internal\n");
5784       else
5785         assem_debug("branch: external\n");
5786       if(internal&&is_ds[(ba[i]-start)>>2]) {
5787         ds_assemble_entry(i);
5788       }
5789       else {
5790         add_to_linker((int)out,ba[i],internal);
5791         emit_jmp(0);
5792       }
5793     }
5794     // branch not taken
5795     cop1_usable=prev_cop1_usable;
5796     if(!unconditional) {
5797       if(nottaken1) set_jump_target(nottaken1,(int)out);
5798       set_jump_target(nottaken,(int)out);
5799       assem_debug("2:\n");
5800       if(!likely[i]) {
5801         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5802                       ds_unneeded,ds_unneeded_upper);
5803         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
5804         address_generation(i+1,&branch_regs[i],0);
5805         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5806         ds_assemble(i+1,&branch_regs[i]);
5807       }
5808       cc=get_reg(branch_regs[i].regmap,CCREG);
5809       if(cc==-1&&!likely[i]) {
5810         // Cycle count isn't in a register, temporarily load it then write it out
5811         emit_loadreg(CCREG,HOST_CCREG);
5812         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
5813         int jaddr=(int)out;
5814         emit_jns(0);
5815         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5816         emit_storereg(CCREG,HOST_CCREG);
5817       }
5818       else{
5819         cc=get_reg(i_regmap,CCREG);
5820         assert(cc==HOST_CCREG);
5821         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5822         int jaddr=(int)out;
5823         emit_jns(0);
5824         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
5825       }
5826     }
5827   }
5828 }
5829
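// Assemble the REGIMM branches (BLTZ/BGEZ/BLTZAL/BGEZAL and their "likely"
// forms).  With rs1 == $zero the BGEZ forms become unconditional and the BLTZ
// forms are never taken; the -AL forms still write the return address to $31
// even when the branch is not taken.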
5830 void sjump_assemble(int i,struct regstat *i_regs)
5831 {
5832   signed char *i_regmap=i_regs->regmap;
5833   int cc;
5834   int match;
5835   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5836   assem_debug("smatch=%d\n",match);
5837   int s1h,s1l;
5838   int prev_cop1_usable=cop1_usable;
5839   int unconditional=0,nevertaken=0;
5840   int only32=0;
5841   int invert=0;
5842   int internal=internal_branch(branch_regs[i].is32,ba[i]);
5843   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
5844   if(!match) invert=1;
5845   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5846   if(i>(ba[i]-start)>>2) invert=1;
5847   #endif
5848
5849   //if(opcode2[i]>=0x10) return; // FIXME (BxxZAL)
5850   //assert(opcode2[i]<0x10||rs1[i]==0); // FIXME (BxxZAL)
5851
5852   if(ooo[i]) {
5853     s1l=get_reg(branch_regs[i].regmap,rs1[i]);
5854     s1h=get_reg(branch_regs[i].regmap,rs1[i]|64);
5855   }
5856   else {
5857     s1l=get_reg(i_regmap,rs1[i]);
5858     s1h=get_reg(i_regmap,rs1[i]|64);
5859   }
5860   if(rs1[i]==0)
5861   {
5862     if(opcode2[i]&1) unconditional=1;
5863     else nevertaken=1;
5864     // With r0, the BLTZ forms are never taken (r0 is never negative); the BGEZ forms are always taken
5865     //assert(opcode2[i]!=0);
5866     //assert(opcode2[i]!=2);
5867     //assert(opcode2[i]!=0x10);
5868     //assert(opcode2[i]!=0x12);
5869   }
5870   else {
5871     only32=(regs[i].was32>>rs1[i])&1;
5872   }
5873
5874   if(ooo[i]) {
5875     // Out of order execution (delay slot first)
5876     //printf("OOOE\n");
5877     address_generation(i+1,i_regs,regs[i].regmap_entry);
5878     ds_assemble(i+1,i_regs);
5879     int adj;
5880     uint64_t bc_unneeded=branch_regs[i].u;
5881     uint64_t bc_unneeded_upper=branch_regs[i].uu;
5882     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
5883     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
5884     bc_unneeded|=1;
5885     bc_unneeded_upper|=1;
5886     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
5887                   bc_unneeded,bc_unneeded_upper);
5888     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
5889     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
5890     if(rt1[i]==31) {
5891       int rt,return_address;
5892       rt=get_reg(branch_regs[i].regmap,31);
5893       assem_debug("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
5894       if(rt>=0) {
5895         // Save the PC even if the branch is not taken
5896         return_address=start+i*4+8;
5897         emit_movimm(return_address,rt); // PC into link register
5898         #ifdef IMM_PREFETCH
5899         if(!nevertaken) emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
5900         #endif
5901       }
5902     }
5903     cc=get_reg(branch_regs[i].regmap,CCREG);
5904     assert(cc==HOST_CCREG);
5905     if(unconditional) 
5906       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5907     //do_cc(i,branch_regs[i].regmap,&adj,unconditional?ba[i]:-1,unconditional);
5908     assem_debug("cycle count (adj)\n");
5909     if(unconditional) {
5910       do_cc(i,branch_regs[i].regmap,&adj,ba[i],TAKEN,0);
5911       if(i!=(ba[i]-start)>>2 || source[i+1]!=0) {
5912         if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5913         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
5914         if(internal)
5915           assem_debug("branch: internal\n");
5916         else
5917           assem_debug("branch: external\n");
5918         if(internal&&is_ds[(ba[i]-start)>>2]) {
5919           ds_assemble_entry(i);
5920         }
5921         else {
5922           add_to_linker((int)out,ba[i],internal);
5923           emit_jmp(0);
5924         }
5925         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5926         if(((u_int)out)&7) emit_addnop(0);
5927         #endif
5928       }
5929     }
5930     else if(nevertaken) {
5931       emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
5932       int jaddr=(int)out;
5933       emit_jns(0);
5934       add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
5935     }
5936     else {
5937       int nottaken=0;
5938       do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
5939       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
5940       if(!only32)
5941       {
5942         assert(s1h>=0);
5943         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5944         {
5945           emit_test(s1h,s1h);
5946           if(invert){
5947             nottaken=(int)out;
5948             emit_jns(1);
5949           }else{
5950             add_to_linker((int)out,ba[i],internal);
5951             emit_js(0);
5952           }
5953         }
5954         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5955         {
5956           emit_test(s1h,s1h);
5957           if(invert){
5958             nottaken=(int)out;
5959             emit_js(1);
5960           }else{
5961             add_to_linker((int)out,ba[i],internal);
5962             emit_jns(0);
5963           }
5964         }
5965       } // if(!only32)
5966       else
5967       {
5968         assert(s1l>=0);
5969         if((opcode2[i]&0xf)==0) // BLTZ/BLTZAL
5970         {
5971           emit_test(s1l,s1l);
5972           if(invert){
5973             nottaken=(int)out;
5974             emit_jns(1);
5975           }else{
5976             add_to_linker((int)out,ba[i],internal);
5977             emit_js(0);
5978           }
5979         }
5980         if((opcode2[i]&0xf)==1) // BGEZ/BGEZAL
5981         {
5982           emit_test(s1l,s1l);
5983           if(invert){
5984             nottaken=(int)out;
5985             emit_js(1);
5986           }else{
5987             add_to_linker((int)out,ba[i],internal);
5988             emit_jns(0);
5989           }
5990         }
5991       } // if(!only32)
5992           
5993       if(invert) {
5994         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
5995         if(match&&(!internal||!is_ds[(ba[i]-start)>>2])) {
5996           if(adj) {
5997             emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
5998             add_to_linker((int)out,ba[i],internal);
5999           }else{
6000             emit_addnop(13);
6001             add_to_linker((int)out,ba[i],internal*2);
6002           }
6003           emit_jmp(0);
6004         }else
6005         #endif
6006         {
6007           if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6008           store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6009           load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6010           if(internal)
6011             assem_debug("branch: internal\n");
6012           else
6013             assem_debug("branch: external\n");
6014           if(internal&&is_ds[(ba[i]-start)>>2]) {
6015             ds_assemble_entry(i);
6016           }
6017           else {
6018             add_to_linker((int)out,ba[i],internal);
6019             emit_jmp(0);
6020           }
6021         }
6022         set_jump_target(nottaken,(int)out);
6023       }
6024
6025       if(adj) {
6026         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6027       }
6028     } // (!unconditional)
6029   } // if(ooo)
6030   else
6031   {
6032     // In-order execution (branch first)
6033     //printf("IOE\n");
6034     int nottaken=0;
6035     if(rt1[i]==31) {
6036       int rt,return_address;
6037       rt=get_reg(branch_regs[i].regmap,31);
6038       if(rt>=0) {
6039         // Save the PC even if the branch is not taken
6040         return_address=start+i*4+8;
6041         emit_movimm(return_address,rt); // PC into link register
6042         #ifdef IMM_PREFETCH
6043         emit_prefetch(hash_table[((return_address>>16)^return_address)&0xFFFF]);
6044         #endif
6045       }
6046     }
6047     if(!unconditional) {
6048       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6049       if(!only32)
6050       {
6051         assert(s1h>=0);
6052         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6053         {
6054           emit_test(s1h,s1h);
6055           nottaken=(int)out;
6056           emit_jns(1);
6057         }
6058         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6059         {
6060           emit_test(s1h,s1h);
6061           nottaken=(int)out;
6062           emit_js(1);
6063         }
6064       } // if(!only32)
6065       else
6066       {
6067         assert(s1l>=0);
6068         if((opcode2[i]&0x0d)==0) // BLTZ/BLTZL/BLTZAL/BLTZALL
6069         {
6070           emit_test(s1l,s1l);
6071           nottaken=(int)out;
6072           emit_jns(1);
6073         }
6074         if((opcode2[i]&0x0d)==1) // BGEZ/BGEZL/BGEZAL/BGEZALL
6075         {
6076           emit_test(s1l,s1l);
6077           nottaken=(int)out;
6078           emit_js(1);
6079         }
6080       }
6081     } // if(!unconditional)
6082     int adj;
6083     uint64_t ds_unneeded=branch_regs[i].u;
6084     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6085     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6086     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6087     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6088     ds_unneeded|=1;
6089     ds_unneeded_upper|=1;
6090     // branch taken
6091     if(!nevertaken) {
6092       //assem_debug("1:\n");
6093       wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6094                     ds_unneeded,ds_unneeded_upper);
6095       // load regs
6096       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6097       address_generation(i+1,&branch_regs[i],0);
6098       load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6099       ds_assemble(i+1,&branch_regs[i]);
6100       cc=get_reg(branch_regs[i].regmap,CCREG);
6101       if(cc==-1) {
6102         emit_loadreg(CCREG,cc=HOST_CCREG);
6103         // CHECK: Is the following instruction (fall thru) allocated ok?
6104       }
6105       assert(cc==HOST_CCREG);
6106       store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6107       do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6108       assem_debug("cycle count (adj)\n");
6109       if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6110       load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6111       if(internal)
6112         assem_debug("branch: internal\n");
6113       else
6114         assem_debug("branch: external\n");
6115       if(internal&&is_ds[(ba[i]-start)>>2]) {
6116         ds_assemble_entry(i);
6117       }
6118       else {
6119         add_to_linker((int)out,ba[i],internal);
6120         emit_jmp(0);
6121       }
6122     }
6123     // branch not taken
6124     cop1_usable=prev_cop1_usable;
6125     if(!unconditional) {
6126       set_jump_target(nottaken,(int)out);
6127       assem_debug("1:\n");
6128       if(!likely[i]) {
6129         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6130                       ds_unneeded,ds_unneeded_upper);
6131         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6132         address_generation(i+1,&branch_regs[i],0);
6133         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6134         ds_assemble(i+1,&branch_regs[i]);
6135       }
6136       cc=get_reg(branch_regs[i].regmap,CCREG);
6137       if(cc==-1&&!likely[i]) {
6138         // Cycle count isn't in a register, temporarily load it then write it out
6139         emit_loadreg(CCREG,HOST_CCREG);
6140         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6141         int jaddr=(int)out;
6142         emit_jns(0);
6143         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6144         emit_storereg(CCREG,HOST_CCREG);
6145       }
6146       else{
6147         cc=get_reg(i_regmap,CCREG);
6148         assert(cc==HOST_CCREG);
6149         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6150         int jaddr=(int)out;
6151         emit_jns(0);
6152         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6153       }
6154     }
6155   }
6156 }
6157
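// Assemble BC1T/BC1F, which test the FP condition bit (mask 0x800000) of the
// status value tracked in FSREG.  A COP1-usable check (FP_STUB) is emitted
// first if this block has not performed one yet.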
6158 void fjump_assemble(int i,struct regstat *i_regs)
6159 {
6160   signed char *i_regmap=i_regs->regmap;
6161   int cc;
6162   int match;
6163   match=match_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6164   assem_debug("fmatch=%d\n",match);
6165   int fs,cs;
6166   int eaddr;
6167   int invert=0;
6168   int internal=internal_branch(branch_regs[i].is32,ba[i]);
6169   if(i==(ba[i]-start)>>2) assem_debug("idle loop\n");
6170   if(!match) invert=1;
6171   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6172   if(i>(ba[i]-start)>>2) invert=1;
6173   #endif
6174
6175   if(ooo[i]) {
6176     fs=get_reg(branch_regs[i].regmap,FSREG);
6177     address_generation(i+1,i_regs,regs[i].regmap_entry); // Is this okay?
6178   }
6179   else {
6180     fs=get_reg(i_regmap,FSREG);
6181   }
6182
6183   // Check cop1 unusable
6184   if(!cop1_usable) {
6185     cs=get_reg(i_regmap,CSREG);
6186     assert(cs>=0);
6187     emit_testimm(cs,0x20000000);
6188     eaddr=(int)out;
6189     emit_jeq(0);
6190     add_stub(FP_STUB,eaddr,(int)out,i,cs,(int)i_regs,0,0);
6191     cop1_usable=1;
6192   }
6193
6194   if(ooo[i]) {
6195     // Out of order execution (delay slot first)
6196     //printf("OOOE\n");
6197     ds_assemble(i+1,i_regs);
6198     int adj;
6199     uint64_t bc_unneeded=branch_regs[i].u;
6200     uint64_t bc_unneeded_upper=branch_regs[i].uu;
6201     bc_unneeded&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6202     bc_unneeded_upper&=~((1LL<<us1[i])|(1LL<<us2[i]));
6203     bc_unneeded|=1;
6204     bc_unneeded_upper|=1;
6205     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6206                   bc_unneeded,bc_unneeded_upper);
6207     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i],rs1[i]);
6208     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6209     cc=get_reg(branch_regs[i].regmap,CCREG);
6210     assert(cc==HOST_CCREG);
6211     do_cc(i,branch_regs[i].regmap,&adj,-1,0,invert);
6212     assem_debug("cycle count (adj)\n");
6213     if(1) {
6214       int nottaken=0;
6215       if(adj&&!invert) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6216       if(1) {
6217         assert(fs>=0);
6218         emit_testimm(fs,0x800000);
6219         if(source[i]&0x10000) // BC1T
6220         {
6221           if(invert){
6222             nottaken=(int)out;
6223             emit_jeq(1);
6224           }else{
6225             add_to_linker((int)out,ba[i],internal);
6226             emit_jne(0);
6227           }
6228         }
6229         else // BC1F
6230         {
6231           if(invert){
6232             nottaken=(int)out;
6233             emit_jne(1);
6234           }else{
6235             add_to_linker((int)out,ba[i],internal);
6236             emit_jeq(0);
6237           }
6238         }
6239       } // if(!only32)
6240           
6241       if(invert) {
6242         if(adj) emit_addimm(cc,-CLOCK_DIVIDER*adj,cc);
6243         #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
6244         else if(match) emit_addnop(13);
6245         #endif
6246         store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6247         load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6248         if(internal)
6249           assem_debug("branch: internal\n");
6250         else
6251           assem_debug("branch: external\n");
6252         if(internal&&is_ds[(ba[i]-start)>>2]) {
6253           ds_assemble_entry(i);
6254         }
6255         else {
6256           add_to_linker((int)out,ba[i],internal);
6257           emit_jmp(0);
6258         }
6259         set_jump_target(nottaken,(int)out);
6260       }
6261
6262       if(adj) {
6263         if(!invert) emit_addimm(cc,CLOCK_DIVIDER*adj,cc);
6264       }
6265     } // (!unconditional)
6266   } // if(ooo)
6267   else
6268   {
6269     // In-order execution (branch first)
6270     //printf("IOE\n");
6271     int nottaken=0;
6272     if(1) {
6273       //printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
6274       if(1) {
6275         assert(fs>=0);
6276         emit_testimm(fs,0x800000);
6277         if(source[i]&0x10000) // BC1T
6278         {
6279           nottaken=(int)out;
6280           emit_jeq(1);
6281         }
6282         else // BC1F
6283         {
6284           nottaken=(int)out;
6285           emit_jne(1);
6286         }
6287       }
6288     } // if(!unconditional)
6289     int adj;
6290     uint64_t ds_unneeded=branch_regs[i].u;
6291     uint64_t ds_unneeded_upper=branch_regs[i].uu;
6292     ds_unneeded&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6293     ds_unneeded_upper&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6294     if((~ds_unneeded_upper>>rt1[i+1])&1) ds_unneeded_upper&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
6295     ds_unneeded|=1;
6296     ds_unneeded_upper|=1;
6297     // branch taken
6298     //assem_debug("1:\n");
6299     wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6300                   ds_unneeded,ds_unneeded_upper);
6301     // load regs
6302     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6303     address_generation(i+1,&branch_regs[i],0);
6304     load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,INVCP);
6305     ds_assemble(i+1,&branch_regs[i]);
6306     cc=get_reg(branch_regs[i].regmap,CCREG);
6307     if(cc==-1) {
6308       emit_loadreg(CCREG,cc=HOST_CCREG);
6309       // CHECK: Is the following instruction (fall thru) allocated ok?
6310     }
6311     assert(cc==HOST_CCREG);
6312     store_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6313     do_cc(i,i_regmap,&adj,ba[i],TAKEN,0);
6314     assem_debug("cycle count (adj)\n");
6315     if(adj) emit_addimm(cc,CLOCK_DIVIDER*(ccadj[i]+2-adj),cc);
6316     load_regs_bt(branch_regs[i].regmap,branch_regs[i].is32,branch_regs[i].dirty,ba[i]);
6317     if(internal)
6318       assem_debug("branch: internal\n");
6319     else
6320       assem_debug("branch: external\n");
6321     if(internal&&is_ds[(ba[i]-start)>>2]) {
6322       ds_assemble_entry(i);
6323     }
6324     else {
6325       add_to_linker((int)out,ba[i],internal);
6326       emit_jmp(0);
6327     }
6328
6329     // branch not taken
6330     if(1) { // <- FIXME (don't need this)
6331       set_jump_target(nottaken,(int)out);
6332       assem_debug("1:\n");
6333       if(!likely[i]) {
6334         wb_invalidate(regs[i].regmap,branch_regs[i].regmap,regs[i].dirty,regs[i].is32,
6335                       ds_unneeded,ds_unneeded_upper);
6336         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,rs1[i+1],rs2[i+1]);
6337         address_generation(i+1,&branch_regs[i],0);
6338         load_regs(regs[i].regmap,branch_regs[i].regmap,regs[i].was32,CCREG,CCREG);
6339         ds_assemble(i+1,&branch_regs[i]);
6340       }
6341       cc=get_reg(branch_regs[i].regmap,CCREG);
6342       if(cc==-1&&!likely[i]) {
6343         // Cycle count isn't in a register, temporarily load it then write it out
6344         emit_loadreg(CCREG,HOST_CCREG);
6345         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6346         int jaddr=(int)out;
6347         emit_jns(0);
6348         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,NOTTAKEN,0);
6349         emit_storereg(CCREG,HOST_CCREG);
6350       }
6351       else{
6352         cc=get_reg(i_regmap,CCREG);
6353         assert(cc==HOST_CCREG);
6354         emit_addimm_and_set_flags(CLOCK_DIVIDER*(ccadj[i]+2),cc);
6355         int jaddr=(int)out;
6356         emit_jns(0);
6357         add_stub(CC_STUB,jaddr,(int)out,0,i,start+i*4+8,likely[i]?NULLDS:NOTTAKEN,0);
6358       }
6359     }
6360   }
6361 }
6362
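// Assemble a branch whose delay slot falls on the next virtual page.  The
// branch target is computed into HOST_BTREG and the block exits through a
// jump to the delay-slot entry, which is registered under an odd address
// (start+i*4+4 plus 1, cf. pagespan_ds() below using start+1); this appears
// to be how delay-slot entry points are tagged.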
6363 static void pagespan_assemble(int i,struct regstat *i_regs)
6364 {
6365   int s1l=get_reg(i_regs->regmap,rs1[i]);
6366   int s1h=get_reg(i_regs->regmap,rs1[i]|64);
6367   int s2l=get_reg(i_regs->regmap,rs2[i]);
6368   int s2h=get_reg(i_regs->regmap,rs2[i]|64);
6369   void *nt_branch=NULL;
6370   int taken=0;
6371   int nottaken=0;
6372   int unconditional=0;
6373   if(rs1[i]==0)
6374   {
6375     s1l=s2l;s1h=s2h;
6376     s2l=s2h=-1;
6377   }
6378   else if(rs2[i]==0)
6379   {
6380     s2l=s2h=-1;
6381   }
6382   if((i_regs->is32>>rs1[i])&(i_regs->is32>>rs2[i])&1) {
6383     s1h=s2h=-1;
6384   }
6385   int hr=0;
6386   int addr,alt,ntaddr;
6387   if(i_regs->regmap[HOST_BTREG]<0) {addr=HOST_BTREG;}
6388   else {
6389     while(hr<HOST_REGS)
6390     {
6391       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG &&
6392          (i_regs->regmap[hr]&63)!=rs1[i] &&
6393          (i_regs->regmap[hr]&63)!=rs2[i] )
6394       {
6395         addr=hr++;break;
6396       }
6397       hr++;
6398     }
6399   }
6400   while(hr<HOST_REGS)
6401   {
6402     if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6403        (i_regs->regmap[hr]&63)!=rs1[i] &&
6404        (i_regs->regmap[hr]&63)!=rs2[i] )
6405     {
6406       alt=hr++;break;
6407     }
6408     hr++;
6409   }
6410   if((opcode[i]&0x2E)==6) // BLEZ/BGTZ needs another register
6411   {
6412     while(hr<HOST_REGS)
6413     {
6414       if(hr!=EXCLUDE_REG && hr!=HOST_CCREG && hr!=HOST_BTREG &&
6415          (i_regs->regmap[hr]&63)!=rs1[i] &&
6416          (i_regs->regmap[hr]&63)!=rs2[i] )
6417       {
6418         ntaddr=hr;break;
6419       }
6420       hr++;
6421     }
6422   }
6423   assert(hr<HOST_REGS);
6424   if((opcode[i]&0x2e)==4||opcode[i]==0x11) { // BEQ/BNE/BEQL/BNEL/BC1
6425     load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
6426   }
6427   emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i]+2),HOST_CCREG);
6428   if(opcode[i]==2) // J
6429   {
6430     unconditional=1;
6431   }
6432   if(opcode[i]==3) // JAL
6433   {
6434     // TODO: mini_ht
6435     int rt=get_reg(i_regs->regmap,31);
6436     emit_movimm(start+i*4+8,rt);
6437     unconditional=1;
6438   }
6439   if(opcode[i]==0&&(opcode2[i]&0x3E)==8) // JR/JALR
6440   {
6441     emit_mov(s1l,addr);
6442     if(opcode2[i]==9) // JALR
6443     {
6444       int rt=get_reg(i_regs->regmap,rt1[i]);
6445       emit_movimm(start+i*4+8,rt);
6446     }
6447   }
6448   if((opcode[i]&0x3f)==4) // BEQ
6449   {
6450     if(rs1[i]==rs2[i])
6451     {
6452       unconditional=1;
6453     }
6454     else
6455     #ifdef HAVE_CMOV_IMM
6456     if(s1h<0) {
6457       if(s2l>=0) emit_cmp(s1l,s2l);
6458       else emit_test(s1l,s1l);
6459       emit_cmov2imm_e_ne_compact(ba[i],start+i*4+8,addr);
6460     }
6461     else
6462     #endif
6463     {
6464       assert(s1l>=0);
6465       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6466       if(s1h>=0) {
6467         if(s2h>=0) emit_cmp(s1h,s2h);
6468         else emit_test(s1h,s1h);
6469         emit_cmovne_reg(alt,addr);
6470       }
6471       if(s2l>=0) emit_cmp(s1l,s2l);
6472       else emit_test(s1l,s1l);
6473       emit_cmovne_reg(alt,addr);
6474     }
6475   }
6476   if((opcode[i]&0x3f)==5) // BNE
6477   {
6478     #ifdef HAVE_CMOV_IMM
6479     if(s1h<0) {
6480       if(s2l>=0) emit_cmp(s1l,s2l);
6481       else emit_test(s1l,s1l);
6482       emit_cmov2imm_e_ne_compact(start+i*4+8,ba[i],addr);
6483     }
6484     else
6485     #endif
6486     {
6487       assert(s1l>=0);
6488       emit_mov2imm_compact(start+i*4+8,addr,ba[i],alt);
6489       if(s1h>=0) {
6490         if(s2h>=0) emit_cmp(s1h,s2h);
6491         else emit_test(s1h,s1h);
6492         emit_cmovne_reg(alt,addr);
6493       }
6494       if(s2l>=0) emit_cmp(s1l,s2l);
6495       else emit_test(s1l,s1l);
6496       emit_cmovne_reg(alt,addr);
6497     }
6498   }
6499   if((opcode[i]&0x3f)==0x14) // BEQL
6500   {
6501     if(s1h>=0) {
6502       if(s2h>=0) emit_cmp(s1h,s2h);
6503       else emit_test(s1h,s1h);
6504       nottaken=(int)out;
6505       emit_jne(0);
6506     }
6507     if(s2l>=0) emit_cmp(s1l,s2l);
6508     else emit_test(s1l,s1l);
6509     if(nottaken) set_jump_target(nottaken,(int)out);
6510     nottaken=(int)out;
6511     emit_jne(0);
6512   }
6513   if((opcode[i]&0x3f)==0x15) // BNEL
6514   {
6515     if(s1h>=0) {
6516       if(s2h>=0) emit_cmp(s1h,s2h);
6517       else emit_test(s1h,s1h);
6518       taken=(int)out;
6519       emit_jne(0);
6520     }
6521     if(s2l>=0) emit_cmp(s1l,s2l);
6522     else emit_test(s1l,s1l);
6523     nottaken=(int)out;
6524     emit_jeq(0);
6525     if(taken) set_jump_target(taken,(int)out);
6526   }
6527   if((opcode[i]&0x3f)==6) // BLEZ
6528   {
6529     emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6530     emit_cmpimm(s1l,1);
6531     if(s1h>=0) emit_mov(addr,ntaddr);
6532     emit_cmovl_reg(alt,addr);
6533     if(s1h>=0) {
6534       emit_test(s1h,s1h);
6535       emit_cmovne_reg(ntaddr,addr);
6536       emit_cmovs_reg(alt,addr);
6537     }
6538   }
6539   if((opcode[i]&0x3f)==7) // BGTZ
6540   {
6541     emit_mov2imm_compact(ba[i],addr,start+i*4+8,ntaddr);
6542     emit_cmpimm(s1l,1);
6543     if(s1h>=0) emit_mov(addr,alt);
6544     emit_cmovl_reg(ntaddr,addr);
6545     if(s1h>=0) {
6546       emit_test(s1h,s1h);
6547       emit_cmovne_reg(alt,addr);
6548       emit_cmovs_reg(ntaddr,addr);
6549     }
6550   }
6551   if((opcode[i]&0x3f)==0x16) // BLEZL
6552   {
6553     assert((opcode[i]&0x3f)!=0x16);
6554   }
6555   if((opcode[i]&0x3f)==0x17) // BGTZL
6556   {
6557     assert((opcode[i]&0x3f)!=0x17);
6558   }
6559   assert(opcode[i]!=1); // BLTZ/BGEZ
6560
6561   //FIXME: Check CSREG
6562   if(opcode[i]==0x11 && opcode2[i]==0x08 ) {
6563     if((source[i]&0x30000)==0) // BC1F
6564     {
6565       emit_mov2imm_compact(ba[i],addr,start+i*4+8,alt);
6566       emit_testimm(s1l,0x800000);
6567       emit_cmovne_reg(alt,addr);
6568     }
6569     if((source[i]&0x30000)==0x10000) // BC1T
6570     {
6571       emit_mov2imm_compact(ba[i],alt,start+i*4+8,addr);
6572       emit_testimm(s1l,0x800000);
6573       emit_cmovne_reg(alt,addr);
6574     }
6575     if((source[i]&0x30000)==0x20000) // BC1FL
6576     {
6577       emit_testimm(s1l,0x800000);
6578       nottaken=(int)out;
6579       emit_jne(0);
6580     }
6581     if((source[i]&0x30000)==0x30000) // BC1TL
6582     {
6583       emit_testimm(s1l,0x800000);
6584       nottaken=(int)out;
6585       emit_jeq(0);
6586     }
6587   }
6588
6589   assert(i_regs->regmap[HOST_CCREG]==CCREG);
6590   wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6591   if(likely[i]||unconditional)
6592   {
6593     emit_movimm(ba[i],HOST_BTREG);
6594   }
6595   else if(addr!=HOST_BTREG)
6596   {
6597     emit_mov(addr,HOST_BTREG);
6598   }
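  // Emit the exit jump to the delay-slot entry in the next page.  If that
  // entry is already compiled, link to it directly and record the link with
  // add_link(); otherwise point the jump at the external-jump stub emitted
  // right after it.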
6599   void *branch_addr=out;
6600   emit_jmp(0);
6601   int target_addr=start+i*4+5;
6602   void *stub=out;
6603   void *compiled_target_addr=check_addr(target_addr);
6604   emit_extjump_ds((int)branch_addr,target_addr);
6605   if(compiled_target_addr) {
6606     set_jump_target((int)branch_addr,(int)compiled_target_addr);
6607     add_link(target_addr,stub);
6608   }
6609   else set_jump_target((int)branch_addr,(int)stub);
6610   if(likely[i]) {
6611     // Not-taken path
6612     set_jump_target((int)nottaken,(int)out);
6613     wb_dirtys(regs[i].regmap,regs[i].is32,regs[i].dirty);
6614     void *branch_addr=out;
6615     emit_jmp(0);
6616     int target_addr=start+i*4+8;
6617     void *stub=out;
6618     void *compiled_target_addr=check_addr(target_addr);
6619     emit_extjump_ds((int)branch_addr,target_addr);
6620     if(compiled_target_addr) {
6621       set_jump_target((int)branch_addr,(int)compiled_target_addr);
6622       add_link(target_addr,stub);
6623     }
6624     else set_jump_target((int)branch_addr,(int)stub);
6625   }
6626 }
6627
6628 // Assemble the delay slot for the above
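     // The slot is compiled as its own entry point and registered at the odd
     // address start+1 so it cannot collide with the block's normal entry at
     // start; after it runs, control is dispatched on the branch target that
     // the page-spanning branch stashed in BTREG.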
6629 static void pagespan_ds()
6630 {
6631   assem_debug("initial delay slot:\n");
6632   u_int vaddr=start+1;
6633   u_int page=get_page(vaddr);
6634   u_int vpage=get_vpage(vaddr);
6635   ll_add(jump_dirty+vpage,vaddr,(void *)out);
6636   do_dirty_stub_ds();
6637   ll_add(jump_in+page,vaddr,(void *)out);
6638   assert(regs[0].regmap_entry[HOST_CCREG]==CCREG);
6639   if(regs[0].regmap[HOST_CCREG]!=CCREG)
6640     wb_register(CCREG,regs[0].regmap_entry,regs[0].wasdirty,regs[0].was32);
6641   if(regs[0].regmap[HOST_BTREG]!=BTREG)
6642     emit_writeword(HOST_BTREG,(int)&branch_target);
6643   load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,rs1[0],rs2[0]);
6644   address_generation(0,&regs[0],regs[0].regmap_entry);
6645   if(itype[0]==STORE||itype[0]==STORELR||(opcode[0]&0x3b)==0x39||(opcode[0]&0x3b)==0x3a)
6646     load_regs(regs[0].regmap_entry,regs[0].regmap,regs[0].was32,INVCP,INVCP);
6647   cop1_usable=0;
6648   is_delayslot=0;
6649   switch(itype[0]) {
6650     case ALU:
6651       alu_assemble(0,&regs[0]);break;
6652     case IMM16:
6653       imm16_assemble(0,&regs[0]);break;
6654     case SHIFT:
6655       shift_assemble(0,&regs[0]);break;
6656     case SHIFTIMM:
6657       shiftimm_assemble(0,&regs[0]);break;
6658     case LOAD:
6659       load_assemble(0,&regs[0]);break;
6660     case LOADLR:
6661       loadlr_assemble(0,&regs[0]);break;
6662     case STORE:
6663       store_assemble(0,&regs[0]);break;
6664     case STORELR:
6665       storelr_assemble(0,&regs[0]);break;
6666     case COP0:
6667       cop0_assemble(0,&regs[0]);break;
6668     case COP1:
6669       cop1_assemble(0,&regs[0]);break;
6670     case C1LS:
6671       c1ls_assemble(0,&regs[0]);break;
6672     case COP2:
6673       cop2_assemble(0,&regs[0]);break;
6674     case C2LS:
6675       c2ls_assemble(0,&regs[0]);break;
6676     case C2OP:
6677       c2op_assemble(0,&regs[0]);break;
6678     case FCONV:
6679       fconv_assemble(0,&regs[0]);break;
6680     case FLOAT:
6681       float_assemble(0,&regs[0]);break;
6682     case FCOMP:
6683       fcomp_assemble(0,&regs[0]);break;
6684     case MULTDIV:
6685       multdiv_assemble(0,&regs[0]);break;
6686     case MOV:
6687       mov_assemble(0,&regs[0]);break;
6688     case SYSCALL:
6689     case HLECALL:
6690     case INTCALL:
6691     case SPAN:
6692     case UJUMP:
6693     case RJUMP:
6694     case CJUMP:
6695     case SJUMP:
6696     case FJUMP:
6697       printf("Jump in the delay slot.  This is probably a bug.\n");
6698   }
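       // Dispatch on the stashed branch target: if it equals the fall-through
       // address (start+4), skip ahead and continue into the normally compiled
       // code; otherwise write back the registers and jump through
       // jump_vaddr_reg[] using the virtual address held in btaddr.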
6699   int btaddr=get_reg(regs[0].regmap,BTREG);
6700   if(btaddr<0) {
6701     btaddr=get_reg(regs[0].regmap,-1);
6702     emit_readword((int)&branch_target,btaddr);
6703   }
6704   assert(btaddr!=HOST_CCREG);
6705   if(regs[0].regmap[HOST_CCREG]!=CCREG) emit_loadreg(CCREG,HOST_CCREG);
6706 #ifdef HOST_IMM8
6707   emit_movimm(start+4,HOST_TEMPREG);
6708   emit_cmp(btaddr,HOST_TEMPREG);
6709 #else
6710   emit_cmpimm(btaddr,start+4);
6711 #endif
6712   int branch=(int)out;
6713   emit_jeq(0);
6714   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,-1);
6715   emit_jmp(jump_vaddr_reg[btaddr]);
6716   set_jump_target(branch,(int)out);
6717   store_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6718   load_regs_bt(regs[0].regmap,regs[0].is32,regs[0].dirty,start+4);
6719 }
6720
6721 // Basic liveness analysis for MIPS registers
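     // u and uu are bitmasks over the MIPS registers: bit r set means the value
     // in register r (u: lower 32 bits, uu: upper 32 bits) is dead at this point
     // and need not be preserved; gte_u is the analogous mask for GTE registers.
     // The scan runs backwards: a register written by an instruction becomes
     // unneeded above it, a register it reads becomes needed.  For example,
     // scanning back across "addu $t0,$a0,$a1", bit 8 ($t0) gets set while bits
     // 4 and 5 ($a0,$a1) get cleared.  Bit 0 stays set since $zero never needs
     // to be preserved.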
6722 void unneeded_registers(int istart,int iend,int r)
6723 {
6724   int i;
6725   uint64_t u,uu,gte_u,b,bu,gte_bu;
6726   uint64_t temp_u,temp_uu,temp_gte_u;
6727   uint64_t tdep;
6728   if(iend==slen-1) {
6729     u=1;uu=1;
6730   }else{
6731     u=unneeded_reg[iend+1];
6732     uu=unneeded_reg_upper[iend+1];
6733     u=1;uu=1;
6734   }
6735   gte_u=temp_gte_u=0;
6736
6737   for (i=iend;i>=istart;i--)
6738   {
6739     //printf("unneeded registers i=%d (%d,%d) r=%d\n",i,istart,iend,r);
6740     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
6741     {
6742       // If subroutine call, flag return address as a possible branch target
6743       if(rt1[i]==31 && i<slen-2) bt[i+2]=1;
6744       
6745       if(ba[i]<start || ba[i]>=(start+slen*4))
6746       {
6747         // Branch out of this block, flush all regs
6748         u=1;
6749         uu=1;
6750         gte_u=0;
6751         /* Hexagon hack 
6752         if(itype[i]==UJUMP&&rt1[i]==31)
6753         {
6754           uu=u=0x300C00F; // Discard at, v0-v1, t6-t9
6755         }
6756         if(itype[i]==RJUMP&&rs1[i]==31)
6757         {
6758           uu=u=0x300C0F3; // Discard at, a0-a3, t6-t9
6759         }
6760         if(start>0x80000400&&start<0x80000000+RAM_SIZE) {
6761           if(itype[i]==UJUMP&&rt1[i]==31)
6762           {
6763             //uu=u=0x30300FF0FLL; // Discard at, v0-v1, t0-t9, lo, hi
6764             uu=u=0x300FF0F; // Discard at, v0-v1, t0-t9
6765           }
6766           if(itype[i]==RJUMP&&rs1[i]==31)
6767           {
6768             //uu=u=0x30300FFF3LL; // Discard at, a0-a3, t0-t9, lo, hi
6769             uu=u=0x300FFF3; // Discard at, a0-a3, t0-t9
6770           }
6771         }*/
6772         branch_unneeded_reg[i]=u;
6773         branch_unneeded_reg_upper[i]=uu;
6774         // Merge in delay slot
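             // Registers written by the delay slot become unneeded, registers it
             // reads become needed.  tdep is 1 when the upper half of the delay
             // slot's destination is still needed; in that case the upper halves
             // of the registers it depends on (dep1/dep2) must be kept as well.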
6775         tdep=(~uu>>rt1[i+1])&1;
6776         u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6777         uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6778         u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6779         uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6780         uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6781         u|=1;uu|=1;
6782         gte_u|=gte_rt[i+1];
6783         gte_u&=~gte_rs[i+1];
6784         // If branch is "likely" (and conditional)
6785         // then we skip the delay slot on the fall-thru path
6786         if(likely[i]) {
6787           if(i<slen-1) {
6788             u&=unneeded_reg[i+2];
6789             uu&=unneeded_reg_upper[i+2];
6790             gte_u&=gte_unneeded[i+2];
6791           }
6792           else
6793           {
6794             u=1;
6795             uu=1;
6796             gte_u=0;
6797           }
6798         }
6799       }
6800       else
6801       {
6802         // Internal branch, flag target
6803         bt[(ba[i]-start)>>2]=1;
6804         if(ba[i]<=start+i*4) {
6805           // Backward branch
6806           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6807           {
6808             // Unconditional branch
6809             temp_u=1;temp_uu=1;
6810             temp_gte_u=0;
6811           } else {
6812             // Conditional branch (not taken case)
6813             temp_u=unneeded_reg[i+2];
6814             temp_uu=unneeded_reg_upper[i+2];
6815             temp_gte_u&=gte_unneeded[i+2];
6816           }
6817           // Merge in delay slot
6818           tdep=(~temp_uu>>rt1[i+1])&1;
6819           temp_u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6820           temp_uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6821           temp_u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6822           temp_uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6823           temp_uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6824           temp_u|=1;temp_uu|=1;
6825           temp_gte_u|=gte_rt[i+1];
6826           temp_gte_u&=~gte_rs[i+1];
6827           // If branch is "likely" (and conditional)
6828           // then we skip the delay slot on the fall-thru path
6829           if(likely[i]) {
6830             if(i<slen-1) {
6831               temp_u&=unneeded_reg[i+2];
6832               temp_uu&=unneeded_reg_upper[i+2];
6833               temp_gte_u&=gte_unneeded[i+2];
6834             }
6835             else
6836             {
6837               temp_u=1;
6838               temp_uu=1;
6839               temp_gte_u=0;
6840             }
6841           }
6842           tdep=(~temp_uu>>rt1[i])&1;
6843           temp_u|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6844           temp_uu|=(1LL<<rt1[i])|(1LL<<rt2[i]);
6845           temp_u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
6846           temp_uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
6847           temp_uu&=~((tdep<<dep1[i])|(tdep<<dep2[i]));
6848           temp_u|=1;temp_uu|=1;
6849           temp_gte_u|=gte_rt[i];
6850           temp_gte_u&=~gte_rs[i];
6851           unneeded_reg[i]=temp_u;
6852           unneeded_reg_upper[i]=temp_uu;
6853           gte_unneeded[i]=temp_gte_u;
6854           // Only go three levels deep.  This recursion can take an
6855           // excessive amount of time if there are a lot of nested loops.
6856           if(r<2) {
6857             unneeded_registers((ba[i]-start)>>2,i-1,r+1);
6858           }else{
6859             unneeded_reg[(ba[i]-start)>>2]=1;
6860             unneeded_reg_upper[(ba[i]-start)>>2]=1;
6861             gte_unneeded[(ba[i]-start)>>2]=0;
6862           }
6863         } /*else*/ if(1) {
6864           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
6865           {
6866             // Unconditional branch
6867             u=unneeded_reg[(ba[i]-start)>>2];
6868             uu=unneeded_reg_upper[(ba[i]-start)>>2];
6869             gte_u=gte_unneeded[(ba[i]-start)>>2];
6870             branch_unneeded_reg[i]=u;
6871             branch_unneeded_reg_upper[i]=uu;
6872         //u=1;
6873         //uu=1;
6874         //branch_unneeded_reg[i]=u;
6875         //branch_unneeded_reg_upper[i]=uu;
6876             // Merge in delay slot
6877             tdep=(~uu>>rt1[i+1])&1;
6878             u|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6879             uu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6880             u&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6881             uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6882             uu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6883             u|=1;uu|=1;
6884             gte_u|=gte_rt[i+1];
6885             gte_u&=~gte_rs[i+1];
6886           } else {
6887             // Conditional branch
6888             b=unneeded_reg[(ba[i]-start)>>2];
6889             bu=unneeded_reg_upper[(ba[i]-start)>>2];
6890             gte_bu=gte_unneeded[(ba[i]-start)>>2];
6891             branch_unneeded_reg[i]=b;
6892             branch_unneeded_reg_upper[i]=bu;
6893         //b=1;
6894         //bu=1;
6895         //branch_unneeded_reg[i]=b;
6896         //branch_unneeded_reg_upper[i]=bu;
6897             // Branch delay slot
6898             tdep=(~uu>>rt1[i+1])&1;
6899             b|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6900             bu|=(1LL<<rt1[i+1])|(1LL<<rt2[i+1]);
6901             b&=~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
6902             bu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
6903             bu&=~((tdep<<dep1[i+1])|(tdep<<dep2[i+1]));
6904             b|=1;bu|=1;
6905             gte_bu|=gte_rt[i+1];
6906             gte_bu&=~gte_rs[i+1];
6907             // If branch is "likely" then we skip the
6908             // delay slot on the fall-thru path
6909             if(likely[i]) {
6910               u=b;
6911               uu=bu;
6912               gte_u=gte_bu;
6913               if(i<slen-1) {
6914                 u&=unneeded_reg[i+2];
6915                 uu&=unneeded_reg_upper[i+2];
6916                 gte_u&=gte_unneeded[i+2];
6917         //u=1;
6918         //uu=1;
6919               }
6920             } else {
6921               u&=b;
6922               uu&=bu;
6923               gte_u&=gte_bu;
6924         //u=1;
6925         //uu=1;
6926             }
6927             if(i<slen-1) {
6928               branch_unneeded_reg[i]&=unneeded_reg[i+2];
6929               branch_unneeded_reg_upper[i]&=unneeded_reg_upper[i+2];
6930         //branch_unneeded_reg[i]=1;
6931         //branch_unneeded_reg_upper[i]=1;
6932             } else {
6933               branch_unneeded_reg[i]=1;
6934               branch_unneeded_reg_upper[i]=1;
6935             }
6936           }
6937         }
6938       }
6939     }
6940     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
6941     {
6942       // SYSCALL instruction (software interrupt)
6943       u=1;
6944       uu=1;
6945     }
6946     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
6947     {
6948       // ERET instruction (return from interrupt)
6949       u=1;
6950       uu=1;
6951     }
6952     //u=uu=1; // DEBUG
6953     tdep=(~uu>>rt1[i])&1;
6954     // Written registers are unneeded
6955     u|=1LL<<rt1[i];
6956     u|=1LL<<rt2[i];
6957     uu|=1LL<<rt1[i];
6958     uu|=1LL<<rt2[i];
6959     gte_u|=gte_rt[i];
6960     // Accessed registers are needed
6961     u&=~(1LL<<rs1[i]);
6962     u&=~(1LL<<rs2[i]);
6963     uu&=~(1LL<<us1[i]);
6964     uu&=~(1LL<<us2[i]);
6965     gte_u&=~gte_rs[i];
6966     // Source-target dependencies
6967     uu&=~(tdep<<dep1[i]);
6968     uu&=~(tdep<<dep2[i]);
6969     // R0 is always unneeded
6970     u|=1;uu|=1;
6971     // Save it
6972     unneeded_reg[i]=u;
6973     unneeded_reg_upper[i]=uu;
6974     gte_unneeded[i]=gte_u;
6975     /*
6976     printf("ur (%d,%d) %x: ",istart,iend,start+i*4);
6977     printf("U:");
6978     int r;
6979     for(r=1;r<=CCREG;r++) {
6980       if((unneeded_reg[i]>>r)&1) {
6981         if(r==HIREG) printf(" HI");
6982         else if(r==LOREG) printf(" LO");
6983         else printf(" r%d",r);
6984       }
6985     }
6986     printf(" UU:");
6987     for(r=1;r<=CCREG;r++) {
6988       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
6989         if(r==HIREG) printf(" HI");
6990         else if(r==LOREG) printf(" LO");
6991         else printf(" r%d",r);
6992       }
6993     }
6994     printf("\n");*/
6995   }
6996 #ifdef FORCE32
6997   for (i=iend;i>=istart;i--)
6998   {
6999     unneeded_reg_upper[i]=branch_unneeded_reg_upper[i]=-1LL;
7000   }
7001 #endif
7002 }
7003
7004 // Identify registers which are likely to contain 32-bit values
7005 // This is used to predict whether any branches will jump to a
7006 // location with 64-bit values in registers.
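     // is32 is a forward-propagated bitmask: bit r set means register r is
     // provisionally assumed to hold a sign-extended 32-bit value at this point.
     // p32[i] records that assumption after instruction i.  At known branch
     // targets the mask is merged with the state recorded for earlier branches
     // that jump there; a branch from later in the block (a loop) resets it to
     // the conservative value 1 (only $zero assumed 32-bit).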
7007 static void provisional_32bit()
7008 {
7009   int i,j;
7010   uint64_t is32=1;
7011   uint64_t lastbranch=1;
7012   
7013   for(i=0;i<slen;i++)
7014   {
7015     if(i>0) {
7016       if(itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP) {
7017         if(i>1) is32=lastbranch;
7018         else is32=1;
7019       }
7020     }
7021     if(i>1)
7022     {
7023       if(itype[i-2]==CJUMP||itype[i-2]==SJUMP||itype[i-2]==FJUMP) {
7024         if(likely[i-2]) {
7025           if(i>2) is32=lastbranch;
7026           else is32=1;
7027         }
7028       }
7029       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
7030       {
7031         if(rs1[i-2]==0||rs2[i-2]==0)
7032         {
7033           if(rs1[i-2]) {
7034             is32|=1LL<<rs1[i-2];
7035           }
7036           if(rs2[i-2]) {
7037             is32|=1LL<<rs2[i-2];
7038           }
7039         }
7040       }
7041     }
7042     // If something jumps here with 64-bit values
7043     // then promote those registers to 64 bits
7044     if(bt[i])
7045     {
7046       uint64_t temp_is32=is32;
7047       for(j=i-1;j>=0;j--)
7048       {
7049         if(ba[j]==start+i*4) 
7050           //temp_is32&=branch_regs[j].is32;
7051           temp_is32&=p32[j];
7052       }
7053       for(j=i;j<slen;j++)
7054       {
7055         if(ba[j]==start+i*4) 
7056           temp_is32=1;
7057       }
7058       is32=temp_is32;
7059     }
7060     int type=itype[i];
7061     int op=opcode[i];
7062     int op2=opcode2[i];
7063     int rt=rt1[i];
7064     int s1=rs1[i];
7065     int s2=rs2[i];
7066     if(type==UJUMP||type==RJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
7067       // Branches don't write registers, consider the delay slot instead.
7068       type=itype[i+1];
7069       op=opcode[i+1];
7070       op2=opcode2[i+1];
7071       rt=rt1[i+1];
7072       s1=rs1[i+1];
7073       s2=rs2[i+1];
7074       lastbranch=is32;
7075     }
7076     switch(type) {
7077       case LOAD:
7078         if(opcode[i]==0x27||opcode[i]==0x37|| // LWU/LD
7079            opcode[i]==0x1A||opcode[i]==0x1B) // LDL/LDR
7080           is32&=~(1LL<<rt);
7081         else
7082           is32|=1LL<<rt;
7083         break;
7084       case STORE:
7085       case STORELR:
7086         break;
7087       case LOADLR:
7088         if(op==0x1a||op==0x1b) is32&=~(1LL<<rt); // LDR/LDL
7089         if(op==0x22) is32|=1LL<<rt; // LWL
7090         break;
7091       case IMM16:
7092         if (op==0x08||op==0x09|| // ADDI/ADDIU
7093             op==0x0a||op==0x0b|| // SLTI/SLTIU
7094             op==0x0c|| // ANDI
7095             op==0x0f)  // LUI
7096         {
7097           is32|=1LL<<rt;
7098         }
7099         if(op==0x18||op==0x19) { // DADDI/DADDIU
7100           is32&=~(1LL<<rt);
7101           //if(imm[i]==0)
7102           //  is32|=((is32>>s1)&1LL)<<rt;
7103         }
7104         if(op==0x0d||op==0x0e) { // ORI/XORI
7105           uint64_t sr=((is32>>s1)&1LL);
7106           is32&=~(1LL<<rt);
7107           is32|=sr<<rt;
7108         }
7109         break;
7110       case UJUMP:
7111         break;
7112       case RJUMP:
7113         break;
7114       case CJUMP:
7115         break;
7116       case SJUMP:
7117         break;
7118       case FJUMP:
7119         break;
7120       case ALU:
7121         if(op2>=0x20&&op2<=0x23) { // ADD/ADDU/SUB/SUBU
7122           is32|=1LL<<rt;
7123         }
7124         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
7125           is32|=1LL<<rt;
7126         }
7127         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
7128           uint64_t sr=((is32>>s1)&(is32>>s2)&1LL);
7129           is32&=~(1LL<<rt);
7130           is32|=sr<<rt;
7131         }
7132         else if(op2>=0x2c&&op2<=0x2d) { // DADD/DADDU
7133           if(s1==0&&s2==0) {
7134             is32|=1LL<<rt;
7135           }
7136           else if(s2==0) {
7137             uint64_t sr=((is32>>s1)&1LL);
7138             is32&=~(1LL<<rt);
7139             is32|=sr<<rt;
7140           }
7141           else if(s1==0) {
7142             uint64_t sr=((is32>>s2)&1LL);
7143             is32&=~(1LL<<rt);
7144             is32|=sr<<rt;
7145           }
7146           else {
7147             is32&=~(1LL<<rt);
7148           }
7149         }
7150         else if(op2>=0x2e&&op2<=0x2f) { // DSUB/DSUBU
7151           if(s1==0&&s2==0) {
7152             is32|=1LL<<rt;
7153           }
7154           else if(s2==0) {
7155             uint64_t sr=((is32>>s1)&1LL);
7156             is32&=~(1LL<<rt);
7157             is32|=sr<<rt;
7158           }
7159           else {
7160             is32&=~(1LL<<rt);
7161           }
7162         }
7163         break;
7164       case MULTDIV:
7165         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
7166           is32&=~((1LL<<HIREG)|(1LL<<LOREG));
7167         }
7168         else {
7169           is32|=(1LL<<HIREG)|(1LL<<LOREG);
7170         }
7171         break;
7172       case MOV:
7173         {
7174           uint64_t sr=((is32>>s1)&1LL);
7175           is32&=~(1LL<<rt);
7176           is32|=sr<<rt;
7177         }
7178         break;
7179       case SHIFT:
7180         if(op2>=0x14&&op2<=0x17) is32&=~(1LL<<rt); // DSLLV/DSRLV/DSRAV
7181         else is32|=1LL<<rt; // SLLV/SRLV/SRAV
7182         break;
7183       case SHIFTIMM:
7184         is32|=1LL<<rt;
7185         // DSLL/DSRL/DSRA/DSLL32/DSRL32 but not DSRA32 have 64-bit result
7186         if(op2>=0x38&&op2<0x3f) is32&=~(1LL<<rt);
7187         break;
7188       case COP0:
7189         if(op2==0) is32|=1LL<<rt; // MFC0
7190         break;
7191       case COP1:
7192       case COP2:
7193         if(op2==0) is32|=1LL<<rt; // MFC1
7194         if(op2==1) is32&=~(1LL<<rt); // DMFC1
7195         if(op2==2) is32|=1LL<<rt; // CFC1
7196         break;
7197       case C1LS:
7198       case C2LS:
7199         break;
7200       case FLOAT:
7201       case FCONV:
7202         break;
7203       case FCOMP:
7204         break;
7205       case C2OP:
7206       case SYSCALL:
7207       case HLECALL:
7208         break;
7209       default:
7210         break;
7211     }
7212     is32|=1;
7213     p32[i]=is32;
7214
7215     if(i>0)
7216     {
7217       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
7218       {
7219         if(rt1[i-1]==31) // JAL/JALR
7220         {
7221           // Subroutine call will return here, don't alloc any registers
7222           is32=1;
7223         }
7224         else if(i+1<slen)
7225         {
7226           // Internal branch will jump here, match registers to caller
7227           is32=0x3FFFFFFFFLL;
7228         }
7229       }
7230     }
7231   }
7232 }
7233
7234 // Identify registers which may be assumed to contain 32-bit values
7235 // and where optimizations will rely on this.
7236 // This is used to determine whether backward branches can safely
7237 // jump to a location with 64-bit values in registers.
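     // r32 (saved per instruction as pr32[i]) is a backward-propagated bitmask
     // of registers that the code from this point on relies on holding proper
     // sign-extended 32-bit values, i.e. a provisional version of the
     // requires_32bit information.  Dirty host registers that hold 32-bit
     // values at entry are included as well, since they will be written back
     // as 32-bit values.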
7238 static void provisional_r32()
7239 {
7240   u_int r32=0;
7241   int i;
7242   
7243   for (i=slen-1;i>=0;i--)
7244   {
7245     int hr;
7246     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7247     {
7248       if(ba[i]<start || ba[i]>=(start+slen*4))
7249       {
7250         // Branch out of this block, don't need anything
7251         r32=0;
7252       }
7253       else
7254       {
7255         // Internal branch
7256         // Need whatever matches the target
7257         // (and doesn't get overwritten by the delay slot instruction)
7258         r32=0;
7259         int t=(ba[i]-start)>>2;
7260         if(ba[i]>start+i*4) {
7261           // Forward branch
7262           //if(!(requires_32bit[t]&~regs[i].was32))
7263           //  r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7264           if(!(pr32[t]&~regs[i].was32))
7265             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7266         }else{
7267           // Backward branch
7268           if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
7269             r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
7270         }
7271       }
7272       // Conditional branch may need registers for following instructions
7273       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
7274       {
7275         if(i<slen-2) {
7276           //r32|=requires_32bit[i+2];
7277           r32|=pr32[i+2];
7278           r32&=regs[i].was32;
7279           // Mark this address as a branch target since it may be called
7280           // upon return from interrupt
7281           //bt[i+2]=1;
7282         }
7283       }
7284       // Merge in delay slot
7285       if(!likely[i]) {
7286         // These are overwritten unless the branch is "likely"
7287         // and the delay slot is nullified if not taken
7288         r32&=~(1LL<<rt1[i+1]);
7289         r32&=~(1LL<<rt2[i+1]);
7290       }
7291       // Assume these are needed (delay slot)
7292       if(us1[i+1]>0)
7293       {
7294         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
7295       }
7296       if(us2[i+1]>0)
7297       {
7298         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
7299       }
7300       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
7301       {
7302         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
7303       }
7304       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
7305       {
7306         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
7307       }
7308     }
7309     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7310     {
7311       // SYSCALL instruction (software interrupt)
7312       r32=0;
7313     }
7314     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7315     {
7316       // ERET instruction (return from interrupt)
7317       r32=0;
7318     }
7319     // Registers written here are no longer required to be 32-bit; sources that are 32-bit become required
7320     r32&=~(1LL<<rt1[i]);
7321     r32&=~(1LL<<rt2[i]);
7322     if(us1[i]>0)
7323     {
7324       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
7325     }
7326     if(us2[i]>0)
7327     {
7328       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
7329     }
7330     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
7331     {
7332       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
7333     }
7334     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
7335     {
7336       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
7337     }
7338     //requires_32bit[i]=r32;
7339     pr32[i]=r32;
7340     
7341     // Dirty registers which are 32-bit require 32-bit input
7342     // as they will be written as 32-bit values
7343     for(hr=0;hr<HOST_REGS;hr++)
7344     {
7345       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
7346         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
7347           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
7348           pr32[i]|=1LL<<regs[i].regmap_entry[hr];
7349           //requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
7350         }
7351       }
7352     }
7353   }
7354 }
7355
7356 // Write back dirty registers as soon as we will no longer modify them,
7357 // so that we don't end up with lots of writes at the branches.
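     // Backward scan over the host registers: will_dirty[i] tracks cached
     // values that are still going to be modified at or after instruction i,
     // and the two masks together are used to trim the dirty/wasdirty bits in
     // regs[]/branch_regs[] so each value is written back at the last point it
     // changes.  wr==0 is used for the one-level recursion into backward
     // branch targets: it propagates the masks without touching regs[] and
     // does not recurse any further.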
7358 void clean_registers(int istart,int iend,int wr)
7359 {
7360   int i;
7361   int r;
7362   u_int will_dirty_i,will_dirty_next,temp_will_dirty;
7363   u_int wont_dirty_i,wont_dirty_next,temp_wont_dirty;
7364   if(iend==slen-1) {
7365     will_dirty_i=will_dirty_next=0;
7366     wont_dirty_i=wont_dirty_next=0;
7367   }else{
7368     will_dirty_i=will_dirty_next=will_dirty[iend+1];
7369     wont_dirty_i=wont_dirty_next=wont_dirty[iend+1];
7370   }
7371   for (i=iend;i>=istart;i--)
7372   {
7373     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7374     {
7375       if(ba[i]<start || ba[i]>=(start+slen*4))
7376       {
7377         // Branch out of this block, flush all regs
7378         if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7379         {
7380           // Unconditional branch
7381           will_dirty_i=0;
7382           wont_dirty_i=0;
7383           // Merge in delay slot (will dirty)
7384           for(r=0;r<HOST_REGS;r++) {
7385             if(r!=EXCLUDE_REG) {
7386               if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7387               if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7388               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7389               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7390               if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7391               if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7392               if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7393               if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7394               if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7395               if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7396               if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7397               if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7398               if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7399               if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7400             }
7401           }
7402         }
7403         else
7404         {
7405           // Conditional branch
7406           will_dirty_i=0;
7407           wont_dirty_i=wont_dirty_next;
7408           // Merge in delay slot (will dirty)
7409           for(r=0;r<HOST_REGS;r++) {
7410             if(r!=EXCLUDE_REG) {
7411               if(!likely[i]) {
7412                 // Might not dirty if likely branch is not taken
7413                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7414                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7415                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7416                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7417                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7418                 if(branch_regs[i].regmap[r]==0) will_dirty_i&=~(1<<r);
7419                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7420                 //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7421                 //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7422                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7423                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7424                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7425                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7426                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7427               }
7428             }
7429           }
7430         }
7431         // Merge in delay slot (won't dirty)
7432         for(r=0;r<HOST_REGS;r++) {
7433           if(r!=EXCLUDE_REG) {
7434             if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7435             if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7436             if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7437             if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7438             if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7439             if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7440             if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7441             if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7442             if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7443             if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7444           }
7445         }
7446         if(wr) {
7447           #ifndef DESTRUCTIVE_WRITEBACK
7448           branch_regs[i].dirty&=wont_dirty_i;
7449           #endif
7450           branch_regs[i].dirty|=will_dirty_i;
7451         }
7452       }
7453       else
7454       {
7455         // Internal branch
7456         if(ba[i]<=start+i*4) {
7457           // Backward branch
7458           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7459           {
7460             // Unconditional branch
7461             temp_will_dirty=0;
7462             temp_wont_dirty=0;
7463             // Merge in delay slot (will dirty)
7464             for(r=0;r<HOST_REGS;r++) {
7465               if(r!=EXCLUDE_REG) {
7466                 if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7467                 if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7468                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7469                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7470                 if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7471                 if(branch_regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7472                 if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7473                 if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7474                 if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7475                 if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7476                 if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7477                 if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7478                 if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7479                 if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7480               }
7481             }
7482           } else {
7483             // Conditional branch (not taken case)
7484             temp_will_dirty=will_dirty_next;
7485             temp_wont_dirty=wont_dirty_next;
7486             // Merge in delay slot (will dirty)
7487             for(r=0;r<HOST_REGS;r++) {
7488               if(r!=EXCLUDE_REG) {
7489                 if(!likely[i]) {
7490                   // Will not dirty if likely branch is not taken
7491                   if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7492                   if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7493                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7494                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7495                   if((branch_regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7496                   if(branch_regs[i].regmap[r]==0) temp_will_dirty&=~(1<<r);
7497                   if(branch_regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7498                   //if((regs[i].regmap[r]&63)==rt1[i]) temp_will_dirty|=1<<r;
7499                   //if((regs[i].regmap[r]&63)==rt2[i]) temp_will_dirty|=1<<r;
7500                   if((regs[i].regmap[r]&63)==rt1[i+1]) temp_will_dirty|=1<<r;
7501                   if((regs[i].regmap[r]&63)==rt2[i+1]) temp_will_dirty|=1<<r;
7502                   if((regs[i].regmap[r]&63)>33) temp_will_dirty&=~(1<<r);
7503                   if(regs[i].regmap[r]<=0) temp_will_dirty&=~(1<<r);
7504                   if(regs[i].regmap[r]==CCREG) temp_will_dirty|=1<<r;
7505                 }
7506               }
7507             }
7508           }
7509           // Merge in delay slot (won't dirty)
7510           for(r=0;r<HOST_REGS;r++) {
7511             if(r!=EXCLUDE_REG) {
7512               if((regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7513               if((regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7514               if((regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7515               if((regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7516               if(regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7517               if((branch_regs[i].regmap[r]&63)==rt1[i]) temp_wont_dirty|=1<<r;
7518               if((branch_regs[i].regmap[r]&63)==rt2[i]) temp_wont_dirty|=1<<r;
7519               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) temp_wont_dirty|=1<<r;
7520               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) temp_wont_dirty|=1<<r;
7521               if(branch_regs[i].regmap[r]==CCREG) temp_wont_dirty|=1<<r;
7522             }
7523           }
7524           // Deal with changed mappings
7525           if(i<iend) {
7526             for(r=0;r<HOST_REGS;r++) {
7527               if(r!=EXCLUDE_REG) {
7528                 if(regs[i].regmap[r]!=regmap_pre[i][r]) {
7529                   temp_will_dirty&=~(1<<r);
7530                   temp_wont_dirty&=~(1<<r);
7531                   if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7532                     temp_will_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7533                     temp_wont_dirty|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7534                   } else {
7535                     temp_will_dirty|=1<<r;
7536                     temp_wont_dirty|=1<<r;
7537                   }
7538                 }
7539               }
7540             }
7541           }
7542           if(wr) {
7543             will_dirty[i]=temp_will_dirty;
7544             wont_dirty[i]=temp_wont_dirty;
7545             clean_registers((ba[i]-start)>>2,i-1,0);
7546           }else{
7547             // Limit recursion.  It can take an excessive amount
7548             // of time if there are a lot of nested loops.
7549             will_dirty[(ba[i]-start)>>2]=0;
7550             wont_dirty[(ba[i]-start)>>2]=-1;
7551           }
7552         }
7553         /*else*/ if(1)
7554         {
7555           if(itype[i]==RJUMP||itype[i]==UJUMP||(source[i]>>16)==0x1000)
7556           {
7557             // Unconditional branch
7558             will_dirty_i=0;
7559             wont_dirty_i=0;
7560           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7561             for(r=0;r<HOST_REGS;r++) {
7562               if(r!=EXCLUDE_REG) {
7563                 if(branch_regs[i].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7564                   will_dirty_i|=will_dirty[(ba[i]-start)>>2]&(1<<r);
7565                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7566                 }
7567                 if(branch_regs[i].regmap[r]>=0) {
7568                   will_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7569                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(branch_regs[i].regmap[r]&63))&1)<<r;
7570                 }
7571               }
7572             }
7573           //}
7574             // Merge in delay slot
7575             for(r=0;r<HOST_REGS;r++) {
7576               if(r!=EXCLUDE_REG) {
7577                 if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7578                 if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7579                 if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7580                 if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7581                 if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7582                 if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7583                 if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7584                 if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7585                 if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7586                 if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7587                 if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7588                 if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7589                 if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7590                 if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7591               }
7592             }
7593           } else {
7594             // Conditional branch
7595             will_dirty_i=will_dirty_next;
7596             wont_dirty_i=wont_dirty_next;
7597           //if(ba[i]>start+i*4) { // Disable recursion (for debugging)
7598             for(r=0;r<HOST_REGS;r++) {
7599               if(r!=EXCLUDE_REG) {
7600                 signed char target_reg=branch_regs[i].regmap[r];
7601                 if(target_reg==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7602                   will_dirty_i&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7603                   wont_dirty_i|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7604                 }
7605                 else if(target_reg>=0) {
7606                   will_dirty_i&=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7607                   wont_dirty_i|=((unneeded_reg[(ba[i]-start)>>2]>>(target_reg&63))&1)<<r;
7608                 }
7609                 // Treat delay slot as part of branch too
7610                 /*if(regs[i+1].regmap[r]==regs[(ba[i]-start)>>2].regmap_entry[r]) {
7611                   will_dirty[i+1]&=will_dirty[(ba[i]-start)>>2]&(1<<r);
7612                   wont_dirty[i+1]|=wont_dirty[(ba[i]-start)>>2]&(1<<r);
7613                 }
7614                 else
7615                 {
7616                   will_dirty[i+1]&=~(1<<r);
7617                 }*/
7618               }
7619             }
7620           //}
7621             // Merge in delay slot
7622             for(r=0;r<HOST_REGS;r++) {
7623               if(r!=EXCLUDE_REG) {
7624                 if(!likely[i]) {
7625                   // Might not dirty if likely branch is not taken
7626                   if((branch_regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7627                   if((branch_regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7628                   if((branch_regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7629                   if((branch_regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7630                   if((branch_regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7631                   if(branch_regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7632                   if(branch_regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7633                   //if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7634                   //if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7635                   if((regs[i].regmap[r]&63)==rt1[i+1]) will_dirty_i|=1<<r;
7636                   if((regs[i].regmap[r]&63)==rt2[i+1]) will_dirty_i|=1<<r;
7637                   if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7638                   if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7639                   if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7640                 }
7641               }
7642             }
7643           }
7644           // Merge in delay slot (won't dirty)
7645           for(r=0;r<HOST_REGS;r++) {
7646             if(r!=EXCLUDE_REG) {
7647               if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7648               if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7649               if((regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7650               if((regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7651               if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7652               if((branch_regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7653               if((branch_regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7654               if((branch_regs[i].regmap[r]&63)==rt1[i+1]) wont_dirty_i|=1<<r;
7655               if((branch_regs[i].regmap[r]&63)==rt2[i+1]) wont_dirty_i|=1<<r;
7656               if(branch_regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7657             }
7658           }
7659           if(wr) {
7660             #ifndef DESTRUCTIVE_WRITEBACK
7661             branch_regs[i].dirty&=wont_dirty_i;
7662             #endif
7663             branch_regs[i].dirty|=will_dirty_i;
7664           }
7665         }
7666       }
7667     }
7668     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
7669     {
7670       // SYSCALL instruction (software interrupt)
7671       will_dirty_i=0;
7672       wont_dirty_i=0;
7673     }
7674     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
7675     {
7676       // ERET instruction (return from interrupt)
7677       will_dirty_i=0;
7678       wont_dirty_i=0;
7679     }
7680     will_dirty_next=will_dirty_i;
7681     wont_dirty_next=wont_dirty_i;
7682     for(r=0;r<HOST_REGS;r++) {
7683       if(r!=EXCLUDE_REG) {
7684         if((regs[i].regmap[r]&63)==rt1[i]) will_dirty_i|=1<<r;
7685         if((regs[i].regmap[r]&63)==rt2[i]) will_dirty_i|=1<<r;
7686         if((regs[i].regmap[r]&63)>33) will_dirty_i&=~(1<<r);
7687         if(regs[i].regmap[r]<=0) will_dirty_i&=~(1<<r);
7688         if(regs[i].regmap[r]==CCREG) will_dirty_i|=1<<r;
7689         if((regs[i].regmap[r]&63)==rt1[i]) wont_dirty_i|=1<<r;
7690         if((regs[i].regmap[r]&63)==rt2[i]) wont_dirty_i|=1<<r;
7691         if(regs[i].regmap[r]==CCREG) wont_dirty_i|=1<<r;
7692         if(i>istart) {
7693           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=FJUMP) 
7694           {
7695             // Don't store a register immediately after writing it,
7696             // as doing so may prevent dual-issue.
7697             if((regs[i].regmap[r]&63)==rt1[i-1]) wont_dirty_i|=1<<r;
7698             if((regs[i].regmap[r]&63)==rt2[i-1]) wont_dirty_i|=1<<r;
7699           }
7700         }
7701       }
7702     }
7703     // Save it
7704     will_dirty[i]=will_dirty_i;
7705     wont_dirty[i]=wont_dirty_i;
7706     // Mark registers that won't be dirtied as not dirty
7707     if(wr) {
7708       /*printf("wr (%d,%d) %x will:",istart,iend,start+i*4);
7709       for(r=0;r<HOST_REGS;r++) {
7710         if((will_dirty_i>>r)&1) {
7711           printf(" r%d",r);
7712         }
7713       }
7714       printf("\n");*/
7715
7716       //if(i==istart||(itype[i-1]!=RJUMP&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=FJUMP)) {
7717         regs[i].dirty|=will_dirty_i;
7718         #ifndef DESTRUCTIVE_WRITEBACK
7719         regs[i].dirty&=wont_dirty_i;
7720         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
7721         {
7722           if(i<iend-1&&itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
7723             for(r=0;r<HOST_REGS;r++) {
7724               if(r!=EXCLUDE_REG) {
7725                 if(regs[i].regmap[r]==regmap_pre[i+2][r]) {
7726                   regs[i+2].wasdirty&=wont_dirty_i|~(1<<r);
7727                 }else {/*printf("i: %x (%d) mismatch(+2): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7728               }
7729             }
7730           }
7731         }
7732         else
7733         {
7734           if(i<iend) {
7735             for(r=0;r<HOST_REGS;r++) {
7736               if(r!=EXCLUDE_REG) {
7737                 if(regs[i].regmap[r]==regmap_pre[i+1][r]) {
7738                   regs[i+1].wasdirty&=wont_dirty_i|~(1<<r);
7739                 }else {/*printf("i: %x (%d) mismatch(+1): %d\n",start+i*4,i,r);*/ /*assert(!((wont_dirty_i>>r)&1));*/}
7740               }
7741             }
7742           }
7743         }
7744         #endif
7745       //}
7746     }
7747     // Deal with changed mappings
7748     temp_will_dirty=will_dirty_i;
7749     temp_wont_dirty=wont_dirty_i;
7750     for(r=0;r<HOST_REGS;r++) {
7751       if(r!=EXCLUDE_REG) {
7752         int nr;
7753         if(regs[i].regmap[r]==regmap_pre[i][r]) {
7754           if(wr) {
7755             #ifndef DESTRUCTIVE_WRITEBACK
7756             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7757             #endif
7758             regs[i].wasdirty|=will_dirty_i&(1<<r);
7759           }
7760         }
7761         else if(regmap_pre[i][r]>=0&&(nr=get_reg(regs[i].regmap,regmap_pre[i][r]))>=0) {
7762           // Register moved to a different register
7763           will_dirty_i&=~(1<<r);
7764           wont_dirty_i&=~(1<<r);
7765           will_dirty_i|=((temp_will_dirty>>nr)&1)<<r;
7766           wont_dirty_i|=((temp_wont_dirty>>nr)&1)<<r;
7767           if(wr) {
7768             #ifndef DESTRUCTIVE_WRITEBACK
7769             regs[i].wasdirty&=wont_dirty_i|~(1<<r);
7770             #endif
7771             regs[i].wasdirty|=will_dirty_i&(1<<r);
7772           }
7773         }
7774         else {
7775           will_dirty_i&=~(1<<r);
7776           wont_dirty_i&=~(1<<r);
7777           if((regmap_pre[i][r]&63)>0 && (regmap_pre[i][r]&63)<34) {
7778             will_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7779             wont_dirty_i|=((unneeded_reg[i]>>(regmap_pre[i][r]&63))&1)<<r;
7780           } else {
7781             wont_dirty_i|=1<<r;
7782             /*printf("i: %x (%d) mismatch: %d\n",start+i*4,i,r);*/ /*assert(!((will_dirty>>r)&1));*/
7783           }
7784         }
7785       }
7786     }
7787   }
7788 }
7789
7790 #ifdef DISASM
7791   /* disassembly */
7792 void disassemble_inst(int i)
7793 {
7794     if (bt[i]) printf("*"); else printf(" ");
7795     switch(itype[i]) {
7796       case UJUMP:
7797         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7798       case CJUMP:
7799         printf (" %x: %s r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],i?start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14):*ba);break;
7800       case SJUMP:
7801         printf (" %x: %s r%d,%8x\n",start+i*4,insn[i],rs1[i],start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14));break;
7802       case FJUMP:
7803         printf (" %x: %s %8x\n",start+i*4,insn[i],ba[i]);break;
7804       case RJUMP:
7805         if (opcode[i]==0x9&&rt1[i]!=31)
7806           printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i]);
7807         else
7808           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7809         break;
7810       case SPAN:
7811         printf (" %x: %s (pagespan) r%d,r%d,%8x\n",start+i*4,insn[i],rs1[i],rs2[i],ba[i]);break;
7812       case IMM16:
7813         if(opcode[i]==0xf) //LUI
7814           printf (" %x: %s r%d,%4x0000\n",start+i*4,insn[i],rt1[i],imm[i]&0xffff);
7815         else
7816           printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7817         break;
7818       case LOAD:
7819       case LOADLR:
7820         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7821         break;
7822       case STORE:
7823       case STORELR:
7824         printf (" %x: %s r%d,r%d+%x\n",start+i*4,insn[i],rs2[i],rs1[i],imm[i]);
7825         break;
7826       case ALU:
7827       case SHIFT:
7828         printf (" %x: %s r%d,r%d,r%d\n",start+i*4,insn[i],rt1[i],rs1[i],rs2[i]);
7829         break;
7830       case MULTDIV:
7831         printf (" %x: %s r%d,r%d\n",start+i*4,insn[i],rs1[i],rs2[i]);
7832         break;
7833       case SHIFTIMM:
7834         printf (" %x: %s r%d,r%d,%d\n",start+i*4,insn[i],rt1[i],rs1[i],imm[i]);
7835         break;
7836       case MOV:
7837         if((opcode2[i]&0x1d)==0x10)
7838           printf (" %x: %s r%d\n",start+i*4,insn[i],rt1[i]);
7839         else if((opcode2[i]&0x1d)==0x11)
7840           printf (" %x: %s r%d\n",start+i*4,insn[i],rs1[i]);
7841         else
7842           printf (" %x: %s\n",start+i*4,insn[i]);
7843         break;
7844       case COP0:
7845         if(opcode2[i]==0)
7846           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC0
7847         else if(opcode2[i]==4)
7848           printf (" %x: %s r%d,cpr0[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC0
7849         else printf (" %x: %s\n",start+i*4,insn[i]);
7850         break;
7851       case COP1:
7852         if(opcode2[i]<3)
7853           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC1
7854         else if(opcode2[i]>3)
7855           printf (" %x: %s r%d,cpr1[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC1
7856         else printf (" %x: %s\n",start+i*4,insn[i]);
7857         break;
7858       case COP2:
7859         if(opcode2[i]<3)
7860           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rt1[i],(source[i]>>11)&0x1f); // MFC2
7861         else if(opcode2[i]>3)
7862           printf (" %x: %s r%d,cpr2[%d]\n",start+i*4,insn[i],rs1[i],(source[i]>>11)&0x1f); // MTC2
7863         else printf (" %x: %s\n",start+i*4,insn[i]);
7864         break;
7865       case C1LS:
7866         printf (" %x: %s cpr1[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7867         break;
7868       case C2LS:
7869         printf (" %x: %s cpr2[%d],r%d+%x\n",start+i*4,insn[i],(source[i]>>16)&0x1f,rs1[i],imm[i]);
7870         break;
7871       case INTCALL:
7872         printf (" %x: %s (INTCALL)\n",start+i*4,insn[i]);
7873         break;
7874       default:
7875         //printf (" %s %8x\n",insn[i],source[i]);
7876         printf (" %x: %s\n",start+i*4,insn[i]);
7877     }
7878 }
7879 #else
7880 static void disassemble_inst(int i) {}
7881 #endif // DISASM
7882
7883 // clear the state completely, instead of just marking
7884 // things invalid like invalidate_all_pages() does
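     // Resets the output pointer to the start of the translation cache, clears
     // the hash tables, the jump_in/jump_out/jump_dirty lists and the
     // invalidation state, and rebuilds memory_map so only the RAM region at
     // 0x80000000 is mapped to rdram.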
7885 void new_dynarec_clear_full()
7886 {
7887   int n;
7888   out=(u_char *)BASE_ADDR;
7889   memset(invalid_code,1,sizeof(invalid_code));
7890   memset(hash_table,0xff,sizeof(hash_table));
7891   memset(mini_ht,-1,sizeof(mini_ht));
7892   memset(restore_candidate,0,sizeof(restore_candidate));
7893   memset(shadow,0,sizeof(shadow));
7894   copy=shadow;
7895   expirep=16384; // Expiry pointer, +2 blocks
7896   pending_exception=0;
7897   literalcount=0;
7898   stop_after_jal=0;
7899   inv_code_start=inv_code_end=~0;
7900   gte_reads_flags=0;
7901   // TLB
7902 #ifndef DISABLE_TLB
7903   using_tlb=0;
7904 #endif
7905   sp_in_mirror=0;
7906   for(n=0;n<524288;n++) // 0 .. 0x7FFFFFFF
7907     memory_map[n]=-1;
7908   for(n=524288;n<526336;n++) // 0x80000000 .. 0x807FFFFF
7909     memory_map[n]=((u_int)rdram-0x80000000)>>2;
7910   for(n=526336;n<1048576;n++) // 0x80800000 .. 0xFFFFFFFF
7911     memory_map[n]=-1;
7912   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7913   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7914   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7915 }
7916
7917 void new_dynarec_init()
7918 {
7919   printf("Init new dynarec\n");
7920   out=(u_char *)BASE_ADDR;
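       // Map the translation cache: a fixed read/write/execute region of
       // 1<<TARGET_SIZE_2 bytes at BASE_ADDR, filled through 'out' as code is
       // generated.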
7921   if (mmap (out, 1<<TARGET_SIZE_2,
7922             PROT_READ | PROT_WRITE | PROT_EXEC,
7923             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
7924             -1, 0) == MAP_FAILED) {printf("mmap() failed\n");}
7925 #ifdef MUPEN64
7926   rdword=&readmem_dword;
7927   fake_pc.f.r.rs=&readmem_dword;
7928   fake_pc.f.r.rt=&readmem_dword;
7929   fake_pc.f.r.rd=&readmem_dword;
7930 #endif
7931   int n;
7932   new_dynarec_clear_full();
7933 #ifdef HOST_IMM8
7934   // Copy this into local area so we don't have to put it in every literal pool
7935   invc_ptr=invalid_code;
7936 #endif
7937 #ifdef MUPEN64
7938   for(n=0;n<0x8000;n++) { // 0 .. 0x7FFFFFFF
7939     writemem[n] = write_nomem_new;
7940     writememb[n] = write_nomemb_new;
7941     writememh[n] = write_nomemh_new;
7942 #ifndef FORCE32
7943     writememd[n] = write_nomemd_new;
7944 #endif
7945     readmem[n] = read_nomem_new;
7946     readmemb[n] = read_nomemb_new;
7947     readmemh[n] = read_nomemh_new;
7948 #ifndef FORCE32
7949     readmemd[n] = read_nomemd_new;
7950 #endif
7951   }
7952   for(n=0x8000;n<0x8080;n++) { // 0x80000000 .. 0x807FFFFF
7953     writemem[n] = write_rdram_new;
7954     writememb[n] = write_rdramb_new;
7955     writememh[n] = write_rdramh_new;
7956 #ifndef FORCE32
7957     writememd[n] = write_rdramd_new;
7958 #endif
7959   }
7960   for(n=0xC000;n<0x10000;n++) { // 0xC0000000 .. 0xFFFFFFFF
7961     writemem[n] = write_nomem_new;
7962     writememb[n] = write_nomemb_new;
7963     writememh[n] = write_nomemh_new;
7964 #ifndef FORCE32
7965     writememd[n] = write_nomemd_new;
7966 #endif
7967     readmem[n] = read_nomem_new;
7968     readmemb[n] = read_nomemb_new;
7969     readmemh[n] = read_nomemh_new;
7970 #ifndef FORCE32
7971     readmemd[n] = read_nomemd_new;
7972 #endif
7973   }
7974 #endif
7975   tlb_hacks();
7976   arch_init();
7977 }
7978
7979 void new_dynarec_cleanup()
7980 {
7981   int n;
7982   if (munmap ((void *)BASE_ADDR, 1<<TARGET_SIZE_2) < 0) {printf("munmap() failed\n");}
7983   for(n=0;n<4096;n++) ll_clear(jump_in+n);
7984   for(n=0;n<4096;n++) ll_clear(jump_out+n);
7985   for(n=0;n<4096;n++) ll_clear(jump_dirty+n);
7986   #ifdef ROM_COPY
7987   if (munmap (ROM_COPY, 67108864) < 0) {printf("munmap() failed\n");}
7988   #endif
7989 }
7990
7991 int new_recompile_block(int addr)
7992 {
7993 /*
7994   if(addr==0x800cd050) {
7995     int block;
7996     for(block=0x80000;block<0x80800;block++) invalidate_block(block);
7997     int n;
7998     for(n=0;n<=2048;n++) ll_clear(jump_dirty+n);
7999   }
8000 */
8001   //if(Count==365117028) tracedebug=1;
8002   assem_debug("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8003   //printf("NOTCOMPILED: addr = %x -> %x\n", (int)addr, (int)out);
8004   //printf("TRACE: count=%d next=%d (compile %x)\n",Count,next_interupt,addr);
8005   //if(debug) 
8006   //printf("TRACE: count=%d next=%d (checksum %x)\n",Count,next_interupt,mchecksum());
8007   //printf("fpu mapping=%x enabled=%x\n",(Status & 0x04000000)>>26,(Status & 0x20000000)>>29);
8008   /*if(Count>=312978186) {
8009     rlist();
8010   }*/
8011   //rlist();
8012   start = (u_int)addr&~3;
8013   //assert(((u_int)addr&1)==0);
8014 #ifdef PCSX
8015   if(!sp_in_mirror&&(signed int)(psxRegs.GPR.n.sp&0xffe00000)>0x80200000&&
8016      0x10000<=psxRegs.GPR.n.sp&&(psxRegs.GPR.n.sp&~0xe0e00000)<RAM_SIZE) {
8017     printf("SP hack enabled (%08x), @%08x\n", psxRegs.GPR.n.sp, psxRegs.pc);
8018     sp_in_mirror=1;
8019   }
8020   if (Config.HLE && start == 0x80001000) // hlecall
8021   {
8022     // XXX: is this enough? Maybe check hleSoftCall?
8023     u_int beginning=(u_int)out;
8024     u_int page=get_page(start);
8025     invalid_code[start>>12]=0;
8026     emit_movimm(start,0);
8027     emit_writeword(0,(int)&pcaddr);
8028     emit_jmp((int)new_dyna_leave);
8029     literal_pool(0);
8030 #ifdef __arm__
8031     __clear_cache((void *)beginning,out);
8032 #endif
8033     ll_add(jump_in+page,start,(void *)beginning);
8034     return 0;
8035   }
8036   else if ((u_int)addr < 0x00200000 ||
8037     (0xa0000000 <= addr && addr < 0xa0200000)) {
8038     // used for BIOS calls mostly?
8039     source = (u_int *)((u_int)rdram+(start&0x1fffff));
8040     pagelimit = (addr&0xa0000000)|0x00200000;
8041   }
8042   else if (!Config.HLE && (
8043 /*    (0x9fc00000 <= addr && addr < 0x9fc80000) ||*/
8044     (0xbfc00000 <= addr && addr < 0xbfc80000))) {
8045     // BIOS
8046     source = (u_int *)((u_int)psxR+(start&0x7ffff));
8047     pagelimit = (addr&0xfff00000)|0x80000;
8048   }
8049   else
8050 #endif
8051 #ifdef MUPEN64
8052   if ((int)addr >= 0xa4000000 && (int)addr < 0xa4001000) {
8053     source = (u_int *)((u_int)SP_DMEM+start-0xa4000000);
8054     pagelimit = 0xa4001000;
8055   }
8056   else
8057 #endif
8058   if ((int)addr >= 0x80000000 && (int)addr < 0x80000000+RAM_SIZE) {
8059     source = (u_int *)((u_int)rdram+start-0x80000000);
8060     pagelimit = 0x80000000+RAM_SIZE;
8061   }
8062 #ifndef DISABLE_TLB
8063   else if ((signed int)addr >= (signed int)0xC0000000) {
8064     //printf("addr=%x mm=%x\n",(u_int)addr,(memory_map[start>>12]<<2));
8065     //if(tlb_LUT_r[start>>12])
8066       //source = (u_int *)(((int)rdram)+(tlb_LUT_r[start>>12]&0xFFFFF000)+(((int)addr)&0xFFF)-0x80000000);
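    // A non-negative memory_map entry maps this 4KB guest page to host memory
    // (host address = guest address + (entry<<2)); a negative entry means
    // unmapped.  pagelimit is extended over the following pages as long as
    // they share the same mapping (bit 30, a per-page flag, is ignored in the
    // comparison).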
8067     if((signed int)memory_map[start>>12]>=0) {
8068       source = (u_int *)((u_int)(start+(memory_map[start>>12]<<2)));
8069       pagelimit=(start+4096)&0xFFFFF000;
8070       int map=memory_map[start>>12];
8071       int i;
8072       for(i=0;i<5;i++) {
8073         //printf("start: %x next: %x\n",map,memory_map[pagelimit>>12]);
8074         if((map&0xBFFFFFFF)==(memory_map[pagelimit>>12]&0xBFFFFFFF)) pagelimit+=4096;
8075       }
8076       assem_debug("pagelimit=%x\n",pagelimit);
8077       assem_debug("mapping=%x (%x)\n",memory_map[start>>12],(memory_map[start>>12]<<2)+start);
8078     }
8079     else {
8080       assem_debug("Compile at unmapped memory address: %x \n", (int)addr);
8081       //assem_debug("start: %x next: %x\n",memory_map[start>>12],memory_map[(start+4096)>>12]);
8082       return -1; // Caller will invoke exception handler
8083     }
8084     //printf("source= %x\n",(int)source);
8085   }
8086 #endif
8087   else {
8088     printf("Compile at bogus memory address: %x \n", (int)addr);
8089     exit(1);
8090   }
8091
8092   /* Pass 1: disassemble */
8093   /* Pass 2: register dependencies, branch targets */
8094   /* Pass 3: register allocation */
8095   /* Pass 4: branch dependencies */
8096   /* Pass 5: pre-alloc */
8097   /* Pass 6: optimize clean/dirty state */
8098   /* Pass 7: flag 32-bit registers */
8099   /* Pass 8: assembly */
8100   /* Pass 9: linker */
8101   /* Pass 10: garbage collection / free memory */
8102
8103   int i,j;
8104   int done=0;
8105   unsigned int type,op,op2;
8106
8107   //printf("addr = %x source = %x %x\n", addr,source,source[0]);
8108   
8109   /* Pass 1 disassembly */
8110
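  // Decode note: the primary opcode is bits 31:26 of the instruction word;
  // SPECIAL (0x00) selects on the function field (bits 5:0), REGIMM (0x01)
  // on the rt field (bits 20:16), and the coprocessor opcodes select on the
  // format field (bits 25:21).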
8111   for(i=0;!done;i++) {
8112     bt[i]=0;likely[i]=0;ooo[i]=0;op2=0;
8113     minimum_free_regs[i]=0;
8114     opcode[i]=op=source[i]>>26;
8115     switch(op)
8116     {
8117       case 0x00: strcpy(insn[i],"special"); type=NI;
8118         op2=source[i]&0x3f;
8119         switch(op2)
8120         {
8121           case 0x00: strcpy(insn[i],"SLL"); type=SHIFTIMM; break;
8122           case 0x02: strcpy(insn[i],"SRL"); type=SHIFTIMM; break;
8123           case 0x03: strcpy(insn[i],"SRA"); type=SHIFTIMM; break;
8124           case 0x04: strcpy(insn[i],"SLLV"); type=SHIFT; break;
8125           case 0x06: strcpy(insn[i],"SRLV"); type=SHIFT; break;
8126           case 0x07: strcpy(insn[i],"SRAV"); type=SHIFT; break;
8127           case 0x08: strcpy(insn[i],"JR"); type=RJUMP; break;
8128           case 0x09: strcpy(insn[i],"JALR"); type=RJUMP; break;
8129           case 0x0C: strcpy(insn[i],"SYSCALL"); type=SYSCALL; break;
8130           case 0x0D: strcpy(insn[i],"BREAK"); type=OTHER; break;
8131           case 0x0F: strcpy(insn[i],"SYNC"); type=OTHER; break;
8132           case 0x10: strcpy(insn[i],"MFHI"); type=MOV; break;
8133           case 0x11: strcpy(insn[i],"MTHI"); type=MOV; break;
8134           case 0x12: strcpy(insn[i],"MFLO"); type=MOV; break;
8135           case 0x13: strcpy(insn[i],"MTLO"); type=MOV; break;
8136           case 0x18: strcpy(insn[i],"MULT"); type=MULTDIV; break;
8137           case 0x19: strcpy(insn[i],"MULTU"); type=MULTDIV; break;
8138           case 0x1A: strcpy(insn[i],"DIV"); type=MULTDIV; break;
8139           case 0x1B: strcpy(insn[i],"DIVU"); type=MULTDIV; break;
8140           case 0x20: strcpy(insn[i],"ADD"); type=ALU; break;
8141           case 0x21: strcpy(insn[i],"ADDU"); type=ALU; break;
8142           case 0x22: strcpy(insn[i],"SUB"); type=ALU; break;
8143           case 0x23: strcpy(insn[i],"SUBU"); type=ALU; break;
8144           case 0x24: strcpy(insn[i],"AND"); type=ALU; break;
8145           case 0x25: strcpy(insn[i],"OR"); type=ALU; break;
8146           case 0x26: strcpy(insn[i],"XOR"); type=ALU; break;
8147           case 0x27: strcpy(insn[i],"NOR"); type=ALU; break;
8148           case 0x2A: strcpy(insn[i],"SLT"); type=ALU; break;
8149           case 0x2B: strcpy(insn[i],"SLTU"); type=ALU; break;
8150           case 0x30: strcpy(insn[i],"TGE"); type=NI; break;
8151           case 0x31: strcpy(insn[i],"TGEU"); type=NI; break;
8152           case 0x32: strcpy(insn[i],"TLT"); type=NI; break;
8153           case 0x33: strcpy(insn[i],"TLTU"); type=NI; break;
8154           case 0x34: strcpy(insn[i],"TEQ"); type=NI; break;
8155           case 0x36: strcpy(insn[i],"TNE"); type=NI; break;
8156 #ifndef FORCE32
8157           case 0x14: strcpy(insn[i],"DSLLV"); type=SHIFT; break;
8158           case 0x16: strcpy(insn[i],"DSRLV"); type=SHIFT; break;
8159           case 0x17: strcpy(insn[i],"DSRAV"); type=SHIFT; break;
8160           case 0x1C: strcpy(insn[i],"DMULT"); type=MULTDIV; break;
8161           case 0x1D: strcpy(insn[i],"DMULTU"); type=MULTDIV; break;
8162           case 0x1E: strcpy(insn[i],"DDIV"); type=MULTDIV; break;
8163           case 0x1F: strcpy(insn[i],"DDIVU"); type=MULTDIV; break;
8164           case 0x2C: strcpy(insn[i],"DADD"); type=ALU; break;
8165           case 0x2D: strcpy(insn[i],"DADDU"); type=ALU; break;
8166           case 0x2E: strcpy(insn[i],"DSUB"); type=ALU; break;
8167           case 0x2F: strcpy(insn[i],"DSUBU"); type=ALU; break;
8168           case 0x38: strcpy(insn[i],"DSLL"); type=SHIFTIMM; break;
8169           case 0x3A: strcpy(insn[i],"DSRL"); type=SHIFTIMM; break;
8170           case 0x3B: strcpy(insn[i],"DSRA"); type=SHIFTIMM; break;
8171           case 0x3C: strcpy(insn[i],"DSLL32"); type=SHIFTIMM; break;
8172           case 0x3E: strcpy(insn[i],"DSRL32"); type=SHIFTIMM; break;
8173           case 0x3F: strcpy(insn[i],"DSRA32"); type=SHIFTIMM; break;
8174 #endif
8175         }
8176         break;
8177       case 0x01: strcpy(insn[i],"regimm"); type=NI;
8178         op2=(source[i]>>16)&0x1f;
8179         switch(op2)
8180         {
8181           case 0x00: strcpy(insn[i],"BLTZ"); type=SJUMP; break;
8182           case 0x01: strcpy(insn[i],"BGEZ"); type=SJUMP; break;
8183           case 0x02: strcpy(insn[i],"BLTZL"); type=SJUMP; break;
8184           case 0x03: strcpy(insn[i],"BGEZL"); type=SJUMP; break;
8185           case 0x08: strcpy(insn[i],"TGEI"); type=NI; break;
8186           case 0x09: strcpy(insn[i],"TGEIU"); type=NI; break;
8187           case 0x0A: strcpy(insn[i],"TLTI"); type=NI; break;
8188           case 0x0B: strcpy(insn[i],"TLTIU"); type=NI; break;
8189           case 0x0C: strcpy(insn[i],"TEQI"); type=NI; break;
8190           case 0x0E: strcpy(insn[i],"TNEI"); type=NI; break;
8191           case 0x10: strcpy(insn[i],"BLTZAL"); type=SJUMP; break;
8192           case 0x11: strcpy(insn[i],"BGEZAL"); type=SJUMP; break;
8193           case 0x12: strcpy(insn[i],"BLTZALL"); type=SJUMP; break;
8194           case 0x13: strcpy(insn[i],"BGEZALL"); type=SJUMP; break;
8195         }
8196         break;
8197       case 0x02: strcpy(insn[i],"J"); type=UJUMP; break;
8198       case 0x03: strcpy(insn[i],"JAL"); type=UJUMP; break;
8199       case 0x04: strcpy(insn[i],"BEQ"); type=CJUMP; break;
8200       case 0x05: strcpy(insn[i],"BNE"); type=CJUMP; break;
8201       case 0x06: strcpy(insn[i],"BLEZ"); type=CJUMP; break;
8202       case 0x07: strcpy(insn[i],"BGTZ"); type=CJUMP; break;
8203       case 0x08: strcpy(insn[i],"ADDI"); type=IMM16; break;
8204       case 0x09: strcpy(insn[i],"ADDIU"); type=IMM16; break;
8205       case 0x0A: strcpy(insn[i],"SLTI"); type=IMM16; break;
8206       case 0x0B: strcpy(insn[i],"SLTIU"); type=IMM16; break;
8207       case 0x0C: strcpy(insn[i],"ANDI"); type=IMM16; break;
8208       case 0x0D: strcpy(insn[i],"ORI"); type=IMM16; break;
8209       case 0x0E: strcpy(insn[i],"XORI"); type=IMM16; break;
8210       case 0x0F: strcpy(insn[i],"LUI"); type=IMM16; break;
8211       case 0x10: strcpy(insn[i],"cop0"); type=NI;
8212         op2=(source[i]>>21)&0x1f;
8213         switch(op2)
8214         {
8215           case 0x00: strcpy(insn[i],"MFC0"); type=COP0; break;
8216           case 0x04: strcpy(insn[i],"MTC0"); type=COP0; break;
8217           case 0x10: strcpy(insn[i],"tlb"); type=NI;
8218           switch(source[i]&0x3f)
8219           {
8220             case 0x01: strcpy(insn[i],"TLBR"); type=COP0; break;
8221             case 0x02: strcpy(insn[i],"TLBWI"); type=COP0; break;
8222             case 0x06: strcpy(insn[i],"TLBWR"); type=COP0; break;
8223             case 0x08: strcpy(insn[i],"TLBP"); type=COP0; break;
8224 #ifdef PCSX
8225             case 0x10: strcpy(insn[i],"RFE"); type=COP0; break;
8226 #else
8227             case 0x18: strcpy(insn[i],"ERET"); type=COP0; break;
8228 #endif
8229           }
8230         }
8231         break;
8232       case 0x11: strcpy(insn[i],"cop1"); type=NI;
8233         op2=(source[i]>>21)&0x1f;
8234         switch(op2)
8235         {
8236           case 0x00: strcpy(insn[i],"MFC1"); type=COP1; break;
8237           case 0x01: strcpy(insn[i],"DMFC1"); type=COP1; break;
8238           case 0x02: strcpy(insn[i],"CFC1"); type=COP1; break;
8239           case 0x04: strcpy(insn[i],"MTC1"); type=COP1; break;
8240           case 0x05: strcpy(insn[i],"DMTC1"); type=COP1; break;
8241           case 0x06: strcpy(insn[i],"CTC1"); type=COP1; break;
8242           case 0x08: strcpy(insn[i],"BC1"); type=FJUMP;
8243           switch((source[i]>>16)&0x3)
8244           {
8245             case 0x00: strcpy(insn[i],"BC1F"); break;
8246             case 0x01: strcpy(insn[i],"BC1T"); break;
8247             case 0x02: strcpy(insn[i],"BC1FL"); break;
8248             case 0x03: strcpy(insn[i],"BC1TL"); break;
8249           }
8250           break;
8251           case 0x10: strcpy(insn[i],"C1.S"); type=NI;
8252           switch(source[i]&0x3f)
8253           {
8254             case 0x00: strcpy(insn[i],"ADD.S"); type=FLOAT; break;
8255             case 0x01: strcpy(insn[i],"SUB.S"); type=FLOAT; break;
8256             case 0x02: strcpy(insn[i],"MUL.S"); type=FLOAT; break;
8257             case 0x03: strcpy(insn[i],"DIV.S"); type=FLOAT; break;
8258             case 0x04: strcpy(insn[i],"SQRT.S"); type=FLOAT; break;
8259             case 0x05: strcpy(insn[i],"ABS.S"); type=FLOAT; break;
8260             case 0x06: strcpy(insn[i],"MOV.S"); type=FLOAT; break;
8261             case 0x07: strcpy(insn[i],"NEG.S"); type=FLOAT; break;
8262             case 0x08: strcpy(insn[i],"ROUND.L.S"); type=FCONV; break;
8263             case 0x09: strcpy(insn[i],"TRUNC.L.S"); type=FCONV; break;
8264             case 0x0A: strcpy(insn[i],"CEIL.L.S"); type=FCONV; break;
8265             case 0x0B: strcpy(insn[i],"FLOOR.L.S"); type=FCONV; break;
8266             case 0x0C: strcpy(insn[i],"ROUND.W.S"); type=FCONV; break;
8267             case 0x0D: strcpy(insn[i],"TRUNC.W.S"); type=FCONV; break;
8268             case 0x0E: strcpy(insn[i],"CEIL.W.S"); type=FCONV; break;
8269             case 0x0F: strcpy(insn[i],"FLOOR.W.S"); type=FCONV; break;
8270             case 0x21: strcpy(insn[i],"CVT.D.S"); type=FCONV; break;
8271             case 0x24: strcpy(insn[i],"CVT.W.S"); type=FCONV; break;
8272             case 0x25: strcpy(insn[i],"CVT.L.S"); type=FCONV; break;
8273             case 0x30: strcpy(insn[i],"C.F.S"); type=FCOMP; break;
8274             case 0x31: strcpy(insn[i],"C.UN.S"); type=FCOMP; break;
8275             case 0x32: strcpy(insn[i],"C.EQ.S"); type=FCOMP; break;
8276             case 0x33: strcpy(insn[i],"C.UEQ.S"); type=FCOMP; break;
8277             case 0x34: strcpy(insn[i],"C.OLT.S"); type=FCOMP; break;
8278             case 0x35: strcpy(insn[i],"C.ULT.S"); type=FCOMP; break;
8279             case 0x36: strcpy(insn[i],"C.OLE.S"); type=FCOMP; break;
8280             case 0x37: strcpy(insn[i],"C.ULE.S"); type=FCOMP; break;
8281             case 0x38: strcpy(insn[i],"C.SF.S"); type=FCOMP; break;
8282             case 0x39: strcpy(insn[i],"C.NGLE.S"); type=FCOMP; break;
8283             case 0x3A: strcpy(insn[i],"C.SEQ.S"); type=FCOMP; break;
8284             case 0x3B: strcpy(insn[i],"C.NGL.S"); type=FCOMP; break;
8285             case 0x3C: strcpy(insn[i],"C.LT.S"); type=FCOMP; break;
8286             case 0x3D: strcpy(insn[i],"C.NGE.S"); type=FCOMP; break;
8287             case 0x3E: strcpy(insn[i],"C.LE.S"); type=FCOMP; break;
8288             case 0x3F: strcpy(insn[i],"C.NGT.S"); type=FCOMP; break;
8289           }
8290           break;
8291           case 0x11: strcpy(insn[i],"C1.D"); type=NI;
8292           switch(source[i]&0x3f)
8293           {
8294             case 0x00: strcpy(insn[i],"ADD.D"); type=FLOAT; break;
8295             case 0x01: strcpy(insn[i],"SUB.D"); type=FLOAT; break;
8296             case 0x02: strcpy(insn[i],"MUL.D"); type=FLOAT; break;
8297             case 0x03: strcpy(insn[i],"DIV.D"); type=FLOAT; break;
8298             case 0x04: strcpy(insn[i],"SQRT.D"); type=FLOAT; break;
8299             case 0x05: strcpy(insn[i],"ABS.D"); type=FLOAT; break;
8300             case 0x06: strcpy(insn[i],"MOV.D"); type=FLOAT; break;
8301             case 0x07: strcpy(insn[i],"NEG.D"); type=FLOAT; break;
8302             case 0x08: strcpy(insn[i],"ROUND.L.D"); type=FCONV; break;
8303             case 0x09: strcpy(insn[i],"TRUNC.L.D"); type=FCONV; break;
8304             case 0x0A: strcpy(insn[i],"CEIL.L.D"); type=FCONV; break;
8305             case 0x0B: strcpy(insn[i],"FLOOR.L.D"); type=FCONV; break;
8306             case 0x0C: strcpy(insn[i],"ROUND.W.D"); type=FCONV; break;
8307             case 0x0D: strcpy(insn[i],"TRUNC.W.D"); type=FCONV; break;
8308             case 0x0E: strcpy(insn[i],"CEIL.W.D"); type=FCONV; break;
8309             case 0x0F: strcpy(insn[i],"FLOOR.W.D"); type=FCONV; break;
8310             case 0x20: strcpy(insn[i],"CVT.S.D"); type=FCONV; break;
8311             case 0x24: strcpy(insn[i],"CVT.W.D"); type=FCONV; break;
8312             case 0x25: strcpy(insn[i],"CVT.L.D"); type=FCONV; break;
8313             case 0x30: strcpy(insn[i],"C.F.D"); type=FCOMP; break;
8314             case 0x31: strcpy(insn[i],"C.UN.D"); type=FCOMP; break;
8315             case 0x32: strcpy(insn[i],"C.EQ.D"); type=FCOMP; break;
8316             case 0x33: strcpy(insn[i],"C.UEQ.D"); type=FCOMP; break;
8317             case 0x34: strcpy(insn[i],"C.OLT.D"); type=FCOMP; break;
8318             case 0x35: strcpy(insn[i],"C.ULT.D"); type=FCOMP; break;
8319             case 0x36: strcpy(insn[i],"C.OLE.D"); type=FCOMP; break;
8320             case 0x37: strcpy(insn[i],"C.ULE.D"); type=FCOMP; break;
8321             case 0x38: strcpy(insn[i],"C.SF.D"); type=FCOMP; break;
8322             case 0x39: strcpy(insn[i],"C.NGLE.D"); type=FCOMP; break;
8323             case 0x3A: strcpy(insn[i],"C.SEQ.D"); type=FCOMP; break;
8324             case 0x3B: strcpy(insn[i],"C.NGL.D"); type=FCOMP; break;
8325             case 0x3C: strcpy(insn[i],"C.LT.D"); type=FCOMP; break;
8326             case 0x3D: strcpy(insn[i],"C.NGE.D"); type=FCOMP; break;
8327             case 0x3E: strcpy(insn[i],"C.LE.D"); type=FCOMP; break;
8328             case 0x3F: strcpy(insn[i],"C.NGT.D"); type=FCOMP; break;
8329           }
8330           break;
8331           case 0x14: strcpy(insn[i],"C1.W"); type=NI;
8332           switch(source[i]&0x3f)
8333           {
8334             case 0x20: strcpy(insn[i],"CVT.S.W"); type=FCONV; break;
8335             case 0x21: strcpy(insn[i],"CVT.D.W"); type=FCONV; break;
8336           }
8337           break;
8338           case 0x15: strcpy(insn[i],"C1.L"); type=NI;
8339           switch(source[i]&0x3f)
8340           {
8341             case 0x20: strcpy(insn[i],"CVT.S.L"); type=FCONV; break;
8342             case 0x21: strcpy(insn[i],"CVT.D.L"); type=FCONV; break;
8343           }
8344           break;
8345         }
8346         break;
8347 #ifndef FORCE32
8348       case 0x14: strcpy(insn[i],"BEQL"); type=CJUMP; break;
8349       case 0x15: strcpy(insn[i],"BNEL"); type=CJUMP; break;
8350       case 0x16: strcpy(insn[i],"BLEZL"); type=CJUMP; break;
8351       case 0x17: strcpy(insn[i],"BGTZL"); type=CJUMP; break;
8352       case 0x18: strcpy(insn[i],"DADDI"); type=IMM16; break;
8353       case 0x19: strcpy(insn[i],"DADDIU"); type=IMM16; break;
8354       case 0x1A: strcpy(insn[i],"LDL"); type=LOADLR; break;
8355       case 0x1B: strcpy(insn[i],"LDR"); type=LOADLR; break;
8356 #endif
8357       case 0x20: strcpy(insn[i],"LB"); type=LOAD; break;
8358       case 0x21: strcpy(insn[i],"LH"); type=LOAD; break;
8359       case 0x22: strcpy(insn[i],"LWL"); type=LOADLR; break;
8360       case 0x23: strcpy(insn[i],"LW"); type=LOAD; break;
8361       case 0x24: strcpy(insn[i],"LBU"); type=LOAD; break;
8362       case 0x25: strcpy(insn[i],"LHU"); type=LOAD; break;
8363       case 0x26: strcpy(insn[i],"LWR"); type=LOADLR; break;
8364 #ifndef FORCE32
8365       case 0x27: strcpy(insn[i],"LWU"); type=LOAD; break;
8366 #endif
8367       case 0x28: strcpy(insn[i],"SB"); type=STORE; break;
8368       case 0x29: strcpy(insn[i],"SH"); type=STORE; break;
8369       case 0x2A: strcpy(insn[i],"SWL"); type=STORELR; break;
8370       case 0x2B: strcpy(insn[i],"SW"); type=STORE; break;
8371 #ifndef FORCE32
8372       case 0x2C: strcpy(insn[i],"SDL"); type=STORELR; break;
8373       case 0x2D: strcpy(insn[i],"SDR"); type=STORELR; break;
8374 #endif
8375       case 0x2E: strcpy(insn[i],"SWR"); type=STORELR; break;
8376       case 0x2F: strcpy(insn[i],"CACHE"); type=NOP; break;
8377       case 0x30: strcpy(insn[i],"LL"); type=NI; break;
8378       case 0x31: strcpy(insn[i],"LWC1"); type=C1LS; break;
8379 #ifndef FORCE32
8380       case 0x34: strcpy(insn[i],"LLD"); type=NI; break;
8381       case 0x35: strcpy(insn[i],"LDC1"); type=C1LS; break;
8382       case 0x37: strcpy(insn[i],"LD"); type=LOAD; break;
8383 #endif
8384       case 0x38: strcpy(insn[i],"SC"); type=NI; break;
8385       case 0x39: strcpy(insn[i],"SWC1"); type=C1LS; break;
8386 #ifndef FORCE32
8387       case 0x3C: strcpy(insn[i],"SCD"); type=NI; break;
8388       case 0x3D: strcpy(insn[i],"SDC1"); type=C1LS; break;
8389       case 0x3F: strcpy(insn[i],"SD"); type=STORE; break;
8390 #endif
8391 #ifdef PCSX
8392       case 0x12: strcpy(insn[i],"COP2"); type=NI;
8393         op2=(source[i]>>21)&0x1f;
8394         //if (op2 & 0x10) {
8395         if (source[i]&0x3f) { // use this hack to support old savestates with patched gte insns
8396           if (gte_handlers[source[i]&0x3f]!=NULL) {
8397             if (gte_regnames[source[i]&0x3f]!=NULL)
8398               strcpy(insn[i],gte_regnames[source[i]&0x3f]);
8399             else
8400               snprintf(insn[i], sizeof(insn[i]), "COP2 %x", source[i]&0x3f);
8401             type=C2OP;
8402           }
8403         }
8404         else switch(op2)
8405         {
8406           case 0x00: strcpy(insn[i],"MFC2"); type=COP2; break;
8407           case 0x02: strcpy(insn[i],"CFC2"); type=COP2; break;
8408           case 0x04: strcpy(insn[i],"MTC2"); type=COP2; break;
8409           case 0x06: strcpy(insn[i],"CTC2"); type=COP2; break;
8410         }
8411         break;
8412       case 0x32: strcpy(insn[i],"LWC2"); type=C2LS; break;
8413       case 0x3A: strcpy(insn[i],"SWC2"); type=C2LS; break;
8414       case 0x3B: strcpy(insn[i],"HLECALL"); type=HLECALL; break;
8415 #endif
8416       default: strcpy(insn[i],"???"); type=NI;
8417         printf("NI %08x @%08x (%08x)\n", source[i], addr + i*4, addr);
8418         break;
8419     }
8420     itype[i]=type;
8421     opcode2[i]=op2;
8422     /* Get registers/immediates */
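    // rs1/rs2: source registers, rt1/rt2: destination registers,
    // us1/us2: sources whose full 64-bit value is needed,
    // dep1/dep2: registers whose 32/64-bit status the result depends on,
    // imm: sign- or zero-extended immediate, as appropriate for the opcode.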
8423     lt1[i]=0;
8424     us1[i]=0;
8425     us2[i]=0;
8426     dep1[i]=0;
8427     dep2[i]=0;
8428     gte_rs[i]=gte_rt[i]=0;
8429     switch(type) {
8430       case LOAD:
8431         rs1[i]=(source[i]>>21)&0x1f;
8432         rs2[i]=0;
8433         rt1[i]=(source[i]>>16)&0x1f;
8434         rt2[i]=0;
8435         imm[i]=(short)source[i];
8436         break;
8437       case STORE:
8438       case STORELR:
8439         rs1[i]=(source[i]>>21)&0x1f;
8440         rs2[i]=(source[i]>>16)&0x1f;
8441         rt1[i]=0;
8442         rt2[i]=0;
8443         imm[i]=(short)source[i];
8444         if(op==0x2c||op==0x2d||op==0x3f) us1[i]=rs2[i]; // 64-bit SDL/SDR/SD
8445         break;
8446       case LOADLR:
8447         // LWL/LWR only load part of the register,
8448         // therefore the target register must be treated as a source too
8449         rs1[i]=(source[i]>>21)&0x1f;
8450         rs2[i]=(source[i]>>16)&0x1f;
8451         rt1[i]=(source[i]>>16)&0x1f;
8452         rt2[i]=0;
8453         imm[i]=(short)source[i];
8454         if(op==0x1a||op==0x1b) us1[i]=rs2[i]; // LDL/LDR
8455         if(op==0x26) dep1[i]=rt1[i]; // LWR
8456         break;
8457       case IMM16:
8458         if (op==0x0f) rs1[i]=0; // LUI instruction has no source register
8459         else rs1[i]=(source[i]>>21)&0x1f;
8460         rs2[i]=0;
8461         rt1[i]=(source[i]>>16)&0x1f;
8462         rt2[i]=0;
8463         if(op>=0x0c&&op<=0x0e) { // ANDI/ORI/XORI
8464           imm[i]=(unsigned short)source[i];
8465         }else{
8466           imm[i]=(short)source[i];
8467         }
8468         if(op==0x18||op==0x19) us1[i]=rs1[i]; // DADDI/DADDIU
8469         if(op==0x0a||op==0x0b) us1[i]=rs1[i]; // SLTI/SLTIU
8470         if(op==0x0d||op==0x0e) dep1[i]=rs1[i]; // ORI/XORI
8471         break;
8472       case UJUMP:
8473         rs1[i]=0;
8474         rs2[i]=0;
8475         rt1[i]=0;
8476         rt2[i]=0;
8477         // The JAL instruction writes to r31.
8478         if (op&1) {
8479           rt1[i]=31;
8480         }
8481         rs2[i]=CCREG;
8482         break;
8483       case RJUMP:
8484         rs1[i]=(source[i]>>21)&0x1f;
8485         rs2[i]=0;
8486         rt1[i]=0;
8487         rt2[i]=0;
8488         // The JALR instruction writes to rd.
8489         if (op2&1) {
8490           rt1[i]=(source[i]>>11)&0x1f;
8491         }
8492         rs2[i]=CCREG;
8493         break;
8494       case CJUMP:
8495         rs1[i]=(source[i]>>21)&0x1f;
8496         rs2[i]=(source[i]>>16)&0x1f;
8497         rt1[i]=0;
8498         rt2[i]=0;
8499         if(op&2) { // BGTZ/BLEZ
8500           rs2[i]=0;
8501         }
8502         us1[i]=rs1[i];
8503         us2[i]=rs2[i];
8504         likely[i]=op>>4;
8505         break;
8506       case SJUMP:
8507         rs1[i]=(source[i]>>21)&0x1f;
8508         rs2[i]=CCREG;
8509         rt1[i]=0;
8510         rt2[i]=0;
8511         us1[i]=rs1[i];
8512         if(op2&0x10) { // BxxAL
8513           rt1[i]=31;
8514           // NOTE: If the branch is not taken, r31 is still overwritten
8515         }
8516         likely[i]=(op2&2)>>1;
8517         break;
8518       case FJUMP:
8519         rs1[i]=FSREG;
8520         rs2[i]=CSREG;
8521         rt1[i]=0;
8522         rt2[i]=0;
8523         likely[i]=((source[i])>>17)&1;
8524         break;
8525       case ALU:
8526         rs1[i]=(source[i]>>21)&0x1f; // source
8527         rs2[i]=(source[i]>>16)&0x1f; // second source operand (subtrahend for SUB/SUBU)
8528         rt1[i]=(source[i]>>11)&0x1f; // destination
8529         rt2[i]=0;
8530         if(op2==0x2a||op2==0x2b) { // SLT/SLTU
8531           us1[i]=rs1[i];us2[i]=rs2[i];
8532         }
8533         else if(op2>=0x24&&op2<=0x27) { // AND/OR/XOR/NOR
8534           dep1[i]=rs1[i];dep2[i]=rs2[i];
8535         }
8536         else if(op2>=0x2c&&op2<=0x2f) { // DADD/DSUB
8537           dep1[i]=rs1[i];dep2[i]=rs2[i];
8538         }
8539         break;
8540       case MULTDIV:
8541         rs1[i]=(source[i]>>21)&0x1f; // source
8542         rs2[i]=(source[i]>>16)&0x1f; // second operand (multiplier/divisor)
8543         rt1[i]=HIREG;
8544         rt2[i]=LOREG;
8545         if (op2>=0x1c&&op2<=0x1f) { // DMULT/DMULTU/DDIV/DDIVU
8546           us1[i]=rs1[i];us2[i]=rs2[i];
8547         }
8548         break;
8549       case MOV:
8550         rs1[i]=0;
8551         rs2[i]=0;
8552         rt1[i]=0;
8553         rt2[i]=0;
8554         if(op2==0x10) rs1[i]=HIREG; // MFHI
8555         if(op2==0x11) rt1[i]=HIREG; // MTHI
8556         if(op2==0x12) rs1[i]=LOREG; // MFLO
8557         if(op2==0x13) rt1[i]=LOREG; // MTLO
8558         if((op2&0x1d)==0x10) rt1[i]=(source[i]>>11)&0x1f; // MFxx
8559         if((op2&0x1d)==0x11) rs1[i]=(source[i]>>21)&0x1f; // MTxx
8560         dep1[i]=rs1[i];
8561         break;
8562       case SHIFT:
8563         rs1[i]=(source[i]>>16)&0x1f; // target of shift
8564         rs2[i]=(source[i]>>21)&0x1f; // shift amount
8565         rt1[i]=(source[i]>>11)&0x1f; // destination
8566         rt2[i]=0;
8567         // DSLLV/DSRLV/DSRAV are 64-bit
8568         if(op2>=0x14&&op2<=0x17) us1[i]=rs1[i];
8569         break;
8570       case SHIFTIMM:
8571         rs1[i]=(source[i]>>16)&0x1f;
8572         rs2[i]=0;
8573         rt1[i]=(source[i]>>11)&0x1f;
8574         rt2[i]=0;
8575         imm[i]=(source[i]>>6)&0x1f;
8576         // DSxx32 instructions
8577         if(op2>=0x3c) imm[i]|=0x20;
8578         // DSLL/DSRL/DSRA/DSRA32/DSRL32 but not DSLL32 require 64-bit source
8579         if(op2>=0x38&&op2!=0x3c) us1[i]=rs1[i];
8580         break;
8581       case COP0:
8582         rs1[i]=0;
8583         rs2[i]=0;
8584         rt1[i]=0;
8585         rt2[i]=0;
8586         if(op2==0) rt1[i]=(source[i]>>16)&0x1F; // MFC0
8587         if(op2==4) rs1[i]=(source[i]>>16)&0x1F; // MTC0
8588         if(op2==4&&((source[i]>>11)&0x1f)==12) rt2[i]=CSREG; // Status
8589         if(op2==16) if((source[i]&0x3f)==0x18) rs2[i]=CCREG; // ERET
8590         break;
8591       case COP1:
8592         rs1[i]=0;
8593         rs2[i]=0;
8594         rt1[i]=0;
8595         rt2[i]=0;
8596         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC1/DMFC1/CFC1
8597         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC1/DMTC1/CTC1
8598         if(op2==5) us1[i]=rs1[i]; // DMTC1
8599         rs2[i]=CSREG;
8600         break;
8601       case COP2:
8602         rs1[i]=0;
8603         rs2[i]=0;
8604         rt1[i]=0;
8605         rt2[i]=0;
8606         if(op2<3) rt1[i]=(source[i]>>16)&0x1F; // MFC2/CFC2
8607         if(op2>3) rs1[i]=(source[i]>>16)&0x1F; // MTC2/CTC2
8608         rs2[i]=CSREG;
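        // gte_rs/gte_rt are 64-bit usage masks for the GTE:
        // bits 0-31 = data registers, bits 32-63 = control registers
        // (bit 63 is the FLAG register, which every C2OP modifies).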
8609         int gr=(source[i]>>11)&0x1F;
8610         switch(op2)
8611         {
8612           case 0x00: gte_rs[i]=1ll<<gr; break; // MFC2
8613           case 0x04: gte_rt[i]=1ll<<gr; break; // MTC2
8614           case 0x02: gte_rs[i]=1ll<<(gr+32); // CFC2
8615             if(gr==31&&!gte_reads_flags) {
8616               assem_debug("gte flag read encountered @%08x\n",addr + i*4);
8617               gte_reads_flags=1;
8618             }
8619             break;
8620           case 0x06: gte_rt[i]=1ll<<(gr+32); break; // CTC2
8621         }
8622         break;
8623       case C1LS:
8624         rs1[i]=(source[i]>>21)&0x1F;
8625         rs2[i]=CSREG;
8626         rt1[i]=0;
8627         rt2[i]=0;
8628         imm[i]=(short)source[i];
8629         break;
8630       case C2LS:
8631         rs1[i]=(source[i]>>21)&0x1F;
8632         rs2[i]=0;
8633         rt1[i]=0;
8634         rt2[i]=0;
8635         imm[i]=(short)source[i];
8636         if(op==0x32) gte_rt[i]=1ll<<((source[i]>>16)&0x1F); // LWC2
8637         else gte_rs[i]=1ll<<((source[i]>>16)&0x1F); // SWC2
8638         break;
8639       case C2OP:
8640         rs1[i]=0;
8641         rs2[i]=0;
8642         rt1[i]=0;
8643         rt2[i]=0;
8644         gte_rt[i]=1ll<<63; // every op changes flags
8645         // TODO: other regs?
8646         break;
8647       case FLOAT:
8648       case FCONV:
8649         rs1[i]=0;
8650         rs2[i]=CSREG;
8651         rt1[i]=0;
8652         rt2[i]=0;
8653         break;
8654       case FCOMP:
8655         rs1[i]=FSREG;
8656         rs2[i]=CSREG;
8657         rt1[i]=FSREG;
8658         rt2[i]=0;
8659         break;
8660       case SYSCALL:
8661       case HLECALL:
8662       case INTCALL:
8663         rs1[i]=CCREG;
8664         rs2[i]=0;
8665         rt1[i]=0;
8666         rt2[i]=0;
8667         break;
8668       default:
8669         rs1[i]=0;
8670         rs2[i]=0;
8671         rt1[i]=0;
8672         rt2[i]=0;
8673     }
8674     /* Calculate branch target addresses */
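    // J/JAL: target = (delay-slot PC & 0xF0000000) | (instr_index << 2);
    // the <<6 then >>4 below strips the 6-bit opcode and leaves the 26-bit
    // index already shifted left by 2.
    // Conditional branches: target = PC + 4 + (sign_extend(imm16) << 2);
    // the <<16 then arithmetic >>14 does the sign extension and the *4 in one step.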
8675     if(type==UJUMP)
8676       ba[i]=((start+i*4+4)&0xF0000000)|(((unsigned int)source[i]<<6)>>4);
8677     else if(type==CJUMP&&rs1[i]==rs2[i]&&(op&1))
8678       ba[i]=start+i*4+8; // Ignore never taken branch
8679     else if(type==SJUMP&&rs1[i]==0&&!(op2&1))
8680       ba[i]=start+i*4+8; // Ignore never taken branch
8681     else if(type==CJUMP||type==SJUMP||type==FJUMP)
8682       ba[i]=start+i*4+4+((signed int)((unsigned int)source[i]<<16)>>14);
8683     else ba[i]=-1;
8684 #ifdef PCSX
8685     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP)) {
8686       int do_in_intrp=0;
8687       // branch in delay slot?
8688       if(type==RJUMP||type==UJUMP||type==CJUMP||type==SJUMP||type==FJUMP) {
8689         // not handled here: convert the first branch to an interpreter call so the pair is executed by the interpreter if hit
8690         printf("branch in delay slot @%08x (%08x)\n", addr + i*4, addr);
8691         do_in_intrp=1;
8692       }
8693       // basic load delay detection
8694       else if((type==LOAD||type==LOADLR||type==COP0||type==COP2||type==C2LS)&&rt1[i]!=0) {
8695         int t=(ba[i-1]-start)/4;
8696         if(0 <= t && t < i &&(rt1[i]==rs1[t]||rt1[i]==rs2[t])&&itype[t]!=CJUMP&&itype[t]!=SJUMP) {
8697           // jump target wants DS result - potential load delay effect
8698           printf("load delay @%08x (%08x)\n", addr + i*4, addr);
8699           do_in_intrp=1;
8700           bt[t+1]=1; // expected return from interpreter
8701         }
8702         else if(i>=2&&rt1[i-2]==2&&rt1[i]==2&&rs1[i]!=2&&rs2[i]!=2&&rs1[i-1]!=2&&rs2[i-1]!=2&&
8703               !(i>=3&&(itype[i-3]==RJUMP||itype[i-3]==UJUMP||itype[i-3]==CJUMP||itype[i-3]==SJUMP))) {
8704           // v0 overwrite like this is a sign of trouble, bail out
8705           printf("v0 overwrite @%08x (%08x)\n", addr + i*4, addr);
8706           do_in_intrp=1;
8707         }
8708       }
8709       if(do_in_intrp) {
8710         rs1[i-1]=CCREG;
8711         rs2[i-1]=rt1[i-1]=rt2[i-1]=0;
8712         ba[i-1]=-1;
8713         itype[i-1]=INTCALL;
8714         done=2;
8715         i--; // don't compile the DS
8716       }
8717     }
8718 #endif
8719     /* Is this the end of the block? */
8720     if(i>0&&(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)) {
8721       if(rt1[i-1]==0) { // Continue past subroutine call (JAL)
8722         done=2;
8723       }
8724       else {
8725         if(stop_after_jal) done=1;
8726         // Stop on BREAK
8727         if((source[i+1]&0xfc00003f)==0x0d) done=1;
8728       }
8729       // Don't recompile stuff that's already compiled
8730       if(check_addr(start+i*4+4)) done=1;
8731       // Don't get too close to the limit
8732       if(i>MAXBLOCK/2) done=1;
8733     }
8734     if(itype[i]==SYSCALL&&stop_after_jal) done=1;
8735     if(itype[i]==HLECALL||itype[i]==INTCALL) done=2;
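    // done==1 ends the block unconditionally; done==2 is tentative and is
    // cancelled below if an earlier branch targets one of the next few
    // instruction slots, so the block continues past the jump.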
8736     if(done==2) {
8737       // Does the block continue due to a branch?
8738       for(j=i-1;j>=0;j--)
8739       {
8740         if(ba[j]==start+i*4) done=j=0; // Branch into delay slot
8741         if(ba[j]==start+i*4+4) done=j=0;
8742         if(ba[j]==start+i*4+8) done=j=0;
8743       }
8744     }
8745     //assert(i<MAXBLOCK-1);
8746     if(start+i*4==pagelimit-4) done=1;
8747     assert(start+i*4<pagelimit);
8748     if (i==MAXBLOCK-1) done=1;
8749     // Stop if we're compiling junk
8750     if(itype[i]==NI&&opcode[i]==0x11) {
8751       done=stop_after_jal=1;
8752       printf("Disabled speculative precompilation\n");
8753     }
8754   }
8755   slen=i;
8756   if(itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==RJUMP||itype[i-1]==FJUMP) {
8757     if(start+i*4==pagelimit) {
8758       itype[i-1]=SPAN;
8759     }
8760   }
8761   assert(slen>0);
8762
8763   /* Pass 2 - Register dependencies and branch targets */
8764
8765   unneeded_registers(0,slen-1,0);
8766   
8767   /* Pass 3 - Register allocation */
8768
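  // Walk the block forward, keeping `current` as the live register-allocation
  // state; regs[i] and branch_regs[i] snapshot the state the assembler will
  // later use for each instruction and for each branch respectively.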
8769   struct regstat current; // Current register allocations/status
8770   current.is32=1;
8771   current.dirty=0;
8772   current.u=unneeded_reg[0];
8773   current.uu=unneeded_reg_upper[0];
8774   clear_all_regs(current.regmap);
8775   alloc_reg(&current,0,CCREG);
8776   dirty_reg(&current,CCREG);
8777   current.isconst=0;
8778   current.wasconst=0;
8779   int ds=0;
8780   int cc=0;
8781   int hr=-1;
8782
8783 #ifndef FORCE32
8784   provisional_32bit();
8785 #endif
8786   if((u_int)addr&1) {
8787     // First instruction is delay slot
8788     cc=-1;
8789     bt[1]=1;
8790     ds=1;
8791     unneeded_reg[0]=1;
8792     unneeded_reg_upper[0]=1;
8793     current.regmap[HOST_BTREG]=BTREG;
8794   }
8795   
8796   for(i=0;i<slen;i++)
8797   {
8798     if(bt[i])
8799     {
8800       int hr;
8801       for(hr=0;hr<HOST_REGS;hr++)
8802       {
8803         // Is this really necessary?
8804         if(current.regmap[hr]==0) current.regmap[hr]=-1;
8805       }
8806       current.isconst=0;
8807     }
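    // If the instruction two slots back was BNE/BNEL with $zero as one
    // operand, then on the fall-through path the other operand compared
    // equal to zero, so it can be treated as 32-bit and its upper-half
    // mapping dropped.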
8808     if(i>1)
8809     {
8810       if((opcode[i-2]&0x2f)==0x05) // BNE/BNEL
8811       {
8812         if(rs1[i-2]==0||rs2[i-2]==0)
8813         {
8814           if(rs1[i-2]) {
8815             current.is32|=1LL<<rs1[i-2];
8816             int hr=get_reg(current.regmap,rs1[i-2]|64);
8817             if(hr>=0) current.regmap[hr]=-1;
8818           }
8819           if(rs2[i-2]) {
8820             current.is32|=1LL<<rs2[i-2];
8821             int hr=get_reg(current.regmap,rs2[i-2]|64);
8822             if(hr>=0) current.regmap[hr]=-1;
8823           }
8824         }
8825       }
8826     }
8827 #ifndef FORCE32
8828     // If something jumps here with 64-bit values
8829     // then promote those registers to 64 bits
8830     if(bt[i])
8831     {
8832       uint64_t temp_is32=current.is32;
8833       for(j=i-1;j>=0;j--)
8834       {
8835         if(ba[j]==start+i*4) 
8836           temp_is32&=branch_regs[j].is32;
8837       }
8838       for(j=i;j<slen;j++)
8839       {
8840         if(ba[j]==start+i*4) 
8841           //temp_is32=1;
8842           temp_is32&=p32[j];
8843       }
8844       if(temp_is32!=current.is32) {
8845         //printf("dumping 32-bit regs (%x)\n",start+i*4);
8846         #ifndef DESTRUCTIVE_WRITEBACK
8847         if(ds)
8848         #endif
8849         for(hr=0;hr<HOST_REGS;hr++)
8850         {
8851           int r=current.regmap[hr];
8852           if(r>0&&r<64)
8853           {
8854             if((current.dirty>>hr)&((current.is32&~temp_is32)>>r)&1) {
8855               temp_is32|=1LL<<r;
8856               //printf("restore %d\n",r);
8857             }
8858           }
8859         }
8860         current.is32=temp_is32;
8861       }
8862     }
8863 #else
8864     current.is32=-1LL;
8865 #endif
8866
8867     memcpy(regmap_pre[i],current.regmap,sizeof(current.regmap));
8868     regs[i].wasconst=current.isconst;
8869     regs[i].was32=current.is32;
8870     regs[i].wasdirty=current.dirty;
8871     #if defined(DESTRUCTIVE_WRITEBACK) && !defined(FORCE32)
8872     // To change a dirty register from 32 to 64 bits, we must write
8873     // it out during the previous cycle (for branches, 2 cycles)
8874     if(i<slen-1&&bt[i+1]&&itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP)
8875     {
8876       uint64_t temp_is32=current.is32;
8877       for(j=i-1;j>=0;j--)
8878       {
8879         if(ba[j]==start+i*4+4) 
8880           temp_is32&=branch_regs[j].is32;
8881       }
8882       for(j=i;j<slen;j++)
8883       {
8884         if(ba[j]==start+i*4+4) 
8885           //temp_is32=1;
8886           temp_is32&=p32[j];
8887       }
8888       if(temp_is32!=current.is32) {
8889         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8890         for(hr=0;hr<HOST_REGS;hr++)
8891         {
8892           int r=current.regmap[hr];
8893           if(r>0)
8894           {
8895             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8896               if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP)
8897               {
8898                 if(rs1[i]!=(r&63)&&rs2[i]!=(r&63))
8899                 {
8900                   //printf("dump %d/r%d\n",hr,r);
8901                   current.regmap[hr]=-1;
8902                   if(get_reg(current.regmap,r|64)>=0) 
8903                     current.regmap[get_reg(current.regmap,r|64)]=-1;
8904                 }
8905               }
8906             }
8907           }
8908         }
8909       }
8910     }
8911     else if(i<slen-2&&bt[i+2]&&(source[i-1]>>16)!=0x1000&&(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP))
8912     {
8913       uint64_t temp_is32=current.is32;
8914       for(j=i-1;j>=0;j--)
8915       {
8916         if(ba[j]==start+i*4+8) 
8917           temp_is32&=branch_regs[j].is32;
8918       }
8919       for(j=i;j<slen;j++)
8920       {
8921         if(ba[j]==start+i*4+8) 
8922           //temp_is32=1;
8923           temp_is32&=p32[j];
8924       }
8925       if(temp_is32!=current.is32) {
8926         //printf("pre-dumping 32-bit regs (%x)\n",start+i*4);
8927         for(hr=0;hr<HOST_REGS;hr++)
8928         {
8929           int r=current.regmap[hr];
8930           if(r>0)
8931           {
8932             if((current.dirty>>hr)&((current.is32&~temp_is32)>>(r&63))&1) {
8933               if(rs1[i]!=(r&63)&&rs2[i]!=(r&63)&&rs1[i+1]!=(r&63)&&rs2[i+1]!=(r&63))
8934               {
8935                 //printf("dump %d/r%d\n",hr,r);
8936                 current.regmap[hr]=-1;
8937                 if(get_reg(current.regmap,r|64)>=0) 
8938                   current.regmap[get_reg(current.regmap,r|64)]=-1;
8939               }
8940             }
8941           }
8942         }
8943       }
8944     }
8945     #endif
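    // u/uu mark registers (and their upper halves) whose values are dead
    // after this point; sources of the current instruction (and of a branch's
    // delay slot) are forced live, and bit 0 ($zero) is always unneeded.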
8946     if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
8947       if(i+1<slen) {
8948         current.u=unneeded_reg[i+1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
8949         current.uu=unneeded_reg_upper[i+1]&~((1LL<<us1[i])|(1LL<<us2[i]));
8950         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8951         current.u|=1;
8952         current.uu|=1;
8953       } else {
8954         current.u=1;
8955         current.uu=1;
8956       }
8957     } else {
8958       if(i+1<slen) {
8959         current.u=branch_unneeded_reg[i]&~((1LL<<rs1[i+1])|(1LL<<rs2[i+1]));
8960         current.uu=branch_unneeded_reg_upper[i]&~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
8961         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
8962         current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8963         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8964         current.u|=1;
8965         current.uu|=1;
8966       } else { printf("oops, branch at end of block with no delay slot\n");exit(1); }
8967     }
8968     is_ds[i]=ds;
8969     if(ds) {
8970       ds=0; // Skip delay slot, already allocated as part of branch
8971       // ...but we need to alloc it in case something jumps here
8972       if(i+1<slen) {
8973         current.u=branch_unneeded_reg[i-1]&unneeded_reg[i+1];
8974         current.uu=branch_unneeded_reg_upper[i-1]&unneeded_reg_upper[i+1];
8975       }else{
8976         current.u=branch_unneeded_reg[i-1];
8977         current.uu=branch_unneeded_reg_upper[i-1];
8978       }
8979       current.u&=~((1LL<<rs1[i])|(1LL<<rs2[i]));
8980       current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
8981       if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
8982       current.u|=1;
8983       current.uu|=1;
8984       struct regstat temp;
8985       memcpy(&temp,&current,sizeof(current));
8986       temp.wasdirty=temp.dirty;
8987       temp.was32=temp.is32;
8988       // TODO: Take into account unconditional branches, as below
8989       delayslot_alloc(&temp,i);
8990       memcpy(regs[i].regmap,temp.regmap,sizeof(temp.regmap));
8991       regs[i].wasdirty=temp.wasdirty;
8992       regs[i].was32=temp.was32;
8993       regs[i].dirty=temp.dirty;
8994       regs[i].is32=temp.is32;
8995       regs[i].isconst=0;
8996       regs[i].wasconst=0;
8997       current.isconst=0;
8998       // Create entry (branch target) regmap
8999       for(hr=0;hr<HOST_REGS;hr++)
9000       {
9001         int r=temp.regmap[hr];
9002         if(r>=0) {
9003           if(r!=regmap_pre[i][hr]) {
9004             regs[i].regmap_entry[hr]=-1;
9005           }
9006           else
9007           {
9008             if(r<64){
9009               if((current.u>>r)&1) {
9010                 regs[i].regmap_entry[hr]=-1;
9011                 regs[i].regmap[hr]=-1;
9012                 //Don't clear regs in the delay slot as the branch might need them
9013                 //current.regmap[hr]=-1;
9014               }else
9015                 regs[i].regmap_entry[hr]=r;
9016             }
9017             else {
9018               if((current.uu>>(r&63))&1) {
9019                 regs[i].regmap_entry[hr]=-1;
9020                 regs[i].regmap[hr]=-1;
9021                 //Don't clear regs in the delay slot as the branch might need them
9022                 //current.regmap[hr]=-1;
9023               }else
9024                 regs[i].regmap_entry[hr]=r;
9025             }
9026           }
9027         } else {
9028           // First instruction expects CCREG to be allocated
9029           if(i==0&&hr==HOST_CCREG) 
9030             regs[i].regmap_entry[hr]=CCREG;
9031           else
9032             regs[i].regmap_entry[hr]=-1;
9033         }
9034       }
9035     }
9036     else { // Not delay slot
9037       switch(itype[i]) {
9038         case UJUMP:
9039           //current.isconst=0; // DEBUG
9040           //current.wasconst=0; // DEBUG
9041           //regs[i].wasconst=0; // DEBUG
9042           clear_const(&current,rt1[i]);
9043           alloc_cc(&current,i);
9044           dirty_reg(&current,CCREG);
9045           if (rt1[i]==31) {
9046             alloc_reg(&current,i,31);
9047             dirty_reg(&current,31);
9048             //assert(rs1[i+1]!=31&&rs2[i+1]!=31);
9049             //assert(rt1[i+1]!=rt1[i]);
9050             #ifdef REG_PREFETCH
9051             alloc_reg(&current,i,PTEMP);
9052             #endif
9053             //current.is32|=1LL<<rt1[i];
9054           }
9055           ooo[i]=1;
9056           delayslot_alloc(&current,i+1);
9057           //current.isconst=0; // DEBUG
9058           ds=1;
9059           //printf("i=%d, isconst=%x\n",i,current.isconst);
9060           break;
9061         case RJUMP:
9062           //current.isconst=0;
9063           //current.wasconst=0;
9064           //regs[i].wasconst=0;
9065           clear_const(&current,rs1[i]);
9066           clear_const(&current,rt1[i]);
9067           alloc_cc(&current,i);
9068           dirty_reg(&current,CCREG);
9069           if(rs1[i]!=rt1[i+1]&&rs1[i]!=rt2[i+1]) {
9070             alloc_reg(&current,i,rs1[i]);
9071             if (rt1[i]!=0) {
9072               alloc_reg(&current,i,rt1[i]);
9073               dirty_reg(&current,rt1[i]);
9074               assert(rs1[i+1]!=rt1[i]&&rs2[i+1]!=rt1[i]);
9075               assert(rt1[i+1]!=rt1[i]);
9076               #ifdef REG_PREFETCH
9077               alloc_reg(&current,i,PTEMP);
9078               #endif
9079             }
9080             #ifdef USE_MINI_HT
9081             if(rs1[i]==31) { // JALR
9082               alloc_reg(&current,i,RHASH);
9083               #ifndef HOST_IMM_ADDR32
9084               alloc_reg(&current,i,RHTBL);
9085               #endif
9086             }
9087             #endif
9088             delayslot_alloc(&current,i+1);
9089           } else {
9090             // The delay slot overwrites our source register,
9091             // allocate a temporary register to hold the old value.
9092             current.isconst=0;
9093             current.wasconst=0;
9094             regs[i].wasconst=0;
9095             delayslot_alloc(&current,i+1);
9096             current.isconst=0;
9097             alloc_reg(&current,i,RTEMP);
9098           }
9099           //current.isconst=0; // DEBUG
9100           ooo[i]=1;
9101           ds=1;
9102           break;
9103         case CJUMP:
9104           //current.isconst=0;
9105           //current.wasconst=0;
9106           //regs[i].wasconst=0;
9107           clear_const(&current,rs1[i]);
9108           clear_const(&current,rs2[i]);
9109           if((opcode[i]&0x3E)==4) // BEQ/BNE
9110           {
9111             alloc_cc(&current,i);
9112             dirty_reg(&current,CCREG);
9113             if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9114             if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9115             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9116             {
9117               if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9118               if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9119             }
9120             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1]))||
9121                (rs2[i]&&(rs2[i]==rt1[i+1]||rs2[i]==rt2[i+1]))) {
9122               // The delay slot overwrites one of our conditions.
9123               // Allocate the branch condition registers instead.
9124               current.isconst=0;
9125               current.wasconst=0;
9126               regs[i].wasconst=0;
9127               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9128               if(rs2[i]) alloc_reg(&current,i,rs2[i]);
9129               if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9130               {
9131                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9132                 if(rs2[i]) alloc_reg64(&current,i,rs2[i]);
9133               }
9134             }
9135             else
9136             {
9137               ooo[i]=1;
9138               delayslot_alloc(&current,i+1);
9139             }
9140           }
9141           else
9142           if((opcode[i]&0x3E)==6) // BLEZ/BGTZ
9143           {
9144             alloc_cc(&current,i);
9145             dirty_reg(&current,CCREG);
9146             alloc_reg(&current,i,rs1[i]);
9147             if(!(current.is32>>rs1[i]&1))
9148             {
9149               alloc_reg64(&current,i,rs1[i]);
9150             }
9151             if(rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) {
9152               // The delay slot overwrites one of our conditions.
9153               // Allocate the branch condition registers instead.
9154               current.isconst=0;
9155               current.wasconst=0;
9156               regs[i].wasconst=0;
9157               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9158               if(!((current.is32>>rs1[i])&1))
9159               {
9160                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9161               }
9162             }
9163             else
9164             {
9165               ooo[i]=1;
9166               delayslot_alloc(&current,i+1);
9167             }
9168           }
9169           else
9170           // Don't alloc the delay slot yet because we might not execute it
9171           if((opcode[i]&0x3E)==0x14) // BEQL/BNEL
9172           {
9173             current.isconst=0;
9174             current.wasconst=0;
9175             regs[i].wasconst=0;
9176             alloc_cc(&current,i);
9177             dirty_reg(&current,CCREG);
9178             alloc_reg(&current,i,rs1[i]);
9179             alloc_reg(&current,i,rs2[i]);
9180             if(!((current.is32>>rs1[i])&(current.is32>>rs2[i])&1))
9181             {
9182               alloc_reg64(&current,i,rs1[i]);
9183               alloc_reg64(&current,i,rs2[i]);
9184             }
9185           }
9186           else
9187           if((opcode[i]&0x3E)==0x16) // BLEZL/BGTZL
9188           {
9189             current.isconst=0;
9190             current.wasconst=0;
9191             regs[i].wasconst=0;
9192             alloc_cc(&current,i);
9193             dirty_reg(&current,CCREG);
9194             alloc_reg(&current,i,rs1[i]);
9195             if(!(current.is32>>rs1[i]&1))
9196             {
9197               alloc_reg64(&current,i,rs1[i]);
9198             }
9199           }
9200           ds=1;
9201           //current.isconst=0;
9202           break;
9203         case SJUMP:
9204           //current.isconst=0;
9205           //current.wasconst=0;
9206           //regs[i].wasconst=0;
9207           clear_const(&current,rs1[i]);
9208           clear_const(&current,rt1[i]);
9209           //if((opcode2[i]&0x1E)==0x0) // BLTZ/BGEZ
9210           if((opcode2[i]&0x0E)==0x0) // BLTZ/BGEZ
9211           {
9212             alloc_cc(&current,i);
9213             dirty_reg(&current,CCREG);
9214             alloc_reg(&current,i,rs1[i]);
9215             if(!(current.is32>>rs1[i]&1))
9216             {
9217               alloc_reg64(&current,i,rs1[i]);
9218             }
9219             if (rt1[i]==31) { // BLTZAL/BGEZAL
9220               alloc_reg(&current,i,31);
9221               dirty_reg(&current,31);
9222               //#ifdef REG_PREFETCH
9223               //alloc_reg(&current,i,PTEMP);
9224               //#endif
9225               //current.is32|=1LL<<rt1[i];
9226             }
9227             if((rs1[i]&&(rs1[i]==rt1[i+1]||rs1[i]==rt2[i+1])) // The delay slot overwrites the branch condition.
9228                ||(rt1[i]==31&&(rs1[i+1]==31||rs2[i+1]==31||rt1[i+1]==31||rt2[i+1]==31))) { // DS touches $ra
9229               // Allocate the branch condition registers instead.
9230               current.isconst=0;
9231               current.wasconst=0;
9232               regs[i].wasconst=0;
9233               if(rs1[i]) alloc_reg(&current,i,rs1[i]);
9234               if(!((current.is32>>rs1[i])&1))
9235               {
9236                 if(rs1[i]) alloc_reg64(&current,i,rs1[i]);
9237               }
9238             }
9239             else
9240             {
9241               ooo[i]=1;
9242               delayslot_alloc(&current,i+1);
9243             }
9244           }
9245           else
9246           // Don't alloc the delay slot yet because we might not execute it
9247           if((opcode2[i]&0x1E)==0x2) // BLTZL/BGEZL
9248           {
9249             current.isconst=0;
9250             current.wasconst=0;
9251             regs[i].wasconst=0;
9252             alloc_cc(&current,i);
9253             dirty_reg(&current,CCREG);
9254             alloc_reg(&current,i,rs1[i]);
9255             if(!(current.is32>>rs1[i]&1))
9256             {
9257               alloc_reg64(&current,i,rs1[i]);
9258             }
9259           }
9260           ds=1;
9261           //current.isconst=0;
9262           break;
9263         case FJUMP:
9264           current.isconst=0;
9265           current.wasconst=0;
9266           regs[i].wasconst=0;
9267           if(likely[i]==0) // BC1F/BC1T
9268           {
9269             // TODO: Theoretically we can run out of registers here on x86.
9270             // The delay slot can allocate up to six, and we need to check
9271             // CSREG before executing the delay slot.  Possibly we can drop
9272             // the cycle count and then reload it after checking that the
9273             // FPU is in a usable state, or don't do out-of-order execution.
9274             alloc_cc(&current,i);
9275             dirty_reg(&current,CCREG);
9276             alloc_reg(&current,i,FSREG);
9277             alloc_reg(&current,i,CSREG);
9278             if(itype[i+1]==FCOMP) {
9279               // The delay slot overwrites the branch condition.
9280               // Allocate the branch condition registers instead.
9281               alloc_cc(&current,i);
9282               dirty_reg(&current,CCREG);
9283               alloc_reg(&current,i,CSREG);
9284               alloc_reg(&current,i,FSREG);
9285             }
9286             else {
9287               ooo[i]=1;
9288               delayslot_alloc(&current,i+1);
9289               alloc_reg(&current,i+1,CSREG);
9290             }
9291           }
9292           else
9293           // Don't alloc the delay slot yet because we might not execute it
9294           if(likely[i]) // BC1FL/BC1TL
9295           {
9296             alloc_cc(&current,i);
9297             dirty_reg(&current,CCREG);
9298             alloc_reg(&current,i,CSREG);
9299             alloc_reg(&current,i,FSREG);
9300           }
9301           ds=1;
9302           current.isconst=0;
9303           break;
9304         case IMM16:
9305           imm16_alloc(&current,i);
9306           break;
9307         case LOAD:
9308         case LOADLR:
9309           load_alloc(&current,i);
9310           break;
9311         case STORE:
9312         case STORELR:
9313           store_alloc(&current,i);
9314           break;
9315         case ALU:
9316           alu_alloc(&current,i);
9317           break;
9318         case SHIFT:
9319           shift_alloc(&current,i);
9320           break;
9321         case MULTDIV:
9322           multdiv_alloc(&current,i);
9323           break;
9324         case SHIFTIMM:
9325           shiftimm_alloc(&current,i);
9326           break;
9327         case MOV:
9328           mov_alloc(&current,i);
9329           break;
9330         case COP0:
9331           cop0_alloc(&current,i);
9332           break;
9333         case COP1:
9334         case COP2:
9335           cop1_alloc(&current,i);
9336           break;
9337         case C1LS:
9338           c1ls_alloc(&current,i);
9339           break;
9340         case C2LS:
9341           c2ls_alloc(&current,i);
9342           break;
9343         case C2OP:
9344           c2op_alloc(&current,i);
9345           break;
9346         case FCONV:
9347           fconv_alloc(&current,i);
9348           break;
9349         case FLOAT:
9350           float_alloc(&current,i);
9351           break;
9352         case FCOMP:
9353           fcomp_alloc(&current,i);
9354           break;
9355         case SYSCALL:
9356         case HLECALL:
9357         case INTCALL:
9358           syscall_alloc(&current,i);
9359           break;
9360         case SPAN:
9361           pagespan_alloc(&current,i);
9362           break;
9363       }
9364       
9365       // Drop the upper half of registers that have become 32-bit
9366       current.uu|=current.is32&((1LL<<rt1[i])|(1LL<<rt2[i]));
9367       if(itype[i]!=UJUMP&&itype[i]!=CJUMP&&itype[i]!=SJUMP&&itype[i]!=RJUMP&&itype[i]!=FJUMP) {
9368         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9369         if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9370         current.uu|=1;
9371       } else {
9372         current.uu|=current.is32&((1LL<<rt1[i+1])|(1LL<<rt2[i+1]));
9373         current.uu&=~((1LL<<us1[i+1])|(1LL<<us2[i+1]));
9374         if((~current.uu>>rt1[i+1])&1) current.uu&=~((1LL<<dep1[i+1])|(1LL<<dep2[i+1]));
9375         current.uu&=~((1LL<<us1[i])|(1LL<<us2[i]));
9376         current.uu|=1;
9377       }
9378
9379       // Create entry (branch target) regmap
9380       for(hr=0;hr<HOST_REGS;hr++)
9381       {
9382         int r,or,er;
9383         r=current.regmap[hr];
9384         if(r>=0) {
9385           if(r!=regmap_pre[i][hr]) {
9386             // TODO: delay slot (?)
9387             or=get_reg(regmap_pre[i],r); // Get old mapping for this register
9388             if(or<0||(r&63)>=TEMPREG){
9389               regs[i].regmap_entry[hr]=-1;
9390             }
9391             else
9392             {
9393               // Just move it to a different register
9394               regs[i].regmap_entry[hr]=r;
9395               // If it was dirty before, it's still dirty
9396               if((regs[i].wasdirty>>or)&1) dirty_reg(&current,r&63);
9397             }
9398           }
9399           else
9400           {
9401             // Unneeded
9402             if(r==0){
9403               regs[i].regmap_entry[hr]=0;
9404             }
9405             else
9406             if(r<64){
9407               if((current.u>>r)&1) {
9408                 regs[i].regmap_entry[hr]=-1;
9409                 //regs[i].regmap[hr]=-1;
9410                 current.regmap[hr]=-1;
9411               }else
9412                 regs[i].regmap_entry[hr]=r;
9413             }
9414             else {
9415               if((current.uu>>(r&63))&1) {
9416                 regs[i].regmap_entry[hr]=-1;
9417                 //regs[i].regmap[hr]=-1;
9418                 current.regmap[hr]=-1;
9419               }else
9420                 regs[i].regmap_entry[hr]=r;
9421             }
9422           }
9423         } else {
9424           // Branches expect CCREG to be allocated at the target
9425           if(regmap_pre[i][hr]==CCREG) 
9426             regs[i].regmap_entry[hr]=CCREG;
9427           else
9428             regs[i].regmap_entry[hr]=-1;
9429         }
9430       }
9431       memcpy(regs[i].regmap,current.regmap,sizeof(current.regmap));
9432     }
9433     /* Branch post-alloc */
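    // If the previous instruction was a branch, compute branch_regs[i-1]:
    // the register state on the taken path, now that its delay slot
    // (the current instruction) has been allocated.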
9434     if(i>0)
9435     {
9436       current.was32=current.is32;
9437       current.wasdirty=current.dirty;
9438       switch(itype[i-1]) {
9439         case UJUMP:
9440           memcpy(&branch_regs[i-1],&current,sizeof(current));
9441           branch_regs[i-1].isconst=0;
9442           branch_regs[i-1].wasconst=0;
9443           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9444           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9445           alloc_cc(&branch_regs[i-1],i-1);
9446           dirty_reg(&branch_regs[i-1],CCREG);
9447           if(rt1[i-1]==31) { // JAL
9448             alloc_reg(&branch_regs[i-1],i-1,31);
9449             dirty_reg(&branch_regs[i-1],31);
9450             branch_regs[i-1].is32|=1LL<<31;
9451           }
9452           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9453           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9454           break;
9455         case RJUMP:
9456           memcpy(&branch_regs[i-1],&current,sizeof(current));
9457           branch_regs[i-1].isconst=0;
9458           branch_regs[i-1].wasconst=0;
9459           branch_regs[i-1].u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9460           branch_regs[i-1].uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9461           alloc_cc(&branch_regs[i-1],i-1);
9462           dirty_reg(&branch_regs[i-1],CCREG);
9463           alloc_reg(&branch_regs[i-1],i-1,rs1[i-1]);
9464           if(rt1[i-1]!=0) { // JALR
9465             alloc_reg(&branch_regs[i-1],i-1,rt1[i-1]);
9466             dirty_reg(&branch_regs[i-1],rt1[i-1]);
9467             branch_regs[i-1].is32|=1LL<<rt1[i-1];
9468           }
9469           #ifdef USE_MINI_HT
9470           if(rs1[i-1]==31) { // JR via $ra (return)
9471             alloc_reg(&branch_regs[i-1],i-1,RHASH);
9472             #ifndef HOST_IMM_ADDR32
9473             alloc_reg(&branch_regs[i-1],i-1,RHTBL);
9474             #endif
9475           }
9476           #endif
9477           memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9478           memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9479           break;
9480         case CJUMP:
9481           if((opcode[i-1]&0x3E)==4) // BEQ/BNE
9482           {
9483             alloc_cc(&current,i-1);
9484             dirty_reg(&current,CCREG);
9485             if((rs1[i-1]&&(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]))||
9486                (rs2[i-1]&&(rs2[i-1]==rt1[i]||rs2[i-1]==rt2[i]))) {
9487               // The delay slot overwrote one of our conditions
9488               // Delay slot goes after the test (in order)
9489               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9490               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9491               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9492               current.u|=1;
9493               current.uu|=1;
9494               delayslot_alloc(&current,i);
9495               current.isconst=0;
9496             }
9497             else
9498             {
9499               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i-1])|(1LL<<rs2[i-1]));
9500               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i-1])|(1LL<<us2[i-1]));
9501               // Alloc the branch condition registers
9502               if(rs1[i-1]) alloc_reg(&current,i-1,rs1[i-1]);
9503               if(rs2[i-1]) alloc_reg(&current,i-1,rs2[i-1]);
9504               if(!((current.is32>>rs1[i-1])&(current.is32>>rs2[i-1])&1))
9505               {
9506                 if(rs1[i-1]) alloc_reg64(&current,i-1,rs1[i-1]);
9507                 if(rs2[i-1]) alloc_reg64(&current,i-1,rs2[i-1]);
9508               }
9509             }
9510             memcpy(&branch_regs[i-1],&current,sizeof(current));
9511             branch_regs[i-1].isconst=0;
9512             branch_regs[i-1].wasconst=0;
9513             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9514             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9515           }
9516           else
9517           if((opcode[i-1]&0x3E)==6) // BLEZ/BGTZ
9518           {
9519             alloc_cc(&current,i-1);
9520             dirty_reg(&current,CCREG);
9521             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9522               // The delay slot overwrote the branch condition
9523               // Delay slot goes after the test (in order)
9524               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9525               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9526               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9527               current.u|=1;
9528               current.uu|=1;
9529               delayslot_alloc(&current,i);
9530               current.isconst=0;
9531             }
9532             else
9533             {
9534               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9535               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9536               // Alloc the branch condition register
9537               alloc_reg(&current,i-1,rs1[i-1]);
9538               if(!(current.is32>>rs1[i-1]&1))
9539               {
9540                 alloc_reg64(&current,i-1,rs1[i-1]);
9541               }
9542             }
9543             memcpy(&branch_regs[i-1],&current,sizeof(current));
9544             branch_regs[i-1].isconst=0;
9545             branch_regs[i-1].wasconst=0;
9546             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9547             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9548           }
9549           else
9550           // Alloc the delay slot in case the branch is taken
9551           if((opcode[i-1]&0x3E)==0x14) // BEQL/BNEL
9552           {
9553             memcpy(&branch_regs[i-1],&current,sizeof(current));
9554             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9555             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9556             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9557             alloc_cc(&branch_regs[i-1],i);
9558             dirty_reg(&branch_regs[i-1],CCREG);
9559             delayslot_alloc(&branch_regs[i-1],i);
9560             branch_regs[i-1].isconst=0;
9561             alloc_reg(&current,i,CCREG); // Not taken path
9562             dirty_reg(&current,CCREG);
9563             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9564           }
9565           else
9566           if((opcode[i-1]&0x3E)==0x16) // BLEZL/BGTZL
9567           {
9568             memcpy(&branch_regs[i-1],&current,sizeof(current));
9569             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9570             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9571             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9572             alloc_cc(&branch_regs[i-1],i);
9573             dirty_reg(&branch_regs[i-1],CCREG);
9574             delayslot_alloc(&branch_regs[i-1],i);
9575             branch_regs[i-1].isconst=0;
9576             alloc_reg(&current,i,CCREG); // Not taken path
9577             dirty_reg(&current,CCREG);
9578             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9579           }
9580           break;
9581         case SJUMP:
9582           //if((opcode2[i-1]&0x1E)==0) // BLTZ/BGEZ
9583           if((opcode2[i-1]&0x0E)==0) // BLTZ/BGEZ
9584           {
9585             alloc_cc(&current,i-1);
9586             dirty_reg(&current,CCREG);
9587             if(rs1[i-1]==rt1[i]||rs1[i-1]==rt2[i]) {
9588               // The delay slot overwrote the branch condition
9589               // Delay slot goes after the test (in order)
9590               current.u=branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i]));
9591               current.uu=branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i]));
9592               if((~current.uu>>rt1[i])&1) current.uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]));
9593               current.u|=1;
9594               current.uu|=1;
9595               delayslot_alloc(&current,i);
9596               current.isconst=0;
9597             }
9598             else
9599             {
9600               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9601               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9602               // Alloc the branch condition register
9603               alloc_reg(&current,i-1,rs1[i-1]);
9604               if(!(current.is32>>rs1[i-1]&1))
9605               {
9606                 alloc_reg64(&current,i-1,rs1[i-1]);
9607               }
9608             }
9609             memcpy(&branch_regs[i-1],&current,sizeof(current));
9610             branch_regs[i-1].isconst=0;
9611             branch_regs[i-1].wasconst=0;
9612             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9613             memcpy(constmap[i],constmap[i-1],sizeof(current.constmap));
9614           }
9615           else
9616           // Alloc the delay slot in case the branch is taken
9617           if((opcode2[i-1]&0x1E)==2) // BLTZL/BGEZL
9618           {
9619             memcpy(&branch_regs[i-1],&current,sizeof(current));
9620             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9621             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9622             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9623             alloc_cc(&branch_regs[i-1],i);
9624             dirty_reg(&branch_regs[i-1],CCREG);
9625             delayslot_alloc(&branch_regs[i-1],i);
9626             branch_regs[i-1].isconst=0;
9627             alloc_reg(&current,i,CCREG); // Not taken path
9628             dirty_reg(&current,CCREG);
9629             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9630           }
9631           // FIXME: BLTZAL/BGEZAL
9632           if(opcode2[i-1]&0x10) { // BxxZAL
9633             alloc_reg(&branch_regs[i-1],i-1,31);
9634             dirty_reg(&branch_regs[i-1],31);
9635             branch_regs[i-1].is32|=1LL<<31;
9636           }
9637           break;
9638         case FJUMP:
9639           if(likely[i-1]==0) // BC1F/BC1T
9640           {
9641             alloc_cc(&current,i-1);
9642             dirty_reg(&current,CCREG);
9643             if(itype[i]==FCOMP) {
9644               // The delay slot overwrote the branch condition
9645               // Delay slot goes after the test (in order)
9646               delayslot_alloc(&current,i);
9647               current.isconst=0;
9648             }
9649             else
9650             {
9651               current.u=branch_unneeded_reg[i-1]&~(1LL<<rs1[i-1]);
9652               current.uu=branch_unneeded_reg_upper[i-1]&~(1LL<<us1[i-1]);
9653               // Alloc the branch condition register
9654               alloc_reg(&current,i-1,FSREG);
9655             }
9656             memcpy(&branch_regs[i-1],&current,sizeof(current));
9657             memcpy(&branch_regs[i-1].regmap_entry,&current.regmap,sizeof(current.regmap));
9658           }
9659           else // BC1FL/BC1TL
9660           {
9661             // Alloc the delay slot in case the branch is taken
9662             memcpy(&branch_regs[i-1],&current,sizeof(current));
9663             branch_regs[i-1].u=(branch_unneeded_reg[i-1]&~((1LL<<rs1[i])|(1LL<<rs2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9664             branch_regs[i-1].uu=(branch_unneeded_reg_upper[i-1]&~((1LL<<us1[i])|(1LL<<us2[i])|(1LL<<rt1[i])|(1LL<<rt2[i])))|1;
9665             if((~branch_regs[i-1].uu>>rt1[i])&1) branch_regs[i-1].uu&=~((1LL<<dep1[i])|(1LL<<dep2[i]))|1;
9666             alloc_cc(&branch_regs[i-1],i);
9667             dirty_reg(&branch_regs[i-1],CCREG);
9668             delayslot_alloc(&branch_regs[i-1],i);
9669             branch_regs[i-1].isconst=0;
9670             alloc_reg(&current,i,CCREG); // Not taken path
9671             dirty_reg(&current,CCREG);
9672             memcpy(&branch_regs[i-1].regmap_entry,&branch_regs[i-1].regmap,sizeof(current.regmap));
9673           }
9674           break;
9675       }
9676
9677       if(itype[i-1]==UJUMP||itype[i-1]==RJUMP||(source[i-1]>>16)==0x1000)
9678       {
9679         if(rt1[i-1]==31) // JAL/JALR
9680         {
9681           // Subroutine call will return here, so don't alloc any registers
9682           current.is32=1;
9683           current.dirty=0;
9684           clear_all_regs(current.regmap);
9685           alloc_reg(&current,i,CCREG);
9686           dirty_reg(&current,CCREG);
9687         }
9688         else if(i+1<slen)
9689         {
9690           // Internal branch will jump here; match registers to the branch source
9691           current.is32=0x3FFFFFFFFLL;
9692           current.dirty=0;
9693           clear_all_regs(current.regmap);
9694           alloc_reg(&current,i,CCREG);
9695           dirty_reg(&current,CCREG);
9696           for(j=i-1;j>=0;j--)
9697           {
9698             if(ba[j]==start+i*4+4) {
9699               memcpy(current.regmap,branch_regs[j].regmap,sizeof(current.regmap));
9700               current.is32=branch_regs[j].is32;
9701               current.dirty=branch_regs[j].dirty;
9702               break;
9703             }
9704           }
9705           while(j>=0) {
9706             if(ba[j]==start+i*4+4) {
9707               for(hr=0;hr<HOST_REGS;hr++) {
9708                 if(current.regmap[hr]!=branch_regs[j].regmap[hr]) {
9709                   current.regmap[hr]=-1;
9710                 }
9711                 current.is32&=branch_regs[j].is32;
9712                 current.dirty&=branch_regs[j].dirty;
9713               }
9714             }
9715             j--;
9716           }
9717         }
9718       }
9719     }
9720
9721     // Count cycles in between branches
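    // ccadj[i] is the approximate cycle count accumulated since the last
    // branch; branch/syscall assembly later uses it to update the cycle
    // counter, so cc restarts from zero after each branch.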
9722     ccadj[i]=cc;
9723     if(i>0&&(itype[i-1]==RJUMP||itype[i-1]==UJUMP||itype[i-1]==CJUMP||itype[i-1]==SJUMP||itype[i-1]==FJUMP||itype[i]==SYSCALL||itype[i]==HLECALL))
9724     {
9725       cc=0;
9726     }
9727 #ifdef PCSX
9728     else if(/*itype[i]==LOAD||*/itype[i]==STORE||itype[i]==C1LS) // load causes weird timing issues
9729     {
9730       cc+=2; // 2 cycle penalty (after CLOCK_DIVIDER)
9731     }
9732     else if(itype[i]==C2LS)
9733     {
9734       cc+=4;
9735     }
9736 #endif
9737     else
9738     {
9739       cc++;
9740     }
9741
9742     flush_dirty_uppers(&current);
9743     if(!is_ds[i]) {
9744       regs[i].is32=current.is32;
9745       regs[i].dirty=current.dirty;
9746       regs[i].isconst=current.isconst;
9747       memcpy(constmap[i],current.constmap,sizeof(current.constmap));
9748     }
9749     for(hr=0;hr<HOST_REGS;hr++) {
9750       if(hr!=EXCLUDE_REG&&regs[i].regmap[hr]>=0) {
9751         if(regmap_pre[i][hr]!=regs[i].regmap[hr]) {
9752           regs[i].wasconst&=~(1<<hr);
9753         }
9754       }
9755     }
9756     if(current.regmap[HOST_BTREG]==BTREG) current.regmap[HOST_BTREG]=-1;
9757   }
9758   
9759   /* Pass 4 - Cull unused host registers */
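  // Walk the block backwards computing, for each instruction, a bitmask
  // (nr, saved as needed_reg[i]) of host registers whose contents are
  // still needed later or at a branch target; anything not marked is
  // deallocated so the host register can be reused.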
9760   
9761   uint64_t nr=0;
9762   
9763   for (i=slen-1;i>=0;i--)
9764   {
9765     int hr;
9766     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9767     {
9768       if(ba[i]<start || ba[i]>=(start+slen*4))
9769       {
9770         // Branch out of this block, don't need anything
9771         nr=0;
9772       }
9773       else
9774       {
9775         // Internal branch
9776         // Need whatever matches the target
9777         nr=0;
9778         int t=(ba[i]-start)>>2;
9779         for(hr=0;hr<HOST_REGS;hr++)
9780         {
9781           if(regs[i].regmap_entry[hr]>=0) {
9782             if(regs[i].regmap_entry[hr]==regs[t].regmap_entry[hr]) nr|=1<<hr;
9783           }
9784         }
9785       }
9786       // Conditional branch may need registers for following instructions
9787       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9788       {
9789         if(i<slen-2) {
9790           nr|=needed_reg[i+2];
9791           for(hr=0;hr<HOST_REGS;hr++)
9792           {
9793             if(regmap_pre[i+2][hr]>=0&&get_reg(regs[i+2].regmap_entry,regmap_pre[i+2][hr])<0) nr&=~(1<<hr);
9794             //if((regmap_entry[i+2][hr])>=0) if(!((nr>>hr)&1)) printf("%x-bogus(%d=%d)\n",start+i*4,hr,regmap_entry[i+2][hr]);
9795           }
9796         }
9797       }
9798       // Registers that are overwritten are not needed
9799       //if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9800       //if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9801       // Merge in delay slot
9802       for(hr=0;hr<HOST_REGS;hr++)
9803       {
9804         if(!likely[i]) {
9805           // These are overwritten unless the branch is "likely"
9806           // and the delay slot is nullified if not taken
9807           if(rt1[i+1]&&rt1[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9808           if(rt2[i+1]&&rt2[i+1]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9809         }
9810         if(us1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9811         if(us2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9812         if(rs1[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9813         if(rs2[i+1]==regmap_pre[i][hr]) nr|=1<<hr;
9814         if(us1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9815         if(us2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9816         if(rs1[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9817         if(rs2[i+1]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9818         if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1)) {
9819           if(dep1[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9820           if(dep2[i+1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9821         }
9822         if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1)) {
9823           if(dep1[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9824           if(dep2[i+1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9825         }
9826         if(itype[i+1]==STORE || itype[i+1]==STORELR || (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) {
9827           if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9828           if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9829         }
9830       }
9831     }
9832     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
9833     {
9834       // SYSCALL instruction (software interrupt)
9835       nr=0;
9836     }
9837     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
9838     {
9839       // ERET instruction (return from interrupt)
9840       nr=0;
9841     }
9842     else // Non-branch
9843     {
9844       if(i<slen-1) {
9845         for(hr=0;hr<HOST_REGS;hr++) {
9846           if(regmap_pre[i+1][hr]>=0&&get_reg(regs[i+1].regmap_entry,regmap_pre[i+1][hr])<0) nr&=~(1<<hr);
9847           if(regs[i].regmap[hr]!=regmap_pre[i+1][hr]) nr&=~(1<<hr);
9848           if(regs[i].regmap[hr]!=regmap_pre[i][hr]) nr&=~(1<<hr);
9849           if(regs[i].regmap[hr]<0) nr&=~(1<<hr);
9850         }
9851       }
9852     }
9853     for(hr=0;hr<HOST_REGS;hr++)
9854     {
9855       // Overwritten registers are not needed
9856       if(rt1[i]&&rt1[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9857       if(rt2[i]&&rt2[i]==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9858       if(FTEMP==(regs[i].regmap[hr]&63)) nr&=~(1<<hr);
9859       // Source registers are needed
9860       if(us1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9861       if(us2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9862       if(rs1[i]==regmap_pre[i][hr]) nr|=1<<hr;
9863       if(rs2[i]==regmap_pre[i][hr]) nr|=1<<hr;
9864       if(us1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9865       if(us2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9866       if(rs1[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9867       if(rs2[i]==regs[i].regmap_entry[hr]) nr|=1<<hr;
9868       if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1)) {
9869         if(dep1[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9870         if(dep1[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9871       }
9872       if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1)) {
9873         if(dep2[i]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9874         if(dep2[i]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9875       }
9876       if(itype[i]==STORE || itype[i]==STORELR || (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) {
9877         if(regmap_pre[i][hr]==INVCP) nr|=1<<hr;
9878         if(regs[i].regmap_entry[hr]==INVCP) nr|=1<<hr;
9879       }
9880       // Don't store a register immediately after writing it, as that
9881       // may prevent dual-issue.
9882       // But do so if this is a branch target; otherwise we
9883       // might have to load the register before the branch.
9884       if(i>0&&!bt[i]&&((regs[i].wasdirty>>hr)&1)) {
9885         if((regmap_pre[i][hr]>0&&regmap_pre[i][hr]<64&&!((unneeded_reg[i]>>regmap_pre[i][hr])&1)) ||
9886            (regmap_pre[i][hr]>64&&!((unneeded_reg_upper[i]>>(regmap_pre[i][hr]&63))&1)) ) {
9887           if(rt1[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9888           if(rt2[i-1]==(regmap_pre[i][hr]&63)) nr|=1<<hr;
9889         }
9890         if((regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64&&!((unneeded_reg[i]>>regs[i].regmap_entry[hr])&1)) ||
9891            (regs[i].regmap_entry[hr]>64&&!((unneeded_reg_upper[i]>>(regs[i].regmap_entry[hr]&63))&1)) ) {
9892           if(rt1[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9893           if(rt2[i-1]==(regs[i].regmap_entry[hr]&63)) nr|=1<<hr;
9894         }
9895       }
9896     }
9897     // Cycle count is needed at branches.  Assume it is needed at the target too.
9898     if(i==0||bt[i]||itype[i]==CJUMP||itype[i]==FJUMP||itype[i]==SPAN) {
9899       if(regmap_pre[i][HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9900       if(regs[i].regmap_entry[HOST_CCREG]==CCREG) nr|=1<<HOST_CCREG;
9901     }
9902     // Save it
9903     needed_reg[i]=nr;
9904     
9905     // Deallocate unneeded registers
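    // A mapping is only dropped if the host register does not hold a source
    // or destination of this instruction (or, for branches, of its delay
    // slot) and is not one of the special temporaries (CCREG, PTEMP, FTEMP,
    // RHASH/RHTBL, map register) that later emitted code still relies on.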
9906     for(hr=0;hr<HOST_REGS;hr++)
9907     {
9908       if(!((nr>>hr)&1)) {
9909         if(regs[i].regmap_entry[hr]!=CCREG) regs[i].regmap_entry[hr]=-1;
9910         if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9911            (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9912            (regs[i].regmap[hr]&63)!=PTEMP && (regs[i].regmap[hr]&63)!=CCREG)
9913         {
9914           if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9915           {
9916             if(likely[i]) {
9917               regs[i].regmap[hr]=-1;
9918               regs[i].isconst&=~(1<<hr);
9919               if(i<slen-2) {
9920                 regmap_pre[i+2][hr]=-1;
9921                 regs[i+2].wasconst&=~(1<<hr);
9922               }
9923             }
9924           }
9925         }
9926         if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
9927         {
9928           int d1=0,d2=0,map=0,temp=0;
9929           if(get_reg(regs[i].regmap,rt1[i+1]|64)>=0||get_reg(branch_regs[i].regmap,rt1[i+1]|64)>=0)
9930           {
9931             d1=dep1[i+1];
9932             d2=dep2[i+1];
9933           }
9934           if(using_tlb) {
9935             if(itype[i+1]==LOAD || itype[i+1]==LOADLR ||
9936                itype[i+1]==STORE || itype[i+1]==STORELR ||
9937                itype[i+1]==C1LS || itype[i+1]==C2LS)
9938             map=TLREG;
9939           } else
9940           if(itype[i+1]==STORE || itype[i+1]==STORELR ||
9941              (opcode[i+1]&0x3b)==0x39 || (opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
9942             map=INVCP;
9943           }
9944           if(itype[i+1]==LOADLR || itype[i+1]==STORELR ||
9945              itype[i+1]==C1LS || itype[i+1]==C2LS)
9946             temp=FTEMP;
9947           if((regs[i].regmap[hr]&63)!=rs1[i] && (regs[i].regmap[hr]&63)!=rs2[i] &&
9948              (regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
9949              (regs[i].regmap[hr]&63)!=rt1[i+1] && (regs[i].regmap[hr]&63)!=rt2[i+1] &&
9950              (regs[i].regmap[hr]^64)!=us1[i+1] && (regs[i].regmap[hr]^64)!=us2[i+1] &&
9951              (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
9952              regs[i].regmap[hr]!=rs1[i+1] && regs[i].regmap[hr]!=rs2[i+1] &&
9953              (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=PTEMP &&
9954              regs[i].regmap[hr]!=RHASH && regs[i].regmap[hr]!=RHTBL &&
9955              regs[i].regmap[hr]!=RTEMP && regs[i].regmap[hr]!=CCREG &&
9956              regs[i].regmap[hr]!=map )
9957           {
9958             regs[i].regmap[hr]=-1;
9959             regs[i].isconst&=~(1<<hr);
9960             if((branch_regs[i].regmap[hr]&63)!=rs1[i] && (branch_regs[i].regmap[hr]&63)!=rs2[i] &&
9961                (branch_regs[i].regmap[hr]&63)!=rt1[i] && (branch_regs[i].regmap[hr]&63)!=rt2[i] &&
9962                (branch_regs[i].regmap[hr]&63)!=rt1[i+1] && (branch_regs[i].regmap[hr]&63)!=rt2[i+1] &&
9963                (branch_regs[i].regmap[hr]^64)!=us1[i+1] && (branch_regs[i].regmap[hr]^64)!=us2[i+1] &&
9964                (branch_regs[i].regmap[hr]^64)!=d1 && (branch_regs[i].regmap[hr]^64)!=d2 &&
9965                branch_regs[i].regmap[hr]!=rs1[i+1] && branch_regs[i].regmap[hr]!=rs2[i+1] &&
9966                (branch_regs[i].regmap[hr]&63)!=temp && branch_regs[i].regmap[hr]!=PTEMP &&
9967                branch_regs[i].regmap[hr]!=RHASH && branch_regs[i].regmap[hr]!=RHTBL &&
9968                branch_regs[i].regmap[hr]!=RTEMP && branch_regs[i].regmap[hr]!=CCREG &&
9969                branch_regs[i].regmap[hr]!=map)
9970             {
9971               branch_regs[i].regmap[hr]=-1;
9972               branch_regs[i].regmap_entry[hr]=-1;
9973               if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
9974               {
9975                 if(!likely[i]&&i<slen-2) {
9976                   regmap_pre[i+2][hr]=-1;
9977                   regs[i+2].wasconst&=~(1<<hr);
9978                 }
9979               }
9980             }
9981           }
9982         }
9983         else
9984         {
9985           // Non-branch
9986           if(i>0)
9987           {
9988             int d1=0,d2=0,map=-1,temp=-1;
9989             if(get_reg(regs[i].regmap,rt1[i]|64)>=0)
9990             {
9991               d1=dep1[i];
9992               d2=dep2[i];
9993             }
9994             if(using_tlb) {
9995               if(itype[i]==LOAD || itype[i]==LOADLR ||
9996                  itype[i]==STORE || itype[i]==STORELR ||
9997                  itype[i]==C1LS || itype[i]==C2LS)
9998               map=TLREG;
9999             } else if(itype[i]==STORE || itype[i]==STORELR ||
10000                       (opcode[i]&0x3b)==0x39 || (opcode[i]&0x3b)==0x3a) { // SWC1/SDC1 || SWC2/SDC2
10001               map=INVCP;
10002             }
10003             if(itype[i]==LOADLR || itype[i]==STORELR ||
10004                itype[i]==C1LS || itype[i]==C2LS)
10005               temp=FTEMP;
10006             if((regs[i].regmap[hr]&63)!=rt1[i] && (regs[i].regmap[hr]&63)!=rt2[i] &&
10007                (regs[i].regmap[hr]^64)!=us1[i] && (regs[i].regmap[hr]^64)!=us2[i] &&
10008                (regs[i].regmap[hr]^64)!=d1 && (regs[i].regmap[hr]^64)!=d2 &&
10009                regs[i].regmap[hr]!=rs1[i] && regs[i].regmap[hr]!=rs2[i] &&
10010                (regs[i].regmap[hr]&63)!=temp && regs[i].regmap[hr]!=map &&
10011                (itype[i]!=SPAN||regs[i].regmap[hr]!=CCREG))
10012             {
10013               if(i<slen-1&&!is_ds[i]) {
10014                 if(regmap_pre[i+1][hr]!=-1 || regs[i].regmap[hr]!=-1)
10015                 if(regmap_pre[i+1][hr]!=regs[i].regmap[hr])
10016                 if(regs[i].regmap[hr]<64||!((regs[i].was32>>(regs[i].regmap[hr]&63))&1))
10017                 {
10018                   printf("fail: %x (%d %d!=%d)\n",start+i*4,hr,regmap_pre[i+1][hr],regs[i].regmap[hr]);
10019                   assert(regmap_pre[i+1][hr]==regs[i].regmap[hr]);
10020                 }
10021                 regmap_pre[i+1][hr]=-1;
10022                 if(regs[i+1].regmap_entry[hr]==CCREG) regs[i+1].regmap_entry[hr]=-1;
10023                 regs[i+1].wasconst&=~(1<<hr);
10024               }
10025               regs[i].regmap[hr]=-1;
10026               regs[i].isconst&=~(1<<hr);
10027             }
10028           }
10029         }
10030       }
10031     }
10032   }
10033   
10034   /* Pass 5 - Pre-allocate registers */
10035   
10036   // If a register is allocated during a loop, try to allocate it for the
10037   // entire loop, if possible.  This avoids loading/storing registers
10038   // inside of the loop.
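  //
  // Illustrative example (hypothetical guest code):
  //   loop: lw    $t0, 0($a0)
  //         addu  $v0, $v0, $t0
  //         addiu $a0, $a0, 4
  //         bne   $a0, $a1, loop
  //         nop                     (delay slot)
  // If $a0/$v0 get host registers somewhere in the body, this pass tries to
  // extend that mapping back to the loop entry, so the values are not
  // written back and reloaded on every iteration.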
10039   
10040   signed char f_regmap[HOST_REGS];
10041   clear_all_regs(f_regmap);
10042   for(i=0;i<slen-1;i++)
10043   {
10044     if(itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10045     {
10046       if(ba[i]>=start && ba[i]<(start+i*4)) 
10047       if(itype[i+1]==NOP||itype[i+1]==MOV||itype[i+1]==ALU
10048       ||itype[i+1]==SHIFTIMM||itype[i+1]==IMM16||itype[i+1]==LOAD
10049       ||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS
10050       ||itype[i+1]==SHIFT||itype[i+1]==COP1||itype[i+1]==FLOAT
10051       ||itype[i+1]==FCOMP||itype[i+1]==FCONV
10052       ||itype[i+1]==COP2||itype[i+1]==C2LS||itype[i+1]==C2OP)
10053       {
10054         int t=(ba[i]-start)>>2;
10055         if(t>0&&(itype[t-1]!=UJUMP&&itype[t-1]!=RJUMP&&itype[t-1]!=CJUMP&&itype[t-1]!=SJUMP&&itype[t-1]!=FJUMP)) // loop_preload can't handle jumps into delay slots
10056         if(t<2||(itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||rt1[t-2]!=31) // call/ret assumes no registers allocated
10057         for(hr=0;hr<HOST_REGS;hr++)
10058         {
10059           if(regs[i].regmap[hr]>64) {
10060             if(!((regs[i].dirty>>hr)&1))
10061               f_regmap[hr]=regs[i].regmap[hr];
10062             else f_regmap[hr]=-1;
10063           }
10064           else if(regs[i].regmap[hr]>=0) {
10065             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10066               // dealloc old register
10067               int n;
10068               for(n=0;n<HOST_REGS;n++)
10069               {
10070                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10071               }
10072               // and alloc new one
10073               f_regmap[hr]=regs[i].regmap[hr];
10074             }
10075           }
10076           if(branch_regs[i].regmap[hr]>64) {
10077             if(!((branch_regs[i].dirty>>hr)&1))
10078               f_regmap[hr]=branch_regs[i].regmap[hr];
10079             else f_regmap[hr]=-1;
10080           }
10081           else if(branch_regs[i].regmap[hr]>=0) {
10082             if(f_regmap[hr]!=branch_regs[i].regmap[hr]) {
10083               // dealloc old register
10084               int n;
10085               for(n=0;n<HOST_REGS;n++)
10086               {
10087                 if(f_regmap[n]==branch_regs[i].regmap[hr]) {f_regmap[n]=-1;}
10088               }
10089               // and alloc new one
10090               f_regmap[hr]=branch_regs[i].regmap[hr];
10091             }
10092           }
10093           if(ooo[i]) {
10094             if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) 
10095               f_regmap[hr]=branch_regs[i].regmap[hr];
10096           }else{
10097             if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) 
10098               f_regmap[hr]=branch_regs[i].regmap[hr];
10099           }
10100           // Avoid dirty->clean transition
10101           #ifdef DESTRUCTIVE_WRITEBACK
10102           if(t>0) if(get_reg(regmap_pre[t],f_regmap[hr])>=0) if((regs[t].wasdirty>>get_reg(regmap_pre[t],f_regmap[hr]))&1) f_regmap[hr]=-1;
10103           #endif
10104           // This check is only strictly required in the DESTRUCTIVE_WRITEBACK
10105           // case above, but it's always a good idea anyway.  We can't hoist the
10106           // load if the register was already allocated, so there's no point
10107           // wasting time analyzing most of these cases.  It only "succeeds"
10108           // when the mapping was different and the load can be replaced with
10109           // a mov, which is of negligible benefit.  So such cases are
10110           // skipped below.
10111           if(f_regmap[hr]>0) {
10112             if(regs[t].regmap[hr]==f_regmap[hr]||(regs[t].regmap_entry[hr]<0&&get_reg(regmap_pre[t],f_regmap[hr])<0)) {
10113               int r=f_regmap[hr];
10114               for(j=t;j<=i;j++)
10115               {
10116                 //printf("Test %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10117                 if(r<34&&((unneeded_reg[j]>>r)&1)) break;
10118                 if(r>63&&((unneeded_reg_upper[j]>>(r&63))&1)) break;
10119                 if(r>63) {
10120                   // NB This can exclude the case where the upper-half
10121                   // register is lower numbered than the lower-half
10122                   // register.  Not sure if it's worth fixing...
10123                   if(get_reg(regs[j].regmap,r&63)<0) break;
10124                   if(get_reg(regs[j].regmap_entry,r&63)<0) break;
10125                   if(regs[j].is32&(1LL<<(r&63))) break;
10126                 }
10127                 if(regs[j].regmap[hr]==f_regmap[hr]&&(f_regmap[hr]&63)<TEMPREG) {
10128                   //printf("Hit %x -> %x, %x %d/%d\n",start+i*4,ba[i],start+j*4,hr,r);
10129                   int k;
10130                   if(regs[i].regmap[hr]==-1&&branch_regs[i].regmap[hr]==-1) {
10131                     if(get_reg(regs[i+2].regmap,f_regmap[hr])>=0) break;
10132                     if(r>63) {
10133                       if(get_reg(regs[i].regmap,r&63)<0) break;
10134                       if(get_reg(branch_regs[i].regmap,r&63)<0) break;
10135                     }
10136                     k=i;
10137                     while(k>1&&regs[k-1].regmap[hr]==-1) {
10138                       if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10139                         //printf("no free regs for store %x\n",start+(k-1)*4);
10140                         break;
10141                       }
10142                       if(get_reg(regs[k-1].regmap,f_regmap[hr])>=0) {
10143                         //printf("no-match due to different register\n");
10144                         break;
10145                       }
10146                       if(itype[k-2]==UJUMP||itype[k-2]==RJUMP||itype[k-2]==CJUMP||itype[k-2]==SJUMP||itype[k-2]==FJUMP) {
10147                         //printf("no-match due to branch\n");
10148                         break;
10149                       }
10150                       // call/ret fast path assumes no registers allocated
10151                       if(k>2&&(itype[k-3]==UJUMP||itype[k-3]==RJUMP)&&rt1[k-3]==31) {
10152                         break;
10153                       }
10154                       if(r>63) {
10155                         // NB This can exclude the case where the upper-half
10156                         // register is lower numbered than the lower-half
10157                         // register.  Not sure if it's worth fixing...
10158                         if(get_reg(regs[k-1].regmap,r&63)<0) break;
10159                         if(regs[k-1].is32&(1LL<<(r&63))) break;
10160                       }
10161                       k--;
10162                     }
10163                     if(i<slen-1) {
10164                       if((regs[k].is32&(1LL<<f_regmap[hr]))!=
10165                         (regs[i+2].was32&(1LL<<f_regmap[hr]))) {
10166                         //printf("bad match after branch\n");
10167                         break;
10168                       }
10169                     }
10170                     if(regs[k-1].regmap[hr]==f_regmap[hr]&&regmap_pre[k][hr]==f_regmap[hr]) {
10171                       //printf("Extend r%d, %x ->\n",hr,start+k*4);
10172                       while(k<i) {
10173                         regs[k].regmap_entry[hr]=f_regmap[hr];
10174                         regs[k].regmap[hr]=f_regmap[hr];
10175                         regmap_pre[k+1][hr]=f_regmap[hr];
10176                         regs[k].wasdirty&=~(1<<hr);
10177                         regs[k].dirty&=~(1<<hr);
10178                         regs[k].wasdirty|=(1<<hr)&regs[k-1].dirty;
10179                         regs[k].dirty|=(1<<hr)&regs[k].wasdirty;
10180                         regs[k].wasconst&=~(1<<hr);
10181                         regs[k].isconst&=~(1<<hr);
10182                         k++;
10183                       }
10184                     }
10185                     else {
10186                       //printf("Fail Extend r%d, %x ->\n",hr,start+k*4);
10187                       break;
10188                     }
10189                     assert(regs[i-1].regmap[hr]==f_regmap[hr]);
10190                     if(regs[i-1].regmap[hr]==f_regmap[hr]&&regmap_pre[i][hr]==f_regmap[hr]) {
10191                       //printf("OK fill %x (r%d)\n",start+i*4,hr);
10192                       regs[i].regmap_entry[hr]=f_regmap[hr];
10193                       regs[i].regmap[hr]=f_regmap[hr];
10194                       regs[i].wasdirty&=~(1<<hr);
10195                       regs[i].dirty&=~(1<<hr);
10196                       regs[i].wasdirty|=(1<<hr)&regs[i-1].dirty;
10197                       regs[i].dirty|=(1<<hr)&regs[i-1].dirty;
10198                       regs[i].wasconst&=~(1<<hr);
10199                       regs[i].isconst&=~(1<<hr);
10200                       branch_regs[i].regmap_entry[hr]=f_regmap[hr];
10201                       branch_regs[i].wasdirty&=~(1<<hr);
10202                       branch_regs[i].wasdirty|=(1<<hr)&regs[i].dirty;
10203                       branch_regs[i].regmap[hr]=f_regmap[hr];
10204                       branch_regs[i].dirty&=~(1<<hr);
10205                       branch_regs[i].dirty|=(1<<hr)&regs[i].dirty;
10206                       branch_regs[i].wasconst&=~(1<<hr);
10207                       branch_regs[i].isconst&=~(1<<hr);
10208                       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000) {
10209                         regmap_pre[i+2][hr]=f_regmap[hr];
10210                         regs[i+2].wasdirty&=~(1<<hr);
10211                         regs[i+2].wasdirty|=(1<<hr)&regs[i].dirty;
10212                         assert((branch_regs[i].is32&(1LL<<f_regmap[hr]))==
10213                           (regs[i+2].was32&(1LL<<f_regmap[hr])));
10214                       }
10215                     }
10216                   }
10217                   for(k=t;k<j;k++) {
10218                     // Alloc register clean at beginning of loop,
10219                     // but may dirty it in pass 6
10220                     regs[k].regmap_entry[hr]=f_regmap[hr];
10221                     regs[k].regmap[hr]=f_regmap[hr];
10222                     regs[k].dirty&=~(1<<hr);
10223                     regs[k].wasconst&=~(1<<hr);
10224                     regs[k].isconst&=~(1<<hr);
10225                     if(itype[k]==UJUMP||itype[k]==RJUMP||itype[k]==CJUMP||itype[k]==SJUMP||itype[k]==FJUMP) {
10226                       branch_regs[k].regmap_entry[hr]=f_regmap[hr];
10227                       branch_regs[k].regmap[hr]=f_regmap[hr];
10228                       branch_regs[k].dirty&=~(1<<hr);
10229                       branch_regs[k].wasconst&=~(1<<hr);
10230                       branch_regs[k].isconst&=~(1<<hr);
10231                       if(itype[k]!=RJUMP&&itype[k]!=UJUMP&&(source[k]>>16)!=0x1000) {
10232                         regmap_pre[k+2][hr]=f_regmap[hr];
10233                         regs[k+2].wasdirty&=~(1<<hr);
10234                         assert((branch_regs[k].is32&(1LL<<f_regmap[hr]))==
10235                           (regs[k+2].was32&(1LL<<f_regmap[hr])));
10236                       }
10237                     }
10238                     else
10239                     {
10240                       regmap_pre[k+1][hr]=f_regmap[hr];
10241                       regs[k+1].wasdirty&=~(1<<hr);
10242                     }
10243                   }
10244                   if(regs[j].regmap[hr]==f_regmap[hr])
10245                     regs[j].regmap_entry[hr]=f_regmap[hr];
10246                   break;
10247                 }
10248                 if(j==i) break;
10249                 if(regs[j].regmap[hr]>=0)
10250                   break;
10251                 if(get_reg(regs[j].regmap,f_regmap[hr])>=0) {
10252                   //printf("no-match due to different register\n");
10253                   break;
10254                 }
10255                 if((regs[j+1].is32&(1LL<<f_regmap[hr]))!=(regs[j].is32&(1LL<<f_regmap[hr]))) {
10256                   //printf("32/64 mismatch %x %d\n",start+j*4,hr);
10257                   break;
10258                 }
10259                 if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10260                 {
10261                   // Stop on unconditional branch
10262                   break;
10263                 }
10264                 if(itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP)
10265                 {
10266                   if(ooo[j]) {
10267                     if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) 
10268                       break;
10269                   }else{
10270                     if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) 
10271                       break;
10272                   }
10273                   if(get_reg(branch_regs[j].regmap,f_regmap[hr])>=0) {
10274                     //printf("no-match due to different register (branch)\n");
10275                     break;
10276                   }
10277                 }
10278                 if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10279                   //printf("No free regs for store %x\n",start+j*4);
10280                   break;
10281                 }
10282                 if(f_regmap[hr]>=64) {
10283                   if(regs[j].is32&(1LL<<(f_regmap[hr]&63))) {
10284                     break;
10285                   }
10286                   else
10287                   {
10288                     if(get_reg(regs[j].regmap,f_regmap[hr]&63)<0) {
10289                       break;
10290                     }
10291                   }
10292                 }
10293               }
10294             }
10295           }
10296         }
10297       }
10298     }else{
10299       // Non branch or undetermined branch target
10300       for(hr=0;hr<HOST_REGS;hr++)
10301       {
10302         if(hr!=EXCLUDE_REG) {
10303           if(regs[i].regmap[hr]>64) {
10304             if(!((regs[i].dirty>>hr)&1))
10305               f_regmap[hr]=regs[i].regmap[hr];
10306           }
10307           else if(regs[i].regmap[hr]>=0) {
10308             if(f_regmap[hr]!=regs[i].regmap[hr]) {
10309               // dealloc old register
10310               int n;
10311               for(n=0;n<HOST_REGS;n++)
10312               {
10313                 if(f_regmap[n]==regs[i].regmap[hr]) {f_regmap[n]=-1;}
10314               }
10315               // and alloc new one
10316               f_regmap[hr]=regs[i].regmap[hr];
10317             }
10318           }
10319         }
10320       }
10321       // Try to restore cycle count at branch targets
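      // If this is a branch target, try to keep the cycle count in
      // HOST_CCREG: fill forward from here to the next point where CCREG is
      // mapped again, and fill backwards to an earlier point where it was
      // already held, instead of spilling and reloading it.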
10322       if(bt[i]) {
10323         for(j=i;j<slen-1;j++) {
10324           if(regs[j].regmap[HOST_CCREG]!=-1) break;
10325           if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) {
10326             //printf("no free regs for store %x\n",start+j*4);
10327             break;
10328           }
10329         }
10330         if(regs[j].regmap[HOST_CCREG]==CCREG) {
10331           int k=i;
10332           //printf("Extend CC, %x -> %x\n",start+k*4,start+j*4);
10333           while(k<j) {
10334             regs[k].regmap_entry[HOST_CCREG]=CCREG;
10335             regs[k].regmap[HOST_CCREG]=CCREG;
10336             regmap_pre[k+1][HOST_CCREG]=CCREG;
10337             regs[k+1].wasdirty|=1<<HOST_CCREG;
10338             regs[k].dirty|=1<<HOST_CCREG;
10339             regs[k].wasconst&=~(1<<HOST_CCREG);
10340             regs[k].isconst&=~(1<<HOST_CCREG);
10341             k++;
10342           }
10343           regs[j].regmap_entry[HOST_CCREG]=CCREG;          
10344         }
10345         // Work backwards from the branch target
10346         if(j>i&&f_regmap[HOST_CCREG]==CCREG)
10347         {
10348           //printf("Extend backwards\n");
10349           int k;
10350           k=i;
10351           while(regs[k-1].regmap[HOST_CCREG]==-1) {
10352             if(count_free_regs(regs[k-1].regmap)<=minimum_free_regs[k-1]) {
10353               //printf("no free regs for store %x\n",start+(k-1)*4);
10354               break;
10355             }
10356             k--;
10357           }
10358           if(regs[k-1].regmap[HOST_CCREG]==CCREG) {
10359             //printf("Extend CC, %x ->\n",start+k*4);
10360             while(k<=i) {
10361               regs[k].regmap_entry[HOST_CCREG]=CCREG;
10362               regs[k].regmap[HOST_CCREG]=CCREG;
10363               regmap_pre[k+1][HOST_CCREG]=CCREG;
10364               regs[k+1].wasdirty|=1<<HOST_CCREG;
10365               regs[k].dirty|=1<<HOST_CCREG;
10366               regs[k].wasconst&=~(1<<HOST_CCREG);
10367               regs[k].isconst&=~(1<<HOST_CCREG);
10368               k++;
10369             }
10370           }
10371           else {
10372             //printf("Fail Extend CC, %x ->\n",start+k*4);
10373           }
10374         }
10375       }
10376       if(itype[i]!=STORE&&itype[i]!=STORELR&&itype[i]!=C1LS&&itype[i]!=SHIFT&&
10377          itype[i]!=NOP&&itype[i]!=MOV&&itype[i]!=ALU&&itype[i]!=SHIFTIMM&&
10378          itype[i]!=IMM16&&itype[i]!=LOAD&&itype[i]!=COP1&&itype[i]!=FLOAT&&
10379          itype[i]!=FCONV&&itype[i]!=FCOMP)
10380       {
10381         memcpy(f_regmap,regs[i].regmap,sizeof(f_regmap));
10382       }
10383     }
10384   }
10385   
10386   // Cache memory offset or tlb map pointer if a register is available
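  // Heuristic: for each host register, score how many upcoming memory access
  // instructions could use a cached copy of the memory offset (ROREG) or tlb
  // map pointer (MMREG) before the register is taken for something else; if
  // more than one access would benefit, pre-allocate it over that range.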
10387   #ifndef HOST_IMM_ADDR32
10388   #ifndef RAM_OFFSET
10389   if(using_tlb)
10390   #endif
10391   {
10392     int earliest_available[HOST_REGS];
10393     int loop_start[HOST_REGS];
10394     int score[HOST_REGS];
10395     int end[HOST_REGS];
10396     int reg=using_tlb?MMREG:ROREG;
10397
10398     // Init
10399     for(hr=0;hr<HOST_REGS;hr++) {
10400       score[hr]=0;earliest_available[hr]=0;
10401       loop_start[hr]=MAXBLOCK;
10402     }
10403     for(i=0;i<slen-1;i++)
10404     {
10405       // Can't do anything if no registers are available
10406       if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i]) {
10407         for(hr=0;hr<HOST_REGS;hr++) {
10408           score[hr]=0;earliest_available[hr]=i+1;
10409           loop_start[hr]=MAXBLOCK;
10410         }
10411       }
10412       if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10413         if(!ooo[i]) {
10414           if(count_free_regs(branch_regs[i].regmap)<=minimum_free_regs[i+1]) {
10415             for(hr=0;hr<HOST_REGS;hr++) {
10416               score[hr]=0;earliest_available[hr]=i+1;
10417               loop_start[hr]=MAXBLOCK;
10418             }
10419           }
10420         }else{
10421           if(count_free_regs(regs[i].regmap)<=minimum_free_regs[i+1]) {
10422             for(hr=0;hr<HOST_REGS;hr++) {
10423               score[hr]=0;earliest_available[hr]=i+1;
10424               loop_start[hr]=MAXBLOCK;
10425             }
10426           }
10427         }
10428       }
10429       // Mark unavailable registers
10430       for(hr=0;hr<HOST_REGS;hr++) {
10431         if(regs[i].regmap[hr]>=0) {
10432           score[hr]=0;earliest_available[hr]=i+1;
10433           loop_start[hr]=MAXBLOCK;
10434         }
10435         if(itype[i]==UJUMP||itype[i]==RJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
10436           if(branch_regs[i].regmap[hr]>=0) {
10437             score[hr]=0;earliest_available[hr]=i+2;
10438             loop_start[hr]=MAXBLOCK;
10439           }
10440         }
10441       }
10442       // No register allocations after unconditional jumps
10443       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
10444       {
10445         for(hr=0;hr<HOST_REGS;hr++) {
10446           score[hr]=0;earliest_available[hr]=i+2;
10447           loop_start[hr]=MAXBLOCK;
10448         }
10449         i++; // Skip delay slot too
10450         //printf("skip delay slot: %x\n",start+i*4);
10451       }
10452       else
10453       // Possible match
10454       if(itype[i]==LOAD||itype[i]==LOADLR||
10455          itype[i]==STORE||itype[i]==STORELR||itype[i]==C1LS) {
10456         for(hr=0;hr<HOST_REGS;hr++) {
10457           if(hr!=EXCLUDE_REG) {
10458             end[hr]=i-1;
10459             for(j=i;j<slen-1;j++) {
10460               if(regs[j].regmap[hr]>=0) break;
10461               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10462                 if(branch_regs[j].regmap[hr]>=0) break;
10463                 if(ooo[j]) {
10464                   if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j+1]) break;
10465                 }else{
10466                   if(count_free_regs(branch_regs[j].regmap)<=minimum_free_regs[j+1]) break;
10467                 }
10468               }
10469               else if(count_free_regs(regs[j].regmap)<=minimum_free_regs[j]) break;
10470               if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10471                 int t=(ba[j]-start)>>2;
10472                 if(t<j&&t>=earliest_available[hr]) {
10473                   if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) { // call/ret assumes no registers allocated
10474                     // Score a point for hoisting loop invariant
10475                     if(t<loop_start[hr]) loop_start[hr]=t;
10476                     //printf("set loop_start: i=%x j=%x (%x)\n",start+i*4,start+j*4,start+t*4);
10477                     score[hr]++;
10478                     end[hr]=j;
10479                   }
10480                 }
10481                 else if(t<j) {
10482                   if(regs[t].regmap[hr]==reg) {
10483                     // Score a point if the branch target matches this register
10484                     score[hr]++;
10485                     end[hr]=j;
10486                   }
10487                 }
10488                 if(itype[j+1]==LOAD||itype[j+1]==LOADLR||
10489                    itype[j+1]==STORE||itype[j+1]==STORELR||itype[j+1]==C1LS) {
10490                   score[hr]++;
10491                   end[hr]=j;
10492                 }
10493               }
10494               if(itype[j]==UJUMP||itype[j]==RJUMP||(source[j]>>16)==0x1000)
10495               {
10496                 // Stop on unconditional branch
10497                 break;
10498               }
10499               else
10500               if(itype[j]==LOAD||itype[j]==LOADLR||
10501                  itype[j]==STORE||itype[j]==STORELR||itype[j]==C1LS) {
10502                 score[hr]++;
10503                 end[hr]=j;
10504               }
10505             }
10506           }
10507         }
10508         // Find highest score and allocate that register
10509         int maxscore=0;
10510         for(hr=0;hr<HOST_REGS;hr++) {
10511           if(hr!=EXCLUDE_REG) {
10512             if(score[hr]>score[maxscore]) {
10513               maxscore=hr;
10514               //printf("highest score: %d %d (%x->%x)\n",score[hr],hr,start+i*4,start+end[hr]*4);
10515             }
10516           }
10517         }
10518         if(score[maxscore]>1)
10519         {
10520           if(i<loop_start[maxscore]) loop_start[maxscore]=i;
10521           for(j=loop_start[maxscore];j<slen&&j<=end[maxscore];j++) {
10522             //if(regs[j].regmap[maxscore]>=0) {printf("oops: %x %x was %d=%d\n",loop_start[maxscore]*4+start,j*4+start,maxscore,regs[j].regmap[maxscore]);}
10523             assert(regs[j].regmap[maxscore]<0);
10524             if(j>loop_start[maxscore]) regs[j].regmap_entry[maxscore]=reg;
10525             regs[j].regmap[maxscore]=reg;
10526             regs[j].dirty&=~(1<<maxscore);
10527             regs[j].wasconst&=~(1<<maxscore);
10528             regs[j].isconst&=~(1<<maxscore);
10529             if(itype[j]==UJUMP||itype[j]==RJUMP||itype[j]==CJUMP||itype[j]==SJUMP||itype[j]==FJUMP) {
10530               branch_regs[j].regmap[maxscore]=reg;
10531               branch_regs[j].wasdirty&=~(1<<maxscore);
10532               branch_regs[j].dirty&=~(1<<maxscore);
10533               branch_regs[j].wasconst&=~(1<<maxscore);
10534               branch_regs[j].isconst&=~(1<<maxscore);
10535               if(itype[j]!=RJUMP&&itype[j]!=UJUMP&&(source[j]>>16)!=0x1000) {
10536                 regmap_pre[j+2][maxscore]=reg;
10537                 regs[j+2].wasdirty&=~(1<<maxscore);
10538               }
10539               // loop optimization (loop_preload)
10540               int t=(ba[j]-start)>>2;
10541               if(t==loop_start[maxscore]) {
10542                 if(t==1||(t>1&&itype[t-2]!=UJUMP&&itype[t-2]!=RJUMP)||(t>1&&rt1[t-2]!=31)) // call/ret assumes no registers allocated
10543                   regs[t].regmap_entry[maxscore]=reg;
10544               }
10545             }
10546             else
10547             {
10548               if(j<1||(itype[j-1]!=RJUMP&&itype[j-1]!=UJUMP&&itype[j-1]!=CJUMP&&itype[j-1]!=SJUMP&&itype[j-1]!=FJUMP)) {
10549                 regmap_pre[j+1][maxscore]=reg;
10550                 regs[j+1].wasdirty&=~(1<<maxscore);
10551               }
10552             }
10553           }
10554           i=j-1;
10555           if(itype[j-1]==RJUMP||itype[j-1]==UJUMP||itype[j-1]==CJUMP||itype[j-1]==SJUMP||itype[j-1]==FJUMP) i++; // skip delay slot
10556           for(hr=0;hr<HOST_REGS;hr++) {
10557             score[hr]=0;earliest_available[hr]=i+1;
10558             loop_start[hr]=MAXBLOCK;
10559           }
10560         }
10561       }
10562     }
10563   }
10564   #endif
10565   
10566   // This allocates registers (if possible) one instruction prior
10567   // to use, which can avoid a load-use penalty on certain CPUs.
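  // Illustrative example (hypothetical): if instruction i+1 is
  //   addu $v0, $a0, $a1
  // and $a0 is mapped to a host register at i+1 but that host register is
  // free at i, extend the mapping back to i so the load of $a0 from the
  // guest register file can be emitted one instruction earlier, hiding the
  // load-use latency.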
10568   for(i=0;i<slen-1;i++)
10569   {
10570     if(!i||(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP))
10571     {
10572       if(!bt[i+1])
10573       {
10574         if(itype[i]==ALU||itype[i]==MOV||itype[i]==LOAD||itype[i]==SHIFTIMM||itype[i]==IMM16
10575            ||((itype[i]==COP1||itype[i]==COP2)&&opcode2[i]<3))
10576         {
10577           if(rs1[i+1]) {
10578             if((hr=get_reg(regs[i+1].regmap,rs1[i+1]))>=0)
10579             {
10580               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10581               {
10582                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10583                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10584                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10585                 regs[i].isconst&=~(1<<hr);
10586                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10587                 constmap[i][hr]=constmap[i+1][hr];
10588                 regs[i+1].wasdirty&=~(1<<hr);
10589                 regs[i].dirty&=~(1<<hr);
10590               }
10591             }
10592           }
10593           if(rs2[i+1]) {
10594             if((hr=get_reg(regs[i+1].regmap,rs2[i+1]))>=0)
10595             {
10596               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10597               {
10598                 regs[i].regmap[hr]=regs[i+1].regmap[hr];
10599                 regmap_pre[i+1][hr]=regs[i+1].regmap[hr];
10600                 regs[i+1].regmap_entry[hr]=regs[i+1].regmap[hr];
10601                 regs[i].isconst&=~(1<<hr);
10602                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10603                 constmap[i][hr]=constmap[i+1][hr];
10604                 regs[i+1].wasdirty&=~(1<<hr);
10605                 regs[i].dirty&=~(1<<hr);
10606               }
10607             }
10608           }
10609           // Preload target address for load instruction (non-constant)
10610           if(itype[i+1]==LOAD&&rs1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10611             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10612             {
10613               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10614               {
10615                 regs[i].regmap[hr]=rs1[i+1];
10616                 regmap_pre[i+1][hr]=rs1[i+1];
10617                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10618                 regs[i].isconst&=~(1<<hr);
10619                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10620                 constmap[i][hr]=constmap[i+1][hr];
10621                 regs[i+1].wasdirty&=~(1<<hr);
10622                 regs[i].dirty&=~(1<<hr);
10623               }
10624             }
10625           }
10626           // Load source into target register 
10627           if(lt1[i+1]&&get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10628             if((hr=get_reg(regs[i+1].regmap,rt1[i+1]))>=0)
10629             {
10630               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10631               {
10632                 regs[i].regmap[hr]=rs1[i+1];
10633                 regmap_pre[i+1][hr]=rs1[i+1];
10634                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10635                 regs[i].isconst&=~(1<<hr);
10636                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10637                 constmap[i][hr]=constmap[i+1][hr];
10638                 regs[i+1].wasdirty&=~(1<<hr);
10639                 regs[i].dirty&=~(1<<hr);
10640               }
10641             }
10642           }
10643           // Preload map address
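                // (Only when the address register of the next load/store holds a constant:
                // the memory-map pointer, TLREG, is then staged in an MGEN scratch register
                // one instruction early.  Presumably unnecessary with HOST_IMM_ADDR32, where
                // constant addresses can be encoded directly.)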
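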
10644           #ifndef HOST_IMM_ADDR32
10645           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR||itype[i+1]==C1LS||itype[i+1]==C2LS) {
10646             hr=get_reg(regs[i+1].regmap,TLREG);
10647             if(hr>=0) {
10648               int sr=get_reg(regs[i+1].regmap,rs1[i+1]);
10649               if(sr>=0&&((regs[i+1].wasconst>>sr)&1)) {
10650                 int nr;
10651                 if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10652                 {
10653                   regs[i].regmap[hr]=MGEN1+((i+1)&1);
10654                   regmap_pre[i+1][hr]=MGEN1+((i+1)&1);
10655                   regs[i+1].regmap_entry[hr]=MGEN1+((i+1)&1);
10656                   regs[i].isconst&=~(1<<hr);
10657                   regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10658                   constmap[i][hr]=constmap[i+1][hr];
10659                   regs[i+1].wasdirty&=~(1<<hr);
10660                   regs[i].dirty&=~(1<<hr);
10661                 }
10662                 else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10663                 {
10664                   // move it to another register
10665                   regs[i+1].regmap[hr]=-1;
10666                   regmap_pre[i+2][hr]=-1;
10667                   regs[i+1].regmap[nr]=TLREG;
10668                   regmap_pre[i+2][nr]=TLREG;
10669                   regs[i].regmap[nr]=MGEN1+((i+1)&1);
10670                   regmap_pre[i+1][nr]=MGEN1+((i+1)&1);
10671                   regs[i+1].regmap_entry[nr]=MGEN1+((i+1)&1);
10672                   regs[i].isconst&=~(1<<nr);
10673                   regs[i+1].isconst&=~(1<<nr);
10674                   regs[i].dirty&=~(1<<nr);
10675                   regs[i+1].wasdirty&=~(1<<nr);
10676                   regs[i+1].dirty&=~(1<<nr);
10677                   regs[i+2].wasdirty&=~(1<<nr);
10678                 }
10679               }
10680             }
10681           }
10682           #endif
10683           // Address for store instruction (non-constant)
10684           if(itype[i+1]==STORE||itype[i+1]==STORELR
10685              ||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SB/SH/SW/SD/SWC1/SDC1/SWC2/SDC2
10686             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10687               hr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1);
10688               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10689               else {regs[i+1].regmap[hr]=AGEN1+((i+1)&1);regs[i+1].isconst&=~(1<<hr);}
10690               assert(hr>=0);
10691               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10692               {
10693                 regs[i].regmap[hr]=rs1[i+1];
10694                 regmap_pre[i+1][hr]=rs1[i+1];
10695                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10696                 regs[i].isconst&=~(1<<hr);
10697                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10698                 constmap[i][hr]=constmap[i+1][hr];
10699                 regs[i+1].wasdirty&=~(1<<hr);
10700                 regs[i].dirty&=~(1<<hr);
10701               }
10702             }
10703           }
10704           if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) { // LWC1/LDC1, LWC2/LDC2
10705             if(get_reg(regs[i+1].regmap,rs1[i+1])<0) {
10706               int nr;
10707               hr=get_reg(regs[i+1].regmap,FTEMP);
10708               assert(hr>=0);
10709               if(regs[i].regmap[hr]<0&&regs[i+1].regmap_entry[hr]<0)
10710               {
10711                 regs[i].regmap[hr]=rs1[i+1];
10712                 regmap_pre[i+1][hr]=rs1[i+1];
10713                 regs[i+1].regmap_entry[hr]=rs1[i+1];
10714                 regs[i].isconst&=~(1<<hr);
10715                 regs[i].isconst|=regs[i+1].isconst&(1<<hr);
10716                 constmap[i][hr]=constmap[i+1][hr];
10717                 regs[i+1].wasdirty&=~(1<<hr);
10718                 regs[i].dirty&=~(1<<hr);
10719               }
10720               else if((nr=get_reg2(regs[i].regmap,regs[i+1].regmap,-1))>=0)
10721               {
10722                 // move it to another register
10723                 regs[i+1].regmap[hr]=-1;
10724                 regmap_pre[i+2][hr]=-1;
10725                 regs[i+1].regmap[nr]=FTEMP;
10726                 regmap_pre[i+2][nr]=FTEMP;
10727                 regs[i].regmap[nr]=rs1[i+1];
10728                 regmap_pre[i+1][nr]=rs1[i+1];
10729                 regs[i+1].regmap_entry[nr]=rs1[i+1];
10730                 regs[i].isconst&=~(1<<nr);
10731                 regs[i+1].isconst&=~(1<<nr);
10732                 regs[i].dirty&=~(1<<nr);
10733                 regs[i+1].wasdirty&=~(1<<nr);
10734                 regs[i+1].dirty&=~(1<<nr);
10735                 regs[i+2].wasdirty&=~(1<<nr);
10736               }
10737             }
10738           }
10739           if(itype[i+1]==LOAD||itype[i+1]==LOADLR||itype[i+1]==STORE||itype[i+1]==STORELR/*||itype[i+1]==C1LS||itype[i+1]==C2LS*/) {
10740             if(itype[i+1]==LOAD) 
10741               hr=get_reg(regs[i+1].regmap,rt1[i+1]);
10742             if(itype[i+1]==LOADLR||(opcode[i+1]&0x3b)==0x31||(opcode[i+1]&0x3b)==0x32) // LWC1/LDC1, LWC2/LDC2
10743               hr=get_reg(regs[i+1].regmap,FTEMP);
10744             if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a) { // SWC1/SDC1/SWC2/SDC2
10745               hr=get_reg(regs[i+1].regmap,AGEN1+((i+1)&1));
10746               if(hr<0) hr=get_reg(regs[i+1].regmap,-1);
10747             }
10748             if(hr>=0&&regs[i].regmap[hr]<0) {
10749               int rs=get_reg(regs[i+1].regmap,rs1[i+1]);
10750               if(rs>=0&&((regs[i+1].wasconst>>rs)&1)) {
10751                 regs[i].regmap[hr]=AGEN1+((i+1)&1);
10752                 regmap_pre[i+1][hr]=AGEN1+((i+1)&1);
10753                 regs[i+1].regmap_entry[hr]=AGEN1+((i+1)&1);
10754                 regs[i].isconst&=~(1<<hr);
10755                 regs[i+1].wasdirty&=~(1<<hr);
10756                 regs[i].dirty&=~(1<<hr);
10757               }
10758             }
10759           }
10760         }
10761       }
10762     }
10763   }
10764   
10765   /* Pass 6 - Optimize clean/dirty state */
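        // (Dirty host registers hold guest values that have not been written back yet;
        //  this pass appears to propagate that state so write-backs are only emitted
        //  where they are actually needed.)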
10766   clean_registers(0,slen-1,1);
10767   
10768   /* Pass 7 - Identify 32-bit registers */
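        // (Backward scan computing requires_32bit[]: the guest registers that appear to
        //  need valid 32-bit values at each point; the jump_in registration further below
        //  uses it to restrict external entry points, see ll_add_32.  With FORCE32 defined
        //  the scan is skipped and only branch targets are marked.)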
10769 #ifndef FORCE32
10770   provisional_r32();
10771
10772   u_int r32=0;
10773   
10774   for (i=slen-1;i>=0;i--)
10775   {
10776     int hr;
10777     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10778     {
10779       if(ba[i]<start || ba[i]>=(start+slen*4))
10780       {
10781         // Branch out of this block, don't need anything
10782         r32=0;
10783       }
10784       else
10785       {
10786         // Internal branch
10787         // Need whatever matches the target
10788         // (and doesn't get overwritten by the delay slot instruction)
10789         r32=0;
10790         int t=(ba[i]-start)>>2;
10791         if(ba[i]>start+i*4) {
10792           // Forward branch
10793           if(!(requires_32bit[t]&~regs[i].was32))
10794             r32|=requires_32bit[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10795         }else{
10796           // Backward branch
10797           //if(!(regs[t].was32&~unneeded_reg_upper[t]&~regs[i].was32))
10798           //  r32|=regs[t].was32&~unneeded_reg_upper[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10799           if(!(pr32[t]&~regs[i].was32))
10800             r32|=pr32[t]&(~(1LL<<rt1[i+1]))&(~(1LL<<rt2[i+1]));
10801         }
10802       }
10803       // Conditional branch may need registers for following instructions
10804       if(itype[i]!=RJUMP&&itype[i]!=UJUMP&&(source[i]>>16)!=0x1000)
10805       {
10806         if(i<slen-2) {
10807           r32|=requires_32bit[i+2];
10808           r32&=regs[i].was32;
10809           // Mark this address as a branch target since it may be called
10810           // upon return from interrupt
10811           bt[i+2]=1;
10812         }
10813       }
10814       // Merge in delay slot
10815       if(!likely[i]) {
10816         // These are overwritten unless the branch is "likely"
10817         // and the delay slot is nullified if not taken
10818         r32&=~(1LL<<rt1[i+1]);
10819         r32&=~(1LL<<rt2[i+1]);
10820       }
10821       // Assume these are needed (delay slot)
10822       if(us1[i+1]>0)
10823       {
10824         if((regs[i].was32>>us1[i+1])&1) r32|=1LL<<us1[i+1];
10825       }
10826       if(us2[i+1]>0)
10827       {
10828         if((regs[i].was32>>us2[i+1])&1) r32|=1LL<<us2[i+1];
10829       }
10830       if(dep1[i+1]&&!((unneeded_reg_upper[i]>>dep1[i+1])&1))
10831       {
10832         if((regs[i].was32>>dep1[i+1])&1) r32|=1LL<<dep1[i+1];
10833       }
10834       if(dep2[i+1]&&!((unneeded_reg_upper[i]>>dep2[i+1])&1))
10835       {
10836         if((regs[i].was32>>dep2[i+1])&1) r32|=1LL<<dep2[i+1];
10837       }
10838     }
10839     else if(itype[i]==SYSCALL||itype[i]==HLECALL||itype[i]==INTCALL)
10840     {
10841       // SYSCALL instruction (software interrupt)
10842       r32=0;
10843     }
10844     else if(itype[i]==COP0 && (source[i]&0x3f)==0x18)
10845     {
10846       // ERET instruction (return from interrupt)
10847       r32=0;
10848     }
10849     // Check 32 bits
10850     r32&=~(1LL<<rt1[i]);
10851     r32&=~(1LL<<rt2[i]);
10852     if(us1[i]>0)
10853     {
10854       if((regs[i].was32>>us1[i])&1) r32|=1LL<<us1[i];
10855     }
10856     if(us2[i]>0)
10857     {
10858       if((regs[i].was32>>us2[i])&1) r32|=1LL<<us2[i];
10859     }
10860     if(dep1[i]&&!((unneeded_reg_upper[i]>>dep1[i])&1))
10861     {
10862       if((regs[i].was32>>dep1[i])&1) r32|=1LL<<dep1[i];
10863     }
10864     if(dep2[i]&&!((unneeded_reg_upper[i]>>dep2[i])&1))
10865     {
10866       if((regs[i].was32>>dep2[i])&1) r32|=1LL<<dep2[i];
10867     }
10868     requires_32bit[i]=r32;
10869     
10870     // Dirty registers which are 32-bit require 32-bit input,
10871     // as they will be written back as 32-bit values
10872     for(hr=0;hr<HOST_REGS;hr++)
10873     {
10874       if(regs[i].regmap_entry[hr]>0&&regs[i].regmap_entry[hr]<64) {
10875         if((regs[i].was32>>regs[i].regmap_entry[hr])&(regs[i].wasdirty>>hr)&1) {
10876           if(!((unneeded_reg_upper[i]>>regs[i].regmap_entry[hr])&1))
10877             requires_32bit[i]|=1LL<<regs[i].regmap_entry[hr];
10878         }
10879       }
10880     }
10881     //requires_32bit[i]=is32[i]&~unneeded_reg_upper[i]; // DEBUG
10882   }
10883 #else
10884   for (i=slen-1;i>=0;i--)
10885   {
10886     if(itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
10887     {
10888       // Conditional branch
10889       if((source[i]>>16)!=0x1000&&i<slen-2) {
10890         // Mark this address as a branch target since it may be called
10891         // upon return from interrupt
10892         bt[i+2]=1;
10893       }
10894     }
10895   }
10896 #endif
10897
10898   if(itype[slen-1]==SPAN) {
10899     bt[slen-1]=1; // Mark as a branch target so instruction can restart after exception
10900   }
10901
10902 #ifdef DISASM
10903   /* Debug/disassembly */
10904   for(i=0;i<slen;i++)
10905   {
10906     printf("U:");
10907     int r;
10908     for(r=1;r<=CCREG;r++) {
10909       if((unneeded_reg[i]>>r)&1) {
10910         if(r==HIREG) printf(" HI");
10911         else if(r==LOREG) printf(" LO");
10912         else printf(" r%d",r);
10913       }
10914     }
10915 #ifndef FORCE32
10916     printf(" UU:");
10917     for(r=1;r<=CCREG;r++) {
10918       if(((unneeded_reg_upper[i]&~unneeded_reg[i])>>r)&1) {
10919         if(r==HIREG) printf(" HI");
10920         else if(r==LOREG) printf(" LO");
10921         else printf(" r%d",r);
10922       }
10923     }
10924     printf(" 32:");
10925     for(r=0;r<=CCREG;r++) {
10926       //if(((is32[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10927       if((regs[i].was32>>r)&1) {
10928         if(r==CCREG) printf(" CC");
10929         else if(r==HIREG) printf(" HI");
10930         else if(r==LOREG) printf(" LO");
10931         else printf(" r%d",r);
10932       }
10933     }
10934 #endif
10935     printf("\n");
10936     #if defined(__i386__) || defined(__x86_64__)
10937     printf("pre: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7]);
10938     #endif
10939     #ifdef __arm__
10940     printf("pre: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regmap_pre[i][0],regmap_pre[i][1],regmap_pre[i][2],regmap_pre[i][3],regmap_pre[i][4],regmap_pre[i][5],regmap_pre[i][6],regmap_pre[i][7],regmap_pre[i][8],regmap_pre[i][9],regmap_pre[i][10],regmap_pre[i][12]);
10941     #endif
10942     printf("needs: ");
10943     if(needed_reg[i]&1) printf("eax ");
10944     if((needed_reg[i]>>1)&1) printf("ecx ");
10945     if((needed_reg[i]>>2)&1) printf("edx ");
10946     if((needed_reg[i]>>3)&1) printf("ebx ");
10947     if((needed_reg[i]>>5)&1) printf("ebp ");
10948     if((needed_reg[i]>>6)&1) printf("esi ");
10949     if((needed_reg[i]>>7)&1) printf("edi ");
10950     printf("r:");
10951     for(r=0;r<=CCREG;r++) {
10952       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10953       if((requires_32bit[i]>>r)&1) {
10954         if(r==CCREG) printf(" CC");
10955         else if(r==HIREG) printf(" HI");
10956         else if(r==LOREG) printf(" LO");
10957         else printf(" r%d",r);
10958       }
10959     }
10960     printf("\n");
10961     /*printf("pr:");
10962     for(r=0;r<=CCREG;r++) {
10963       //if(((requires_32bit[i]>>r)&(~unneeded_reg[i]>>r))&1) {
10964       if((pr32[i]>>r)&1) {
10965         if(r==CCREG) printf(" CC");
10966         else if(r==HIREG) printf(" HI");
10967         else if(r==LOREG) printf(" LO");
10968         else printf(" r%d",r);
10969       }
10970     }
10971     if(pr32[i]!=requires_32bit[i]) printf(" OOPS");
10972     printf("\n");*/
10973     #if defined(__i386__) || defined(__x86_64__)
10974     printf("entry: eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7]);
10975     printf("dirty: ");
10976     if(regs[i].wasdirty&1) printf("eax ");
10977     if((regs[i].wasdirty>>1)&1) printf("ecx ");
10978     if((regs[i].wasdirty>>2)&1) printf("edx ");
10979     if((regs[i].wasdirty>>3)&1) printf("ebx ");
10980     if((regs[i].wasdirty>>5)&1) printf("ebp ");
10981     if((regs[i].wasdirty>>6)&1) printf("esi ");
10982     if((regs[i].wasdirty>>7)&1) printf("edi ");
10983     #endif
10984     #ifdef __arm__
10985     printf("entry: r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d\n",regs[i].regmap_entry[0],regs[i].regmap_entry[1],regs[i].regmap_entry[2],regs[i].regmap_entry[3],regs[i].regmap_entry[4],regs[i].regmap_entry[5],regs[i].regmap_entry[6],regs[i].regmap_entry[7],regs[i].regmap_entry[8],regs[i].regmap_entry[9],regs[i].regmap_entry[10],regs[i].regmap_entry[12]);
10986     printf("dirty: ");
10987     if(regs[i].wasdirty&1) printf("r0 ");
10988     if((regs[i].wasdirty>>1)&1) printf("r1 ");
10989     if((regs[i].wasdirty>>2)&1) printf("r2 ");
10990     if((regs[i].wasdirty>>3)&1) printf("r3 ");
10991     if((regs[i].wasdirty>>4)&1) printf("r4 ");
10992     if((regs[i].wasdirty>>5)&1) printf("r5 ");
10993     if((regs[i].wasdirty>>6)&1) printf("r6 ");
10994     if((regs[i].wasdirty>>7)&1) printf("r7 ");
10995     if((regs[i].wasdirty>>8)&1) printf("r8 ");
10996     if((regs[i].wasdirty>>9)&1) printf("r9 ");
10997     if((regs[i].wasdirty>>10)&1) printf("r10 ");
10998     if((regs[i].wasdirty>>12)&1) printf("r12 ");
10999     #endif
11000     printf("\n");
11001     disassemble_inst(i);
11002     //printf ("ccadj[%d] = %d\n",i,ccadj[i]);
11003     #if defined(__i386__) || defined(__x86_64__)
11004     printf("eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7]);
11005     if(regs[i].dirty&1) printf("eax ");
11006     if((regs[i].dirty>>1)&1) printf("ecx ");
11007     if((regs[i].dirty>>2)&1) printf("edx ");
11008     if((regs[i].dirty>>3)&1) printf("ebx ");
11009     if((regs[i].dirty>>5)&1) printf("ebp ");
11010     if((regs[i].dirty>>6)&1) printf("esi ");
11011     if((regs[i].dirty>>7)&1) printf("edi ");
11012     #endif
11013     #ifdef __arm__
11014     printf("r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",regs[i].regmap[0],regs[i].regmap[1],regs[i].regmap[2],regs[i].regmap[3],regs[i].regmap[4],regs[i].regmap[5],regs[i].regmap[6],regs[i].regmap[7],regs[i].regmap[8],regs[i].regmap[9],regs[i].regmap[10],regs[i].regmap[12]);
11015     if(regs[i].dirty&1) printf("r0 ");
11016     if((regs[i].dirty>>1)&1) printf("r1 ");
11017     if((regs[i].dirty>>2)&1) printf("r2 ");
11018     if((regs[i].dirty>>3)&1) printf("r3 ");
11019     if((regs[i].dirty>>4)&1) printf("r4 ");
11020     if((regs[i].dirty>>5)&1) printf("r5 ");
11021     if((regs[i].dirty>>6)&1) printf("r6 ");
11022     if((regs[i].dirty>>7)&1) printf("r7 ");
11023     if((regs[i].dirty>>8)&1) printf("r8 ");
11024     if((regs[i].dirty>>9)&1) printf("r9 ");
11025     if((regs[i].dirty>>10)&1) printf("r10 ");
11026     if((regs[i].dirty>>12)&1) printf("r12 ");
11027     #endif
11028     printf("\n");
11029     if(regs[i].isconst) {
11030       printf("constants: ");
11031       #if defined(__i386__) || defined(__x86_64__)
11032       if(regs[i].isconst&1) printf("eax=%x ",(int)constmap[i][0]);
11033       if((regs[i].isconst>>1)&1) printf("ecx=%x ",(int)constmap[i][1]);
11034       if((regs[i].isconst>>2)&1) printf("edx=%x ",(int)constmap[i][2]);
11035       if((regs[i].isconst>>3)&1) printf("ebx=%x ",(int)constmap[i][3]);
11036       if((regs[i].isconst>>5)&1) printf("ebp=%x ",(int)constmap[i][5]);
11037       if((regs[i].isconst>>6)&1) printf("esi=%x ",(int)constmap[i][6]);
11038       if((regs[i].isconst>>7)&1) printf("edi=%x ",(int)constmap[i][7]);
11039       #endif
11040       #ifdef __arm__
11041       if(regs[i].isconst&1) printf("r0=%x ",(int)constmap[i][0]);
11042       if((regs[i].isconst>>1)&1) printf("r1=%x ",(int)constmap[i][1]);
11043       if((regs[i].isconst>>2)&1) printf("r2=%x ",(int)constmap[i][2]);
11044       if((regs[i].isconst>>3)&1) printf("r3=%x ",(int)constmap[i][3]);
11045       if((regs[i].isconst>>4)&1) printf("r4=%x ",(int)constmap[i][4]);
11046       if((regs[i].isconst>>5)&1) printf("r5=%x ",(int)constmap[i][5]);
11047       if((regs[i].isconst>>6)&1) printf("r6=%x ",(int)constmap[i][6]);
11048       if((regs[i].isconst>>7)&1) printf("r7=%x ",(int)constmap[i][7]);
11049       if((regs[i].isconst>>8)&1) printf("r8=%x ",(int)constmap[i][8]);
11050       if((regs[i].isconst>>9)&1) printf("r9=%x ",(int)constmap[i][9]);
11051       if((regs[i].isconst>>10)&1) printf("r10=%x ",(int)constmap[i][10]);
11052       if((regs[i].isconst>>12)&1) printf("r12=%x ",(int)constmap[i][12]);
11053       #endif
11054       printf("\n");
11055     }
11056 #ifndef FORCE32
11057     printf(" 32:");
11058     for(r=0;r<=CCREG;r++) {
11059       if((regs[i].is32>>r)&1) {
11060         if(r==CCREG) printf(" CC");
11061         else if(r==HIREG) printf(" HI");
11062         else if(r==LOREG) printf(" LO");
11063         else printf(" r%d",r);
11064       }
11065     }
11066     printf("\n");
11067 #endif
11068     /*printf(" p32:");
11069     for(r=0;r<=CCREG;r++) {
11070       if((p32[i]>>r)&1) {
11071         if(r==CCREG) printf(" CC");
11072         else if(r==HIREG) printf(" HI");
11073         else if(r==LOREG) printf(" LO");
11074         else printf(" r%d",r);
11075       }
11076     }
11077     if(p32[i]!=regs[i].is32) printf(" NO MATCH\n");
11078     else printf("\n");*/
11079     if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP) {
11080       #if defined(__i386__) || defined(__x86_64__)
11081       printf("branch(%d): eax=%d ecx=%d edx=%d ebx=%d ebp=%d esi=%d edi=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7]);
11082       if(branch_regs[i].dirty&1) printf("eax ");
11083       if((branch_regs[i].dirty>>1)&1) printf("ecx ");
11084       if((branch_regs[i].dirty>>2)&1) printf("edx ");
11085       if((branch_regs[i].dirty>>3)&1) printf("ebx ");
11086       if((branch_regs[i].dirty>>5)&1) printf("ebp ");
11087       if((branch_regs[i].dirty>>6)&1) printf("esi ");
11088       if((branch_regs[i].dirty>>7)&1) printf("edi ");
11089       #endif
11090       #ifdef __arm__
11091       printf("branch(%d): r0=%d r1=%d r2=%d r3=%d r4=%d r5=%d r6=%d r7=%d r8=%d r9=%d r10=%d r12=%d dirty: ",i,branch_regs[i].regmap[0],branch_regs[i].regmap[1],branch_regs[i].regmap[2],branch_regs[i].regmap[3],branch_regs[i].regmap[4],branch_regs[i].regmap[5],branch_regs[i].regmap[6],branch_regs[i].regmap[7],branch_regs[i].regmap[8],branch_regs[i].regmap[9],branch_regs[i].regmap[10],branch_regs[i].regmap[12]);
11092       if(branch_regs[i].dirty&1) printf("r0 ");
11093       if((branch_regs[i].dirty>>1)&1) printf("r1 ");
11094       if((branch_regs[i].dirty>>2)&1) printf("r2 ");
11095       if((branch_regs[i].dirty>>3)&1) printf("r3 ");
11096       if((branch_regs[i].dirty>>4)&1) printf("r4 ");
11097       if((branch_regs[i].dirty>>5)&1) printf("r5 ");
11098       if((branch_regs[i].dirty>>6)&1) printf("r6 ");
11099       if((branch_regs[i].dirty>>7)&1) printf("r7 ");
11100       if((branch_regs[i].dirty>>8)&1) printf("r8 ");
11101       if((branch_regs[i].dirty>>9)&1) printf("r9 ");
11102       if((branch_regs[i].dirty>>10)&1) printf("r10 ");
11103       if((branch_regs[i].dirty>>12)&1) printf("r12 ");
11104       #endif
11105 #ifndef FORCE32
11106       printf(" 32:");
11107       for(r=0;r<=CCREG;r++) {
11108         if((branch_regs[i].is32>>r)&1) {
11109           if(r==CCREG) printf(" CC");
11110           else if(r==HIREG) printf(" HI");
11111           else if(r==LOREG) printf(" LO");
11112           else printf(" r%d",r);
11113         }
11114       }
11115       printf("\n");
11116 #endif
11117     }
11118   }
11119 #endif // DISASM
11120
11121   /* Pass 8 - Assembly */
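        // (Walks the block again and emits host code: write-back/reload of the entry state,
        //  the per-itype assemblers in the switch below, and periodic literal pools.  Branch
        //  types set ds=1 so the following delay-slot instruction is skipped here, having
        //  been assembled together with the branch.)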
11122   linkcount=0;stubcount=0;
11123   ds=0;is_delayslot=0;
11124   cop1_usable=0;
11125   uint64_t is32_pre=0;
11126   u_int dirty_pre=0;
11127   u_int beginning=(u_int)out;
11128   if((u_int)addr&1) {
11129     ds=1;
11130     pagespan_ds();
11131   }
11132   u_int instr_addr0_override=0;
11133
11134 #ifdef PCSX
11135   if (start == 0x80030000) {
11136     // nasty hack for fastbios thing
11137     // override block entry to this code
11138     instr_addr0_override=(u_int)out;
11139     emit_movimm(start,0);
11140     // abuse io address var as a flag that we
11141     // have already returned here once
11142     emit_readword((int)&address,1);
11143     emit_writeword(0,(int)&pcaddr);
11144     emit_writeword(0,(int)&address);
11145     emit_cmp(0,1);
11146     emit_jne((int)new_dyna_leave);
11147   }
11148 #endif
11149   for(i=0;i<slen;i++)
11150   {
11151     //if(ds) printf("ds: ");
11152     disassemble_inst(i);
11153     if(ds) {
11154       ds=0; // Skip delay slot
11155       if(bt[i]) assem_debug("OOPS - branch into delay slot\n");
11156       instr_addr[i]=0;
11157     } else {
11158       #ifndef DESTRUCTIVE_WRITEBACK
11159       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11160       {
11161         wb_sx(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,is32_pre,regs[i].was32,
11162               unneeded_reg[i],unneeded_reg_upper[i]);
11163         wb_valid(regmap_pre[i],regs[i].regmap_entry,dirty_pre,regs[i].wasdirty,is32_pre,
11164               unneeded_reg[i],unneeded_reg_upper[i]);
11165       }
11166       if((itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)&&!likely[i]) {
11167         is32_pre=branch_regs[i].is32;
11168         dirty_pre=branch_regs[i].dirty;
11169       }else{
11170         is32_pre=regs[i].is32;
11171         dirty_pre=regs[i].dirty;
11172       }
11173       #endif
11174       // write back
11175       if(i<2||(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000))
11176       {
11177         wb_invalidate(regmap_pre[i],regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32,
11178                       unneeded_reg[i],unneeded_reg_upper[i]);
11179         loop_preload(regmap_pre[i],regs[i].regmap_entry);
11180       }
11181       // branch target entry point
11182       instr_addr[i]=(u_int)out;
11183       assem_debug("<->\n");
11184       // load regs
11185       if(regs[i].regmap_entry[HOST_CCREG]==CCREG&&regs[i].regmap[HOST_CCREG]!=CCREG)
11186         wb_register(CCREG,regs[i].regmap_entry,regs[i].wasdirty,regs[i].was32);
11187       load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i],rs2[i]);
11188       address_generation(i,&regs[i],regs[i].regmap_entry);
11189       load_consts(regmap_pre[i],regs[i].regmap,regs[i].was32,i);
11190       if(itype[i]==RJUMP||itype[i]==UJUMP||itype[i]==CJUMP||itype[i]==SJUMP||itype[i]==FJUMP)
11191       {
11192         // Load the delay slot registers if necessary
11193         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i]&&(rs1[i+1]!=rt1[i]||rt1[i]==0))
11194           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11195         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i]&&(rs2[i+1]!=rt1[i]||rt1[i]==0))
11196           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11197         if(itype[i+1]==STORE||itype[i+1]==STORELR||(opcode[i+1]&0x3b)==0x39||(opcode[i+1]&0x3b)==0x3a)
11198           load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11199       }
11200       else if(i+1<slen)
11201       {
11202         // Preload registers for following instruction
11203         if(rs1[i+1]!=rs1[i]&&rs1[i+1]!=rs2[i])
11204           if(rs1[i+1]!=rt1[i]&&rs1[i+1]!=rt2[i])
11205             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs1[i+1],rs1[i+1]);
11206         if(rs2[i+1]!=rs1[i+1]&&rs2[i+1]!=rs1[i]&&rs2[i+1]!=rs2[i])
11207           if(rs2[i+1]!=rt1[i]&&rs2[i+1]!=rt2[i])
11208             load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,rs2[i+1],rs2[i+1]);
11209       }
11210       // TODO: if(is_ooo(i)) address_generation(i+1);
11211       if(itype[i]==CJUMP||itype[i]==FJUMP)
11212         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,CCREG,CCREG);
11213       if(itype[i]==STORE||itype[i]==STORELR||(opcode[i]&0x3b)==0x39||(opcode[i]&0x3b)==0x3a)
11214         load_regs(regs[i].regmap_entry,regs[i].regmap,regs[i].was32,INVCP,INVCP);
11215       if(bt[i]) cop1_usable=0;
11216       // assemble
11217       switch(itype[i]) {
11218         case ALU:
11219           alu_assemble(i,&regs[i]);break;
11220         case IMM16:
11221           imm16_assemble(i,&regs[i]);break;
11222         case SHIFT:
11223           shift_assemble(i,&regs[i]);break;
11224         case SHIFTIMM:
11225           shiftimm_assemble(i,&regs[i]);break;
11226         case LOAD:
11227           load_assemble(i,&regs[i]);break;
11228         case LOADLR:
11229           loadlr_assemble(i,&regs[i]);break;
11230         case STORE:
11231           store_assemble(i,&regs[i]);break;
11232         case STORELR:
11233           storelr_assemble(i,&regs[i]);break;
11234         case COP0:
11235           cop0_assemble(i,&regs[i]);break;
11236         case COP1:
11237           cop1_assemble(i,&regs[i]);break;
11238         case C1LS:
11239           c1ls_assemble(i,&regs[i]);break;
11240         case COP2:
11241           cop2_assemble(i,&regs[i]);break;
11242         case C2LS:
11243           c2ls_assemble(i,&regs[i]);break;
11244         case C2OP:
11245           c2op_assemble(i,&regs[i]);break;
11246         case FCONV:
11247           fconv_assemble(i,&regs[i]);break;
11248         case FLOAT:
11249           float_assemble(i,&regs[i]);break;
11250         case FCOMP:
11251           fcomp_assemble(i,&regs[i]);break;
11252         case MULTDIV:
11253           multdiv_assemble(i,&regs[i]);break;
11254         case MOV:
11255           mov_assemble(i,&regs[i]);break;
11256         case SYSCALL:
11257           syscall_assemble(i,&regs[i]);break;
11258         case HLECALL:
11259           hlecall_assemble(i,&regs[i]);break;
11260         case INTCALL:
11261           intcall_assemble(i,&regs[i]);break;
11262         case UJUMP:
11263           ujump_assemble(i,&regs[i]);ds=1;break;
11264         case RJUMP:
11265           rjump_assemble(i,&regs[i]);ds=1;break;
11266         case CJUMP:
11267           cjump_assemble(i,&regs[i]);ds=1;break;
11268         case SJUMP:
11269           sjump_assemble(i,&regs[i]);ds=1;break;
11270         case FJUMP:
11271           fjump_assemble(i,&regs[i]);ds=1;break;
11272         case SPAN:
11273           pagespan_assemble(i,&regs[i]);break;
11274       }
11275       if(itype[i]==UJUMP||itype[i]==RJUMP||(source[i]>>16)==0x1000)
11276         literal_pool(1024);
11277       else
11278         literal_pool_jumpover(256);
11279     }
11280   }
11281   //assert(itype[i-2]==UJUMP||itype[i-2]==RJUMP||(source[i-2]>>16)==0x1000);
11282   // If the block did not end with an unconditional branch,
11283   // add a jump to the next instruction.
11284   if(i>1) {
11285     if(itype[i-2]!=UJUMP&&itype[i-2]!=RJUMP&&(source[i-2]>>16)!=0x1000&&itype[i-1]!=SPAN) {
11286       assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11287       assert(i==slen);
11288       if(itype[i-2]!=CJUMP&&itype[i-2]!=SJUMP&&itype[i-2]!=FJUMP) {
11289         store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11290         if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11291           emit_loadreg(CCREG,HOST_CCREG);
11292         emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11293       }
11294       else if(!likely[i-2])
11295       {
11296         store_regs_bt(branch_regs[i-2].regmap,branch_regs[i-2].is32,branch_regs[i-2].dirty,start+i*4);
11297         assert(branch_regs[i-2].regmap[HOST_CCREG]==CCREG);
11298       }
11299       else
11300       {
11301         store_regs_bt(regs[i-2].regmap,regs[i-2].is32,regs[i-2].dirty,start+i*4);
11302         assert(regs[i-2].regmap[HOST_CCREG]==CCREG);
11303       }
11304       add_to_linker((int)out,start+i*4,0);
11305       emit_jmp(0);
11306     }
11307   }
11308   else
11309   {
11310     assert(i>0);
11311     assert(itype[i-1]!=UJUMP&&itype[i-1]!=CJUMP&&itype[i-1]!=SJUMP&&itype[i-1]!=RJUMP&&itype[i-1]!=FJUMP);
11312     store_regs_bt(regs[i-1].regmap,regs[i-1].is32,regs[i-1].dirty,start+i*4);
11313     if(regs[i-1].regmap[HOST_CCREG]!=CCREG)
11314       emit_loadreg(CCREG,HOST_CCREG);
11315     emit_addimm(HOST_CCREG,CLOCK_DIVIDER*(ccadj[i-1]+1),HOST_CCREG);
11316     add_to_linker((int)out,start+i*4,0);
11317     emit_jmp(0);
11318   }
11319
11320   // TODO: delay slot stubs?
11321   // Stubs
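        // (Out-of-line slow paths collected during assembly; stubs[i][0] selects the handler:
        //  memory read/write fallbacks, cycle-count checks, invalid-code checks, unaligned
        //  stores and coprocessor-unusable traps.)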
11322   for(i=0;i<stubcount;i++)
11323   {
11324     switch(stubs[i][0])
11325     {
11326       case LOADB_STUB:
11327       case LOADH_STUB:
11328       case LOADW_STUB:
11329       case LOADD_STUB:
11330       case LOADBU_STUB:
11331       case LOADHU_STUB:
11332         do_readstub(i);break;
11333       case STOREB_STUB:
11334       case STOREH_STUB:
11335       case STOREW_STUB:
11336       case STORED_STUB:
11337         do_writestub(i);break;
11338       case CC_STUB:
11339         do_ccstub(i);break;
11340       case INVCODE_STUB:
11341         do_invstub(i);break;
11342       case FP_STUB:
11343         do_cop1stub(i);break;
11344       case STORELR_STUB:
11345         do_unalignedwritestub(i);break;
11346     }
11347   }
11348
11349   if (instr_addr0_override)
11350     instr_addr[0] = instr_addr0_override;
11351
11352   /* Pass 9 - Linker */
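        // (Resolves branches recorded by add_to_linker(): link_addr[i][2]==0 marks a branch
        //  to another block, patched to existing code if check_addr() finds it (the exit
        //  stub is still emitted and registered via add_link), otherwise to the exit stub;
        //  the rest are internal branches patched straight to instr_addr[target].)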
11353   for(i=0;i<linkcount;i++)
11354   {
11355     assem_debug("%8x -> %8x\n",link_addr[i][0],link_addr[i][1]);
11356     literal_pool(64);
11357     if(!link_addr[i][2])
11358     {
11359       void *stub=out;
11360       void *addr=check_addr(link_addr[i][1]);
11361       emit_extjump(link_addr[i][0],link_addr[i][1]);
11362       if(addr) {
11363         set_jump_target(link_addr[i][0],(int)addr);
11364         add_link(link_addr[i][1],stub);
11365       }
11366       else set_jump_target(link_addr[i][0],(int)stub);
11367     }
11368     else
11369     {
11370       // Internal branch
11371       int target=(link_addr[i][1]-start)>>2;
11372       assert(target>=0&&target<slen);
11373       assert(instr_addr[target]);
11374       //#ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11375       //set_jump_target_fillslot(link_addr[i][0],instr_addr[target],link_addr[i][2]>>1);
11376       //#else
11377       set_jump_target(link_addr[i][0],instr_addr[target]);
11378       //#endif
11379     }
11380   }
11381   // External Branch Targets (jump_in)
11382   if(copy+slen*4>(void *)shadow+sizeof(shadow)) copy=shadow;
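        // For every branch target (and the block entry) a dirty-check stub is emitted and
        // added to jump_dirty, with its entry point added to jump_in and to any existing
        // hash-table slot; the stub presumably compares the shadow copy of the source
        // (made further below) against RAM so self-modifying code can be detected.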
11383   for(i=0;i<slen;i++)
11384   {
11385     if(bt[i]||i==0)
11386     {
11387       if(instr_addr[i]) // TODO - delay slots (=null)
11388       {
11389         u_int vaddr=start+i*4;
11390         u_int page=get_page(vaddr);
11391         u_int vpage=get_vpage(vaddr);
11392         literal_pool(256);
11393         //if(!(is32[i]&(~unneeded_reg_upper[i])&~(1LL<<CCREG)))
11394 #ifndef FORCE32
11395         if(!requires_32bit[i])
11396 #else
11397         if(1)
11398 #endif
11399         {
11400           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11401           assem_debug("jump_in: %x\n",start+i*4);
11402           ll_add(jump_dirty+vpage,vaddr,(void *)out);
11403           int entry_point=do_dirty_stub(i);
11404           ll_add(jump_in+page,vaddr,(void *)entry_point);
11405           // If there was an existing entry in the hash table,
11406           // replace it with the new address.
11407           // Don't add new entries.  We'll insert the
11408           // ones that actually get used in check_addr().
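                // Each hash bin appears to hold two {vaddr, code pointer} pairs:
                // ht_bin[0]/ht_bin[1] is the newer entry, ht_bin[2]/ht_bin[3] the older one.
                // A lookup would then be roughly (illustrative sketch only):
                //   int *b=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
                //   if(b[0]==vaddr) return (void *)b[1];
                //   if(b[2]==vaddr) return (void *)b[3];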
11409           int *ht_bin=hash_table[((vaddr>>16)^vaddr)&0xFFFF];
11410           if(ht_bin[0]==vaddr) {
11411             ht_bin[1]=entry_point;
11412           }
11413           if(ht_bin[2]==vaddr) {
11414             ht_bin[3]=entry_point;
11415           }
11416         }
11417         else
11418         {
11419           u_int r=requires_32bit[i]|!!(requires_32bit[i]>>32);
11420           assem_debug("%8x (%d) <- %8x\n",instr_addr[i],i,start+i*4);
11421           assem_debug("jump_in: %x (restricted - %x)\n",start+i*4,r);
11422           //int entry_point=(int)out;
11423           ////assem_debug("entry_point: %x\n",entry_point);
11424           //load_regs_entry(i);
11425           //if(entry_point==(int)out)
11426           //  entry_point=instr_addr[i];
11427           //else
11428           //  emit_jmp(instr_addr[i]);
11429           //ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11430           ll_add_32(jump_dirty+vpage,vaddr,r,(void *)out);
11431           int entry_point=do_dirty_stub(i);
11432           ll_add_32(jump_in+page,vaddr,r,(void *)entry_point);
11433         }
11434       }
11435     }
11436   }
11437   // Write out the literal pool if necessary
11438   literal_pool(0);
11439   #ifdef CORTEX_A8_BRANCH_PREDICTION_HACK
11440   // Align code
11441   if(((u_int)out)&7) emit_addnop(13);
11442   #endif
11443   assert((u_int)out-beginning<MAX_OUTPUT_BLOCK_SIZE);
11444   //printf("shadow buffer: %x-%x\n",(int)copy,(int)copy+slen*4);
11445   memcpy(copy,source,slen*4);
11446   copy+=slen*4;
11447   
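        // On ARM the freshly emitted code must be flushed from the data cache and the
        // instruction cache invalidated before it can safely execute.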
11448   #ifdef __arm__
11449   __clear_cache((void *)beginning,out);
11450   #endif
11451   
11452   // If we're within 256K of the end of the buffer,
11453   // start over from the beginning. (Is 256K enough?)
11454   if((int)out>BASE_ADDR+(1<<TARGET_SIZE_2)-MAX_OUTPUT_BLOCK_SIZE) out=(u_char *)BASE_ADDR;
11455   
11456   // Trap writes to any of the pages we compiled
11457   for(i=start>>12;i<=(start+slen*4)>>12;i++) {
11458     invalid_code[i]=0;
11459 #ifndef DISABLE_TLB
11460     memory_map[i]|=0x40000000;
11461     if((signed int)start>=(signed int)0xC0000000) {
11462       assert(using_tlb);
11463       j=(((u_int)i<<12)+(memory_map[i]<<2)-(u_int)rdram+(u_int)0x80000000)>>12;
11464       invalid_code[j]=0;
11465       memory_map[j]|=0x40000000;
11466       //printf("write protect physical page: %x (virtual %x)\n",j<<12,start);
11467     }
11468 #endif
11469   }
11470   inv_code_start=inv_code_end=~0;
11471 #ifdef PCSX
11472   // for PCSX we need to mark all mirrors too
11473   if(get_page(start)<(RAM_SIZE>>12))
11474     for(i=start>>12;i<=(start+slen*4)>>12;i++)
11475       invalid_code[((u_int)0x00000000>>12)|(i&0x1ff)]=
11476       invalid_code[((u_int)0x80000000>>12)|(i&0x1ff)]=
11477       invalid_code[((u_int)0xa0000000>>12)|(i&0x1ff)]=0;
11478 #endif
11479   
11480   /* Pass 10 - Free memory by expiring oldest blocks */
11481   
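        // expirep is a 16-bit cursor: bits 0-10 pick one of 2048 buckets, bits 11-12 one of
        // the four phases below, and the upper bits the region of the output buffer being
        // expired (a sketch of the scheme, inferred from the masks used here).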
11482   int end=((((int)out-BASE_ADDR)>>(TARGET_SIZE_2-16))+16384)&65535;
11483   while(expirep!=end)
11484   {
11485     int shift=TARGET_SIZE_2-3; // Divide into 8 blocks
11486     int base=BASE_ADDR+((expirep>>13)<<shift); // Base address of this block
11487     inv_debug("EXP: Phase %d\n",expirep);
11488     switch((expirep>>11)&3)
11489     {
11490       case 0:
11491         // Clear jump_in and jump_dirty
11492         ll_remove_matching_addrs(jump_in+(expirep&2047),base,shift);
11493         ll_remove_matching_addrs(jump_dirty+(expirep&2047),base,shift);
11494         ll_remove_matching_addrs(jump_in+2048+(expirep&2047),base,shift);
11495         ll_remove_matching_addrs(jump_dirty+2048+(expirep&2047),base,shift);
11496         break;
11497       case 1:
11498         // Clear pointers
11499         ll_kill_pointers(jump_out[expirep&2047],base,shift);
11500         ll_kill_pointers(jump_out[(expirep&2047)+2048],base,shift);
11501         break;
11502       case 2:
11503         // Clear hash table
11504         for(i=0;i<32;i++) {
11505           int *ht_bin=hash_table[((expirep&2047)<<5)+i];
11506           if((ht_bin[3]>>shift)==(base>>shift) ||
11507              ((ht_bin[3]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11508             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[2],ht_bin[3]);
11509             ht_bin[2]=ht_bin[3]=-1;
11510           }
11511           if((ht_bin[1]>>shift)==(base>>shift) ||
11512              ((ht_bin[1]-MAX_OUTPUT_BLOCK_SIZE)>>shift)==(base>>shift)) {
11513             inv_debug("EXP: Remove hash %x -> %x\n",ht_bin[0],ht_bin[1]);
11514             ht_bin[0]=ht_bin[2];
11515             ht_bin[1]=ht_bin[3];
11516             ht_bin[2]=ht_bin[3]=-1;
11517           }
11518         }
11519         break;
11520       case 3:
11521         // Clear jump_out
11522         #ifdef __arm__
11523         if((expirep&2047)==0) 
11524           do_clear_cache();
11525         #endif
11526         ll_remove_matching_addrs(jump_out+(expirep&2047),base,shift);
11527         ll_remove_matching_addrs(jump_out+2048+(expirep&2047),base,shift);
11528         break;
11529     }
11530     expirep=(expirep+1)&65535;
11531   }
11532   return 0;
11533 }
11534
11535 // vim:shiftwidth=2:expandtab